Update missing parts, improve til, and more (4 years ago)

- Node
  - Update the configuration to initialize the interface to all the smart contracts
- Common
  - Move the BlockData and BatchData types to common so that they can be shared among historydb, til and synchronizer
  - Remove hash.go (it was never used)
  - Remove slot.go (it was never used)
  - Remove smartcontractparams.go (it was never used, and the appropriate structs are defined in `eth/`)
  - Comment out the state / status method until its requirements are properly defined, and move it to the Synchronizer
- Synchronizer
  - Simplify the `Sync` routine to sync only one block per call, and return useful information (sketched after this list)
  - Use BlockData and BatchData from common
  - Check that events belong to the expected block hash (sketched after this list)
  - In L1Batch, query L1UserTxs from the HistoryDB
  - Fill in ERC20 token information
  - Test AddTokens with test.Client
- HistoryDB
  - Use BlockData and BatchData from common
  - Add the `GetAllTokens` method
  - Uncomment and update GetL1UserTxs (with corresponding tests)
- Til
  - Rename all instances of RegisterToken to AddToken (to follow the smart contract naming)
  - Use BlockData and BatchData from common
  - Move testL1CoordinatorTxs and testL2Txs out of BatchData into a separate struct in Context
  - Start Context with BatchNum = 1 (which the protocol defines to be the first batch number)
  - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero)
  - In all L1Txs, if LoadAmount or Amount is not used, set it to 0 so that no *big.Int is nil (sketched after this list)
  - In L1UserTx, don't set BatchNum: when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not yet known (it is the synchronizer's job to set it)
  - In L1UserTxs, set `UserOrigin` and `ToForgeL1TxsNum`
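
The one-block-per-call `Sync` flow can be pictured with a minimal sketch. Everything here is illustrative: the `Synchronizer` and `BlockData` names come from the commit text, but the signature, fields and reorg reporting are assumptions, not the actual hermez-node API.

```go
// Sketch of a one-block-per-call sync loop (names and signature are
// illustrative, not the real hermez-node API).
package main

import (
	"context"
	"fmt"
	"time"
)

// BlockData groups everything synced from a single ethereum block
// (per the commit, this type lives in the common package).
type BlockData struct {
	Num int64
}

// Synchronizer is a stand-in for the real synchronizer.
type Synchronizer struct{ lastBlock int64 }

// Sync processes at most one block and reports what was synced, plus
// how many blocks were discarded due to a reorg (0 here).
func (s *Synchronizer) Sync(ctx context.Context) (*BlockData, int64, error) {
	s.lastBlock++
	// ... fetch block s.lastBlock, its events and batches ...
	return &BlockData{Num: s.lastBlock}, 0, nil
}

func main() {
	s := &Synchronizer{}
	ctx := context.Background()
	for {
		blockData, discarded, err := s.Sync(ctx)
		if err != nil {
			fmt.Println("sync error:", err)
		} else if discarded > 0 {
			fmt.Println("reorg: discarded", discarded, "blocks")
		} else {
			fmt.Println("synced block", blockData.Num)
		}
		if blockData != nil && blockData.Num >= 3 {
			return // stop the demo after a few blocks
		}
		time.Sleep(10 * time.Millisecond)
	}
}
```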
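
The block-hash check on synced events can be done directly on go-ethereum's `types.Log`, which carries the `BlockHash` of the block the event was emitted in. The helper below is a hypothetical sketch of that validation, not the synchronizer's actual code.

```go
// Hypothetical helper: verify that every log fetched for a block was
// actually emitted in the block we expect (guards against querying
// around a reorg).
package main

import (
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func checkEventsBlockHash(logs []types.Log, expected ethCommon.Hash) error {
	for _, l := range logs {
		if l.BlockHash != expected {
			return fmt.Errorf("event in tx %v has block hash %v, expected %v",
				l.TxHash, l.BlockHash, expected)
		}
	}
	return nil
}

func main() {
	expected := ethCommon.HexToHash("0x01")
	logs := []types.Log{{BlockHash: expected}, {BlockHash: ethCommon.HexToHash("0x02")}}
	if err := checkEventsBlockHash(logs, expected); err != nil {
		fmt.Println("mismatch detected:", err) // the second log fails the check
	}
}
```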
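
The "no nil *big.Int" rule for til-generated L1 txs amounts to defaulting unused amounts to zero. A minimal sketch, with a reduced stand-in for the L1Tx struct:

```go
// Default LoadAmount and Amount to zero when the test instruction does
// not use them, so later code can safely dereference the pointers.
package main

import (
	"fmt"
	"math/big"
)

type L1Tx struct {
	LoadAmount *big.Int
	Amount     *big.Int
}

// normalizeL1Tx fills unset amounts with 0.
func normalizeL1Tx(tx *L1Tx) {
	if tx.LoadAmount == nil {
		tx.LoadAmount = big.NewInt(0)
	}
	if tx.Amount == nil {
		tx.Amount = big.NewInt(0)
	}
}

func main() {
	tx := &L1Tx{Amount: big.NewInt(42)} // LoadAmount not used
	normalizeL1Tx(tx)
	fmt.Println(tx.LoadAmount, tx.Amount) // 0 42
}
```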

Update coordinator, call all api update functions (3 years ago)

- Common:
  - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition
- API:
  - Add UpdateNetworkInfoBlock to update just the block information, to be used when the node is not yet synchronized
- Node:
  - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals (sketched after this list)
- Synchronizer:
  - When mapping events by TxHash, use an array to support multiple calls of the same function happening within one transaction (for example, a smart contract could call withdraw-with-delay twice in a single transaction, which would generate 2 withdraw events and 2 deposit events)
  - In Stats, keep the entire LastBlock instead of just the blockNum
  - In Stats, add lastL1BatchBlock
  - Test Stats and SCVars
- Coordinator:
  - Enable writing the BatchInfo of every step of the pipeline to disk (as JSON text files) for debugging purposes
  - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline)
  - Implement shouldL1L2Batch
  - In TxManager, retry ethereum node RPC calls several times before treating them as errors (both for calls to forgeBatch and for fetching the transaction receipt)
  - In TxManager, reorganize the flow and note the specific points at which actions are taken when err != nil
- HistoryDB:
  - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged L1Batch, to help the coordinator decide when to forge an L1Batch
- EthereumClient and test.Client:
  - Update EthBlockByNumber to return the last block when the passed number is -1 (sketched after this list)
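
The node-side update calls boil down to a ticker loop per API function, stopped via context. The sketch below assumes a simple `API` stub and made-up intervals; only the method names `UpdateMetrics` and `UpdateRecommendedFee` come from the commit.

```go
// Periodically call an API update function until the context is done.
package main

import (
	"context"
	"fmt"
	"time"
)

type API struct{}

func (a *API) UpdateMetrics() error        { fmt.Println("metrics updated"); return nil }
func (a *API) UpdateRecommendedFee() error { fmt.Println("fee updated"); return nil }

func runUpdateLoop(ctx context.Context, interval time.Duration, update func() error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := update(); err != nil {
				fmt.Println("update error:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()
	api := &API{}
	go runUpdateLoop(ctx, 50*time.Millisecond, api.UpdateMetrics)
	go runUpdateLoop(ctx, 50*time.Millisecond, api.UpdateRecommendedFee)
	<-ctx.Done()
}
```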
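
The "-1 means last block" behaviour maps naturally onto go-ethereum's `ethclient.Client.BlockByNumber`, which returns the chain head when passed a nil block number. The wrapper below is a sketch under that assumption; the type name `EthClient` and the RPC URL are placeholders.

```go
// Wrapper that treats -1 as "latest block".
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

type EthClient struct{ c *ethclient.Client }

// EthBlockByNumber returns block n, or the chain head when n == -1.
func (e *EthClient) EthBlockByNumber(ctx context.Context, n int64) (*types.Block, error) {
	var num *big.Int
	if n != -1 {
		num = big.NewInt(n)
	}
	return e.c.BlockByNumber(ctx, num) // nil selects the latest block
}

func main() {
	c, err := ethclient.Dial("http://localhost:8545") // assumes a local node
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	ec := &EthClient{c: c}
	block, err := ec.EthBlockByNumber(context.Background(), -1)
	if err != nil {
		fmt.Println("query:", err)
		return
	}
	fmt.Println("last block:", block.Number())
}
```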

Update coordinator to work better under real net (3 years ago)

- cli / node
  - Update the SIGINT handler so that after 3 SIGINTs the process terminates unconditionally (sketched after this list)
- coordinator
  - Store stats without a pointer
  - In all functions that send a variable via channel, check for context done to avoid a deadlock when the node is stopped (no process may be reading from the channel, which has no queue) (sketched after this list)
  - Abstract `canForge` so that it can be used outside of the `Coordinator`
  - In `canForge`, check the block number in both the current and the next slot
  - Update tests due to smart contract changes in slot handling and minimum bid defaults
- TxManager
  - Add consts, vars and stats to allow evaluating `canForge`
  - Add a `canForge` method (not used yet)
  - Store batch and nonce status (last success and last pending)
  - Track nonces internally instead of relying on the ethereum node (required to work with ganache when there are pending txs)
  - Handle the (common) case of the receipt not being found right after the tx is sent
  - Don't start the main loop until we get an initial message with the stats and vars (so that in the loop the stats and vars are set to synchronizer values)
  - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This avoids sending consecutive stop-the-pipeline messages when multiple txs are detected to have failed consecutively. Also, future txs of the same pipeline after a discarded tx are discarded, and their nonces reused.
  - Robust handling of nonces (sketched after this list):
    - If geth returns "nonce too low", increase it
    - If geth returns "nonce too high", decrease it
    - If geth returns "underpriced", increase the gas price
    - If geth returns "replace underpriced", increase the gas price
  - Add support for resending transactions after a timeout
  - Store `BatchInfos` in a queue
- Pipeline
  - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline, including the failed batch number, so that on a restart non-failed batches are not repeated
  - When resetting the stateDB, reset from the local checkpoint if possible instead of from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced.
  - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator.
  - Avoid forging before the rollup genesis block number is reached
  - Add config parameter `StartSlotBlocksDelay`: the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge
  - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us
  - Add config parameter `ScheduleBatchBlocksAheadCheck`: the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, even though at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks between scheduling a batch and having it mined. (sketched after this list)
  - Add config parameter `SendBatchBlocksMarginCheck`: the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, even though at block 11 we canForge, the batch will be discarded if we can't forge at block 15.
  - Add config parameter `TxResendTimeout`: the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price
  - Add config parameter `MaxGasPrice`: the maximum gas price allowed for ethereum transactions
  - Add config parameter `NoReuseNonce`: disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache.
  - Extend BatchInfo with more useful information for debugging
- eth / ethereum client
  - Add the necessary methods to build the transaction auth object manually, so that the nonce, gas price, gas limit, etc. can be set explicitly
  - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually)
- synchronizer
  - In stats, add `NextSlot`
  - In stats, store the full last batch instead of just the last batch number
  - Instead of calculating nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced)
  - After every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event
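
A minimal sketch of the "3 SIGINTs terminate unconditionally" behaviour, assuming a `stopNode` callback that performs the graceful shutdown; the real cli/node wiring differs.

```go
// First interrupt triggers a graceful shutdown; the third forces exit.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func handleInterrupts(stopNode func()) {
	sigCh := make(chan os.Signal, 3)
	signal.Notify(sigCh, syscall.SIGINT)
	go func() {
		n := 0
		for range sigCh {
			n++
			if n == 1 {
				fmt.Println("shutting down gracefully (Ctrl+C twice more to force)")
				go stopNode()
			}
			if n >= 3 {
				fmt.Println("3 SIGINTs received, terminating")
				os.Exit(1)
			}
		}
	}()
}

func main() {
	done := make(chan struct{})
	handleInterrupts(func() { close(done) }) // stand-in for the node's Stop
	<-done
}
```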
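
The deadlock-avoiding channel sends reduce to a `select` between the send and `ctx.Done()`. A self-contained sketch (the channel and values are made up):

```go
// If nobody is reading the unbuffered channel, fall through on
// ctx.Done() instead of blocking forever.
package main

import (
	"context"
	"fmt"
	"time"
)

func sendOrQuit(ctx context.Context, ch chan<- int, v int) bool {
	select {
	case ch <- v:
		return true
	case <-ctx.Done():
		return false // receiver is gone; give up instead of deadlocking
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	ch := make(chan int) // unbuffered, and nobody ever reads it
	if !sendOrQuit(ctx, ch, 42) {
		fmt.Println("send aborted: context done")
	}
}
```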
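
A sketch of the nonce and gas-price recovery rules, matching on the error strings geth typically returns ("nonce too low", "nonce too high", "... underpriced"); the exact strings and the +10% bump are assumptions, not the coordinator's actual constants.

```go
// Adjust nonce / gas price according to the send error.
package main

import (
	"fmt"
	"math/big"
	"strings"
)

// adjustForSendError applies the rules from the commit: low nonce ->
// bump, high nonce -> lower, (replacement) underpriced -> raise gas price.
func adjustForSendError(err error, nonce *uint64, gasPrice *big.Int) {
	msg := err.Error()
	switch {
	case strings.Contains(msg, "nonce too low"):
		*nonce++
	case strings.Contains(msg, "nonce too high"):
		*nonce--
	case strings.Contains(msg, "underpriced"): // covers "replacement transaction underpriced" too
		gasPrice.Mul(gasPrice, big.NewInt(110))
		gasPrice.Div(gasPrice, big.NewInt(100)) // +10%
	}
}

func main() {
	nonce, gasPrice := uint64(7), big.NewInt(1_000_000_000)
	adjustForSendError(fmt.Errorf("replacement transaction underpriced"), &nonce, gasPrice)
	fmt.Println(nonce, gasPrice) // 7 1100000000
}
```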
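
How `ScheduleBatchBlocksAheadCheck` is meant to gate scheduling can be shown with a small sketch (`SendBatchBlocksMarginCheck` works the same way for sending); the `canForgeAt` predicate and the config struct are stand-ins, not the coordinator's code.

```go
// Besides the next block, also check a configurable number of blocks
// ahead; stop scheduling if that future check fails.
package main

import "fmt"

type Config struct {
	ScheduleBatchBlocksAheadCheck int64
}

// canForgeAt is a stand-in for the auction "am I the allowed forger at
// this block?" check; here we pretend forging is allowed below block 15.
func canForgeAt(blockNum int64) bool { return blockNum < 15 }

func shouldScheduleBatch(currentBlock int64, cfg Config) bool {
	next := currentBlock + 1
	ahead := currentBlock + cfg.ScheduleBatchBlocksAheadCheck
	return canForgeAt(next) && canForgeAt(ahead)
}

func main() {
	cfg := Config{ScheduleBatchBlocksAheadCheck: 5}
	// At block 10: we can forge at block 11, but not at block 15,
	// so scheduling stops (mirrors the example in the commit text).
	fmt.Println(shouldScheduleBatch(10, cfg)) // false
}
```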
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
3 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update coordinator, call all api update functions

- Common:
  - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition
- API:
  - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized
- Node:
  - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals
- Synchronizer:
  - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events).
  - In Stats, keep entire LastBlock instead of just the blockNum
  - In Stats, add lastL1BatchBlock
  - Test Stats and SCVars
- Coordinator:
  - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes.
  - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline)
  - Implement shouldL1lL2Batch (see the sketch after this list)
  - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt)
  - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil
- HistoryDB:
  - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch.
- EthereumClient and test.Client:
  - Update EthBlockByNumber to return the last block when the passed number is -1.
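As a rough sketch of how GetLastL1BatchBlockNum and the rollup ForgeL1L2BatchTimeout could drive the shouldL1lL2Batch decision, the example below shows one possible check. The function name, the safetyMargin constant and the exact comparison are illustrative assumptions, not the coordinator's actual implementation.

package main

import "fmt"

// shouldL1L2Batch is an illustrative helper (not the actual coordinator
// code): it returns true when enough blocks have passed since the last
// forged L1 batch that the next batch should include the frozen L1UserTxs,
// keeping a small safety margin so the forge tx can be mined in time.
// lastL1BatchBlockNum would come from something like
// HistoryDB.GetLastL1BatchBlockNum; the margin value is an assumption.
func shouldL1L2Batch(currentBlockNum, lastL1BatchBlockNum, forgeL1L2BatchTimeout int64) bool {
    const safetyMargin = 1
    return currentBlockNum-lastL1BatchBlockNum >= forgeL1L2BatchTimeout-safetyMargin
}

func main() {
    // With a timeout of 10 blocks, a batch forged at block 100 forces an
    // L1L2 batch at block 109 at the latest (given the margin of 1).
    fmt.Println(shouldL1L2Batch(105, 100, 10)) // false
    fmt.Println(shouldL1L2Batch(109, 100, 10)) // true
}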
Update coordinator to work better under real net

- cli / node
  - Update the handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally
- coordinator
  - Store stats without pointer
  - In all functions that send a variable via channel, check for context done to avoid a deadlock (due to no process reading from the channel, which has no queue) when the node is stopped.
  - Abstract `canForge` so that it can be used outside of the `Coordinator`
  - In `canForge`, check the blockNumber in the current and next slot.
  - Update tests due to smart contract changes in slot handling, and minimum bid defaults
  - TxManager
    - Add consts, vars and stats to allow evaluating `canForge`
    - Add `canForge` method (not used yet)
    - Store batch and nonce status (last success and last pending)
    - Track nonces internally instead of relying on the ethereum node (this is required to work with Ganache when there are pending txs)
    - Handle the (common) case of the receipt not being found after the tx is sent.
    - Don't start the main loop until we get an initial message for the stats and vars (so that in the loop the stats and vars are set to synchronizer values)
    - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This avoids sending consecutive messages to stop the pipeline when multiple txs are detected to have failed consecutively. Also, future txs of the same pipeline after a discarded tx are discarded, and their nonces reused.
    - Robust handling of nonces (see the sketch after this list):
      - If geth returns nonce is too low, increase it
      - If geth returns nonce too high, decrease it
      - If geth returns underpriced, increase the gas price
      - If geth returns replace underpriced, increase the gas price
    - Add support for resending transactions after a timeout
    - Store `BatchInfos` in a queue
  - Pipeline
    - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with the information of the failed batch number so that on a restart, non-failed batches are not repeated.
    - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced.
    - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator.
  - Avoid forging when we haven't reached the rollup genesis block number.
  - Add config parameter `StartSlotBlocksDelay`: the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge.
  - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us.
  - Add config parameter `ScheduleBatchBlocksAheadCheck`: the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, even though at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined.
  - Add config parameter `SendBatchBlocksMarginCheck`: the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, even though at block 11 we canForge, the batch will be discarded if we can't forge at block 15.
  - Add config parameter `TxResendTimeout`: the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price
  - Add config parameter `MaxGasPrice`: the maximum gas price allowed for ethereum transactions
  - Add config parameter `NoReuseNonce`: disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache.
  - Extend BatchInfo with more useful information for debugging
- eth / ethereum client
  - Add the necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc. manually
  - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually)
- synchronizer
  - In stats, add `NextSlot`
  - In stats, store the full last batch instead of just the last batch number
  - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced)
  - After every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
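A minimal sketch of the nonce and gas-price recovery rules listed above: matching geth errors by substring, the adjustForSendError helper name and the 10% gas-price bump are assumptions made for illustration, not the node's actual TxManager code.

package main

import (
    "fmt"
    "math/big"
    "strings"
)

// adjustForSendError is an illustrative sketch of the recovery rules listed
// above, not the actual TxManager implementation: given the error returned
// when sending a transaction, it nudges the locally tracked nonce or the gas
// price so that a resend can succeed.
func adjustForSendError(err error, nonce uint64, gasPrice *big.Int) (uint64, *big.Int) {
    msg := strings.ToLower(err.Error())
    switch {
    case strings.Contains(msg, "nonce too low"):
        nonce++ // our tracked nonce lags behind the node
    case strings.Contains(msg, "nonce too high"):
        nonce-- // our tracked nonce ran ahead of the node
    case strings.Contains(msg, "underpriced"):
        // covers both "underpriced" and "replacement transaction
        // underpriced": bump the gas price by ~10% (assumed factor)
        gasPrice = new(big.Int).Div(new(big.Int).Mul(gasPrice, big.NewInt(110)), big.NewInt(100))
    }
    return nonce, gasPrice
}

func main() {
    nonce, gasPrice := adjustForSendError(fmt.Errorf("nonce too low"), 7, big.NewInt(1000000000))
    fmt.Println(nonce, gasPrice) // 8 1000000000
}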
package synchronizer

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "math/big"
    "os"
    "sort"
    "testing"
    "time"

    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/hermeznetwork/hermez-node/common"
    dbUtils "github.com/hermeznetwork/hermez-node/db"
    "github.com/hermeznetwork/hermez-node/db/historydb"
    "github.com/hermeznetwork/hermez-node/db/statedb"
    "github.com/hermeznetwork/hermez-node/eth"
    "github.com/hermeznetwork/hermez-node/test"
    "github.com/hermeznetwork/hermez-node/test/til"
    "github.com/jinzhu/copier"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var tokenConsts = map[common.TokenID]eth.ERC20Consts{}

type timer struct {
    time int64
}

func (t *timer) Time() int64 {
    currentTime := t.time
    t.time++
    return currentTime
}

func accountsCmp(accounts []common.Account) func(i, j int) bool {
    return func(i, j int) bool { return accounts[i].Idx < accounts[j].Idx }
}
// checkSyncBlock checks the Sync output and the HistoryDB state against the
// expected values generated by til
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBlock *common.BlockData) {
    // Check Blocks
    dbBlocks, err := s.historyDB.GetAllBlocks()
    require.NoError(t, err)
    dbBlocks = dbBlocks[1:] // ignore block 0, added by default in the DB
    assert.Equal(t, blockNum, len(dbBlocks))
    assert.Equal(t, int64(blockNum), dbBlocks[blockNum-1].Num)
    assert.NotEqual(t, dbBlocks[blockNum-1].Hash, dbBlocks[blockNum-2].Hash)
    assert.Greater(t, dbBlocks[blockNum-1].Timestamp.Unix(), dbBlocks[blockNum-2].Timestamp.Unix())

    // Check Tokens
    assert.Equal(t, len(block.Rollup.AddedTokens), len(syncBlock.Rollup.AddedTokens))
    dbTokens, err := s.historyDB.GetAllTokens()
    require.NoError(t, err)
    dbTokens = dbTokens[1:] // ignore token 0, added by default in the DB
    for i, token := range block.Rollup.AddedTokens {
        dbToken := dbTokens[i]
        syncToken := syncBlock.Rollup.AddedTokens[i]
        assert.Equal(t, block.Block.Num, syncToken.EthBlockNum)
        assert.Equal(t, token.TokenID, syncToken.TokenID)
        assert.Equal(t, token.EthAddr, syncToken.EthAddr)
        tokenConst := tokenConsts[token.TokenID]
        assert.Equal(t, tokenConst.Name, syncToken.Name)
        assert.Equal(t, tokenConst.Symbol, syncToken.Symbol)
        assert.Equal(t, tokenConst.Decimals, syncToken.Decimals)

        var tokenCpy historydb.TokenWithUSD
        //nolint:gosec
        require.Nil(t, copier.Copy(&tokenCpy, &token))      // copy the common.Token fields to historydb.TokenWithUSD
        require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy the eth.ERC20Consts fields to historydb.TokenWithUSD
        tokenCpy.ItemID = dbToken.ItemID                    // we don't care about ItemID
        assert.Equal(t, tokenCpy, dbToken)
    }

    // Check submitted L1UserTxs
    assert.Equal(t, len(block.Rollup.L1UserTxs), len(syncBlock.Rollup.L1UserTxs))
    dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
    require.NoError(t, err)
    // Ignore BatchNum in syncBlock.L1UserTxs because this value is set by
    // the HistoryDB.  Also ignore EffectiveAmount & EffectiveDepositAmount
    // because these values are set by StateDB.ProcessTxs.
    for i := range syncBlock.Rollup.L1UserTxs {
        syncBlock.Rollup.L1UserTxs[i].BatchNum = block.Rollup.L1UserTxs[i].BatchNum
        assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveDepositAmount)
        assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveAmount)
    }
    assert.Equal(t, block.Rollup.L1UserTxs, syncBlock.Rollup.L1UserTxs)
    for _, tx := range block.Rollup.L1UserTxs {
        var dbTx *common.L1Tx
        // Find tx in DB output
        for _, _dbTx := range dbL1UserTxs {
            if *tx.ToForgeL1TxsNum == *_dbTx.ToForgeL1TxsNum &&
                tx.Position == _dbTx.Position {
                dbTx = new(common.L1Tx)
                *dbTx = _dbTx
                // NOTE: Overwrite EffectiveFromIdx in the L1UserTx
                // from the DB because we don't expect
                // EffectiveFromIdx to be set yet, as this tx
                // has not been forged yet
                dbTx.EffectiveFromIdx = 0
                break
            }
        }
        // If the tx has been forged in this block, this will be
        // reflected in the DB, and so the Effective values will
        // already be set
        if dbTx.BatchNum != nil {
            tx.EffectiveAmount = tx.Amount
            tx.EffectiveDepositAmount = tx.DepositAmount
        }
        assert.Equal(t, &tx, dbTx) //nolint:gosec
    }

    // Check Batches
    assert.Equal(t, len(block.Rollup.Batches), len(syncBlock.Rollup.Batches))
    dbBatches, err := s.historyDB.GetAllBatches()
    require.NoError(t, err)
    dbL1CoordinatorTxs, err := s.historyDB.GetAllL1CoordinatorTxs()
    require.NoError(t, err)
    dbL2Txs, err := s.historyDB.GetAllL2Txs()
    require.NoError(t, err)
    dbExits, err := s.historyDB.GetAllExits()
    require.NoError(t, err)
    // dbL1CoordinatorTxs := []common.L1Tx{}
    for i, batch := range block.Rollup.Batches {
        var dbBatch *common.Batch
        // Find batch in DB output
        for _, _dbBatch := range dbBatches {
            if batch.Batch.BatchNum == _dbBatch.BatchNum {
                dbBatch = new(common.Batch)
                *dbBatch = _dbBatch
                break
            }
        }
        syncBatch := syncBlock.Rollup.Batches[i]

        // We don't care about TotalFeesUSD.  Use the syncBatch that
        // has a TotalFeesUSD inserted by the HistoryDB
        batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD
        assert.Equal(t, batch.CreatedAccounts, syncBatch.CreatedAccounts)
        batch.Batch.NumAccounts = len(batch.CreatedAccounts)

        // Test field by field to facilitate debugging of errors
        assert.Equal(t, len(batch.L1UserTxs), len(syncBatch.L1UserTxs))
        // NOTE: EffectiveFromIdx is set in the til L1UserTxs by the
        // `FillBlocksForgedL1UserTxs` function
        for j := range syncBatch.L1UserTxs {
            assert.NotEqual(t, 0, syncBatch.L1UserTxs[j].EffectiveFromIdx)
        }
        assert.Equal(t, batch.L1UserTxs, syncBatch.L1UserTxs)
        // NOTE: EffectiveFromIdx is set in the til L1CoordinatorTxs by
        // the `FillBlocksExtra` function
        for j := range syncBatch.L1CoordinatorTxs {
            assert.NotEqual(t, 0, syncBatch.L1CoordinatorTxs[j].EffectiveFromIdx)
        }
        assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs)
        assert.Equal(t, batch.L2Txs, syncBatch.L2Txs)
        // In the exit tree we only check AccountIdx and Balance,
        // because that's what we have precomputed before.
        require.Equal(t, len(batch.ExitTree), len(syncBatch.ExitTree))
        for j := range batch.ExitTree {
            exit := &batch.ExitTree[j]
            assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx)
            assert.Equal(t, exit.Balance, syncBatch.ExitTree[j].Balance)
            *exit = syncBatch.ExitTree[j]
        }
        assert.Equal(t, batch.Batch, syncBatch.Batch)
        assert.Equal(t, batch, syncBatch)
        assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec

        // Check forged L1UserTxs from DB, and check effective values
        // in sync output
        for j, tx := range batch.L1UserTxs {
            var dbTx *common.L1Tx
            // Find tx in DB output
            for _, _dbTx := range dbL1UserTxs {
                if *tx.BatchNum == *_dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L1Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec

            syncTx := &syncBlock.Rollup.Batches[i].L1UserTxs[j]
            assert.Equal(t, syncTx.DepositAmount, syncTx.EffectiveDepositAmount)
            assert.Equal(t, syncTx.Amount, syncTx.EffectiveAmount)
        }

        // Check L1CoordinatorTxs from DB
        for _, tx := range batch.L1CoordinatorTxs {
            var dbTx *common.L1Tx
            // Find tx in DB output
            for _, _dbTx := range dbL1CoordinatorTxs {
                if *tx.BatchNum == *_dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L1Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec
        }

        // Check L2Txs from DB
        for _, tx := range batch.L2Txs {
            var dbTx *common.L2Tx
            // Find tx in DB output
            for _, _dbTx := range dbL2Txs {
                if tx.BatchNum == _dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L2Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec
        }

        // Check Exits from DB
        for _, exit := range batch.ExitTree {
            var dbExit *common.ExitInfo
            // Find exit in DB output
            for _, _dbExit := range dbExits {
                if exit.BatchNum == _dbExit.BatchNum &&
                    exit.AccountIdx == _dbExit.AccountIdx {
                    dbExit = new(common.ExitInfo)
                    *dbExit = _dbExit
                    break
                }
            }
            // Compare the MerkleProofs in JSON because an unmarshaled
            // 0 big.Int leaves the internal big.Int array at nil,
            // which gives trouble when comparing it with a big.Int
            // whose internal array is non-nil but empty.
            mtp, err := json.Marshal(exit.MerkleProof)
            require.NoError(t, err)
            dbMtp, err := json.Marshal(dbExit.MerkleProof)
            require.NoError(t, err)
            assert.Equal(t, mtp, dbMtp)
            dbExit.MerkleProof = exit.MerkleProof
            assert.Equal(t, &exit, dbExit) //nolint:gosec
        }
    }

    // Compare accounts from HistoryDB with StateDB (they should match)
    dbAccounts, err := s.historyDB.GetAllAccounts()
    require.NoError(t, err)
    sdbAccounts, err := s.stateDB.TestGetAccounts()
    require.NoError(t, err)
    assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
}
func assertEqualAccountsHistoryDBStateDB(t *testing.T, hdbAccs, sdbAccs []common.Account) {
    assert.Equal(t, len(hdbAccs), len(sdbAccs))
    sort.SliceStable(hdbAccs, accountsCmp(hdbAccs))
    sort.SliceStable(sdbAccs, accountsCmp(sdbAccs))
    for i := range hdbAccs {
        hdbAcc := hdbAccs[i]
        sdbAcc := sdbAccs[i]
        assert.Equal(t, hdbAcc.Idx, sdbAcc.Idx)
        assert.Equal(t, hdbAcc.TokenID, sdbAcc.TokenID)
        assert.Equal(t, hdbAcc.EthAddr, sdbAcc.EthAddr)
        assert.Equal(t, hdbAcc.BJJ, sdbAcc.BJJ)
    }
}
// ethAddTokens adds the tokens from the blocks to the blockchain
func ethAddTokens(blocks []common.BlockData, client *test.Client) {
    for _, block := range blocks {
        for _, token := range block.Rollup.AddedTokens {
            consts := eth.ERC20Consts{
                Name:     fmt.Sprintf("Token %d", token.TokenID),
                Symbol:   fmt.Sprintf("TK%d", token.TokenID),
                Decimals: 18,
            }
            tokenConsts[token.TokenID] = consts
            client.CtlAddERC20(token.EthAddr, consts)
        }
    }
}
var chainID uint16 = 0
var deleteme = []string{}

func TestMain(m *testing.M) {
    exitVal := m.Run()
    for _, dir := range deleteme {
        if err := os.RemoveAll(dir); err != nil {
            panic(err)
        }
    }
    os.Exit(exitVal)
}

func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
    // Init StateDB
    dir, err := ioutil.TempDir("", "tmpdb")
    require.NoError(t, err)
    deleteme = append(deleteme, dir)
    stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
        Type: statedb.TypeSynchronizer, NLevels: 32})
    require.NoError(t, err)

    // Init HistoryDB
    pass := os.Getenv("POSTGRES_PASS")
    db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
    require.NoError(t, err)
    historyDB := historydb.NewHistoryDB(db, nil)
    // Clear DB
    test.WipeDB(historyDB.DB())

    return stateDB, historyDB
}

func newBigInt(s string) *big.Int {
    v, ok := new(big.Int).SetString(s, 10)
    if !ok {
        panic(fmt.Errorf("Can't set big.Int from %s", s))
    }
    return v
}
func TestSyncGeneral(t *testing.T) {
    //
    // Setup
    //

    stateDB, historyDB := newTestModules(t)

    // Init eth client
    var timer timer
    clientSetup := test.NewClientSetupExample()
    clientSetup.ChainID = big.NewInt(int64(chainID))
    bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
    client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)

    // Create Synchronizer
    s, err := NewSynchronizer(client, historyDB, stateDB, Config{
        StatsRefreshPeriod: 0 * time.Second,
    })
    require.NoError(t, err)

    ctx := context.Background()

    //
    // First Sync from an initial state
    //
    stats := s.Stats()
    assert.Equal(t, false, stats.Synced())

    // Test Sync for rollup genesis block
    syncBlock, discards, err := s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.NotNil(t, syncBlock)
    require.Nil(t, syncBlock.Rollup.Vars)
    require.Nil(t, syncBlock.Auction.Vars)
    require.Nil(t, syncBlock.WDelayer.Vars)
    assert.Equal(t, int64(1), syncBlock.Block.Num)
    stats = s.Stats()
    assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
    assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
    vars := s.SCVars()
    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)

    dbBlocks, err := s.historyDB.GetAllBlocks()
    require.NoError(t, err)
    assert.Equal(t, 2, len(dbBlocks))
    assert.Equal(t, int64(1), dbBlocks[1].Num)

    // Sync again and expect no new blocks
    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.Nil(t, syncBlock)

    //
    // Generate blockchain and smart contract data, and fill the test smart contracts
    //

    // Generate blockchain data with til
    set1 := `
Type: Blockchain
AddToken(1)
AddToken(2)
AddToken(3)
CreateAccountDeposit(1) C: 2000 // Idx=256+2=258
CreateAccountDeposit(2) A: 2000 // Idx=256+3=259
CreateAccountDeposit(1) D: 500 // Idx=256+4=260
CreateAccountDeposit(2) B: 500 // Idx=256+5=261
CreateAccountDeposit(2) C: 500 // Idx=256+6=262
CreateAccountCoordinator(1) A // Idx=256+0=256
CreateAccountCoordinator(1) B // Idx=256+1=257
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{5}
> batchL1 // forge defined L1UserTxs{5}, freeze L1UserTxs{nil}
> block // blockNum=2
CreateAccountDepositTransfer(1) E-A: 1000, 200 // Idx=256+7=263
ForceTransfer(1) C-B: 80
ForceExit(1) A: 100
ForceExit(1) B: 80
ForceTransfer(1) A-D: 100
Transfer(1) C-A: 100 (126)
Exit(1) C: 50 (100)
Exit(1) D: 30 (100)
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{3}
> batchL1 // forge L1UserTxs{3}, freeze defined L1UserTxs{nil}
> block // blockNum=3
`
    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
    tilCfgExtra := til.ConfigExtra{
        BootCoordAddr: bootCoordAddr,
        CoordUser:     "A",
    }
    blocks, err := tc.GenerateBlocks(set1)
    require.NoError(t, err)
    // Sanity check
    require.Equal(t, 2, len(blocks))
    // blocks 0 (blockNum=2)
    i := 0
    require.Equal(t, 2, int(blocks[i].Block.Num))
    require.Equal(t, 3, len(blocks[i].Rollup.AddedTokens))
    require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
    require.Equal(t, 2, len(blocks[i].Rollup.Batches))
    require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
    // Set StateRoots for batches manually (til doesn't set them)
    blocks[i].Rollup.Batches[0].Batch.StateRoot =
        newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
    blocks[i].Rollup.Batches[1].Batch.StateRoot =
        newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
    // blocks 1 (blockNum=3)
    i = 1
    require.Equal(t, 3, int(blocks[i].Block.Num))
    require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
    require.Equal(t, 2, len(blocks[i].Rollup.Batches))
    require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
    // Set StateRoots for batches manually (til doesn't set them)
    blocks[i].Rollup.Batches[0].Batch.StateRoot =
        newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
    blocks[i].Rollup.Batches[1].Batch.StateRoot =
        newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")

    // Generate extra required data
    ethAddTokens(blocks, client)
    err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
    require.NoError(t, err)
    tc.FillBlocksL1UserTxsBatchNum(blocks)
    err = tc.FillBlocksForgedL1UserTxs(blocks)
    require.NoError(t, err)

    // Add block data to the smart contracts
    err = client.CtlAddBlocks(blocks)
    require.NoError(t, err)

    //
    // Sync to synchronize the current state from the test smart contracts,
    // and check the outcome
    //

    // Block 2
    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.NotNil(t, syncBlock)
    assert.Nil(t, syncBlock.Rollup.Vars)
    assert.Nil(t, syncBlock.Auction.Vars)
    assert.Nil(t, syncBlock.WDelayer.Vars)
    assert.Equal(t, int64(2), syncBlock.Block.Num)
    stats = s.Stats()
    assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
    assert.Equal(t, int64(3), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(2), stats.Sync.LastBlock.Num)

    checkSyncBlock(t, s, 2, &blocks[0], syncBlock)

    // Block 3
    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.NotNil(t, syncBlock)
    assert.Nil(t, syncBlock.Rollup.Vars)
    assert.Nil(t, syncBlock.Auction.Vars)
    assert.Nil(t, syncBlock.WDelayer.Vars)
    assert.Equal(t, int64(3), syncBlock.Block.Num)
    stats = s.Stats()
    assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
    assert.Equal(t, int64(3), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(3), stats.Sync.LastBlock.Num)

    checkSyncBlock(t, s, 3, &blocks[1], syncBlock)

    // Block 4
    // Generate 2 withdraws manually
    _, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256, big.NewInt(100), []*big.Int{}, true)
    require.NoError(t, err)
    _, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258, big.NewInt(50), []*big.Int{}, false)
    require.NoError(t, err)
    client.CtlMineBlock()

    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.NotNil(t, syncBlock)
    assert.Nil(t, syncBlock.Rollup.Vars)
    assert.Nil(t, syncBlock.Auction.Vars)
    assert.Nil(t, syncBlock.WDelayer.Vars)
    assert.Equal(t, int64(4), syncBlock.Block.Num)
    stats = s.Stats()
    assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
    assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
    vars = s.SCVars()
    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)

    dbExits, err := s.historyDB.GetAllExits()
    require.NoError(t, err)
    foundA1, foundC1 := false, false
    for _, exit := range dbExits {
        if exit.AccountIdx == 256 && exit.BatchNum == 4 {
            foundA1 = true
            assert.Equal(t, int64(4), *exit.InstantWithdrawn)
        }
        if exit.AccountIdx == 258 && exit.BatchNum == 3 {
            foundC1 = true
            assert.Equal(t, int64(4), *exit.DelayedWithdrawRequest)
        }
    }
    assert.True(t, foundA1)
    assert.True(t, foundC1)

    // Block 5
    // Update variables manually
    rollupVars, auctionVars, wDelayerVars, err := s.historyDB.GetSCVars()
    require.NoError(t, err)
    rollupVars.ForgeL1L2BatchTimeout = 42
    _, err = client.RollupUpdateForgeL1L2BatchTimeout(rollupVars.ForgeL1L2BatchTimeout)
    require.NoError(t, err)
    auctionVars.OpenAuctionSlots = 17
    _, err = client.AuctionSetOpenAuctionSlots(auctionVars.OpenAuctionSlots)
    require.NoError(t, err)
    wDelayerVars.WithdrawalDelay = 99
    _, err = client.WDelayerChangeWithdrawalDelay(wDelayerVars.WithdrawalDelay)
    require.NoError(t, err)
    client.CtlMineBlock()

    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    require.Nil(t, discards)
    require.NotNil(t, syncBlock)
    assert.NotNil(t, syncBlock.Rollup.Vars)
    assert.NotNil(t, syncBlock.Auction.Vars)
    assert.NotNil(t, syncBlock.WDelayer.Vars)
    assert.Equal(t, int64(5), syncBlock.Block.Num)
    stats = s.Stats()
    assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
    assert.Equal(t, int64(5), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(5), stats.Sync.LastBlock.Num)
    vars = s.SCVars()
    assert.NotEqual(t, clientSetup.RollupVariables, vars.Rollup)
    assert.NotEqual(t, clientSetup.AuctionVariables, vars.Auction)
    assert.NotEqual(t, clientSetup.WDelayerVariables, vars.WDelayer)

    dbRollupVars, dbAuctionVars, dbWDelayerVars, err := s.historyDB.GetSCVars()
    require.NoError(t, err)
    // Set EthBlockNum for Vars to the blockNum in which they were updated (should be 5)
    rollupVars.EthBlockNum = syncBlock.Block.Num
    auctionVars.EthBlockNum = syncBlock.Block.Num
    wDelayerVars.EthBlockNum = syncBlock.Block.Num
    assert.Equal(t, rollupVars, dbRollupVars)
    assert.Equal(t, auctionVars, dbAuctionVars)
    assert.Equal(t, wDelayerVars, dbWDelayerVars)

    //
    // Reorg test
    //

    // Redo blocks 2-5 (as a reorg) only leaving:
    // - 2 create account transactions
    // - 2 add tokens
    // We add a 6th block so that the synchronizer can detect the reorg
    set2 := `
Type: Blockchain
AddToken(1)
AddToken(2)
CreateAccountDeposit(1) C: 2000 // Idx=256+1=257
CreateAccountCoordinator(1) A // Idx=256+0=256
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{1}
> batchL1 // forge defined L1UserTxs{1}, freeze L1UserTxs{nil}
> block // blockNum=2
> block // blockNum=3
> block // blockNum=4
> block // blockNum=5
> block // blockNum=6
`
    tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
    tilCfgExtra = til.ConfigExtra{
        BootCoordAddr: bootCoordAddr,
        CoordUser:     "A",
    }
    blocks, err = tc.GenerateBlocks(set2)
    require.NoError(t, err)
    // Set StateRoots for batches manually (til doesn't set them)
    blocks[0].Rollup.Batches[0].Batch.StateRoot =
        newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
    blocks[0].Rollup.Batches[1].Batch.StateRoot =
        newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")

    for i := 0; i < 4; i++ {
        client.CtlRollback()
    }
    block := client.CtlLastBlock()
    require.Equal(t, int64(1), block.Num)

    // Generate extra required data
    ethAddTokens(blocks, client)
    err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
    require.NoError(t, err)
    tc.FillBlocksL1UserTxsBatchNum(blocks)

    // Add block data to the smart contracts
    err = client.CtlAddBlocks(blocks)
    require.NoError(t, err)

    // First sync detects the reorg and discards 4 blocks
    syncBlock, discards, err = s.Sync2(ctx, nil)
    require.NoError(t, err)
    expectedDiscards := int64(4)
    require.Equal(t, &expectedDiscards, discards)
    require.Nil(t, syncBlock)
    stats = s.Stats()
    assert.Equal(t, false, stats.Synced())
    assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
    vars = s.SCVars()
    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)

    // At this point, the DB only has data up to block 1
    dbBlock, err := s.historyDB.GetLastBlock()
    require.NoError(t, err)
    assert.Equal(t, int64(1), dbBlock.Num)

    // Accounts in HistoryDB and StateDB must be empty
    dbAccounts, err := s.historyDB.GetAllAccounts()
    require.NoError(t, err)
    sdbAccounts, err := s.stateDB.TestGetAccounts()
    require.NoError(t, err)
    assert.Equal(t, 0, len(dbAccounts))
    assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)

    // Sync blocks 2-6
    for i := 0; i < 5; i++ {
        syncBlock, discards, err = s.Sync2(ctx, nil)
        require.NoError(t, err)
        require.Nil(t, discards)
        require.NotNil(t, syncBlock)
        assert.Nil(t, syncBlock.Rollup.Vars)
        assert.Nil(t, syncBlock.Auction.Vars)
        assert.Nil(t, syncBlock.WDelayer.Vars)
        assert.Equal(t, int64(2+i), syncBlock.Block.Num)
        stats = s.Stats()
        assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
        assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
        assert.Equal(t, int64(2+i), stats.Sync.LastBlock.Num)
        if i == 4 {
            assert.Equal(t, true, stats.Synced())
        } else {
            assert.Equal(t, false, stats.Synced())
        }
        vars = s.SCVars()
        assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
        assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
        assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
    }

    dbBlock, err = s.historyDB.GetLastBlock()
    require.NoError(t, err)
    assert.Equal(t, int64(6), dbBlock.Num)

    // HistoryDB and StateDB must contain only 2 accounts
    dbAccounts, err = s.historyDB.GetAllAccounts()
    require.NoError(t, err)
    sdbAccounts, err = s.stateDB.TestGetAccounts()
    require.NoError(t, err)
    assert.Equal(t, 2, len(dbAccounts))
    assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
}
func TestSyncForgerCommitment(t *testing.T) {
    stateDB, historyDB := newTestModules(t)

    // Init eth client
    var timer timer
    clientSetup := test.NewClientSetupExample()
    clientSetup.ChainID = big.NewInt(int64(chainID))
    clientSetup.AuctionConstants.GenesisBlockNum = 2
    clientSetup.AuctionConstants.BlocksPerSlot = 4
    clientSetup.AuctionVariables.SlotDeadline = 2
    bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
    client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)

    // Create Synchronizer
    s, err := NewSynchronizer(client, historyDB, stateDB, Config{
        StatsRefreshPeriod: 0 * time.Second,
    })
    require.NoError(t, err)

    ctx := context.Background()

    set := `
Type: Blockchain
// Slot = 0
> block // 2
> block // 3
> block // 4
> block // 5
// Slot = 1
> block // 6
> batch
> block // 7
> block // 8
> block // 9
// Slot = 2
> block // 10
> block // 11
> batch
> block // 12
> block // 13
`
    // For each block, true when the slot that belongs to the following
    // block has forgerCommitment
    commitment := map[int64]bool{
        2:  false,
        3:  false,
        4:  false,
        5:  false,
        6:  false,
        7:  true,
        8:  true,
        9:  false,
        10: false,
        11: false,
        12: false,
        13: false,
    }

    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
    blocks, err := tc.GenerateBlocks(set)
    assert.NoError(t, err)

    tilCfgExtra := til.ConfigExtra{
        BootCoordAddr: bootCoordAddr,
        CoordUser:     "A",
    }
    err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
    require.NoError(t, err)

    // for i := range blocks {
    //     for j := range blocks[i].Rollup.Batches {
    //         blocks[i].Rollup.Batches[j].Batch.SlotNum = int64(i) / 4
    //     }
    // }

    // Sync until we are up to date with the test client
    for {
        syncBlock, discards, err := s.Sync2(ctx, nil)
        require.NoError(t, err)
        require.Nil(t, discards)
        if syncBlock == nil {
            break
        }
    }
    stats := s.Stats()
    require.Equal(t, int64(1), stats.Sync.LastBlock.Num)

    // Store the ForgerCommitment observed at every block by the live synchronizer
    syncCommitment := map[int64]bool{}
    // Store the ForgerCommitment observed at every block by a synchronizer that is restarted
    syncRestartedCommitment := map[int64]bool{}
    for _, block := range blocks {
        // Add block data to the smart contracts
        err = client.CtlAddBlocks([]common.BlockData{block})
        require.NoError(t, err)

        syncBlock, discards, err := s.Sync2(ctx, nil)
        require.NoError(t, err)
        require.Nil(t, discards)
        if syncBlock == nil {
            break
        }
        stats := s.Stats()
        require.True(t, stats.Synced())
        syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment

        s2, err := NewSynchronizer(client, historyDB, stateDB, Config{
            StatsRefreshPeriod: 0 * time.Second,
        })
        require.NoError(t, err)
        stats = s2.Stats()
        require.True(t, stats.Synced())
        syncRestartedCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
    }
    assert.Equal(t, commitment, syncCommitment)
    assert.Equal(t, commitment, syncRestartedCommitment)
}