Update missing parts, improve til, and more
- Node
  - Update the configuration to initialize the interface to all the smart contracts
- Common
  - Move the BlockData and BatchData types to common so that they can be shared among historydb, til and synchronizer
  - Remove hash.go (it was never used)
  - Remove slot.go (it was never used)
  - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`)
  - Comment out the state / status method until its requirements are properly defined, and move it to Synchronizer
- Synchronizer
  - Simplify the `Sync` routine to only sync one block per call and return useful information
  - Use BlockData and BatchData from common
  - Check that events belong to the expected block hash
  - In L1Batch, query L1UserTxs from HistoryDB
  - Fill ERC20 token information
  - Test AddTokens with test.Client
- HistoryDB
  - Use BlockData and BatchData from common
  - Add the `GetAllTokens` method
  - Uncomment and update GetL1UserTxs (with corresponding tests)
- Til
  - Rename all instances of RegisterToken to AddToken (to follow the smart contract naming)
  - Use BlockData and BatchData from common
  - Move testL1CoordinatorTxs and testL2Txs out of BatchData into a separate struct in Context
  - Start Context with BatchNum = 1 (which the protocol defines as the first batchNum)
  - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero)
  - In all L1Txs, set LoadAmount to 0 when it is not used and Amount to 0 when it is not used, so that no *big.Int is nil (see the sketch below)
  - In L1UserTx, don't set BatchNum: when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (setting it is the synchronizer's job)
  - In L1UserTxs, set `UserOrigin` and `ToForgeL1TxsNum`
4 years ago
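The "no *big.Int is nil" rule above is a simple defensive-default pattern. A minimal Go sketch, using simplified stand-in structs (the real `common.L1Tx` and `common.Batch` carry many more fields than shown here):

```go
package main

import (
	"fmt"
	"math/big"
)

// Simplified stand-ins for the L1Tx and Batch fields mentioned above.
type L1Tx struct {
	LoadAmount *big.Int
	Amount     *big.Int
}

type Batch struct {
	StateRoot *big.Int
	ExitRoot  *big.Int
}

// defaultBigInts illustrates the rule: any unset amount or root is replaced
// with a zero-valued big.Int before the struct is used, so no *big.Int is nil.
func defaultBigInts(tx *L1Tx, batch *Batch) {
	if tx.LoadAmount == nil {
		tx.LoadAmount = big.NewInt(0)
	}
	if tx.Amount == nil {
		tx.Amount = big.NewInt(0)
	}
	if batch.StateRoot == nil {
		batch.StateRoot = big.NewInt(0)
	}
	if batch.ExitRoot == nil {
		batch.ExitRoot = big.NewInt(0)
	}
}

func main() {
	tx, batch := &L1Tx{}, &Batch{}
	defaultBigInts(tx, batch)
	fmt.Println(tx.LoadAmount, tx.Amount, batch.StateRoot, batch.ExitRoot) // 0 0 0 0
}
```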
Update coordinator, call all api update functions
- Common
  - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition
- API
  - Add UpdateNetworkInfoBlock to update just the block information, to be used when the node is not yet synchronized
- Node
  - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals
- Synchronizer
  - When mapping events by TxHash, use an array to support multiple calls to the same function within the same transaction (for example, a smart contract could call withdraw with delay twice in a single transaction, which would generate 2 withdraw events and 2 deposit events)
  - In Stats, keep the entire LastBlock instead of just the blockNum
  - In Stats, add lastL1BatchBlock
  - Test Stats and SCVars
- Coordinator
  - Enable writing the BatchInfo at every step of the pipeline to disk (as JSON text files) for debugging purposes
  - Move the Pipeline functionality from the Coordinator into its own struct (Pipeline)
  - Implement shouldL1lL2Batch
  - In TxManager, perform several attempts of ethereum node RPC calls before treating them as errors (both for calls to forgeBatch and for fetching the transaction receipt); a sketch of such a retry loop follows below
  - In TxManager, reorganize the flow and note the specific points at which actions are taken when err != nil
- HistoryDB
  - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged L1Batch, to help the coordinator decide when to forge an L1Batch
- EthereumClient and test.Client
  - Update EthBlockByNumber to return the last block when the passed number is -1
4 years ago
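The "several attempts" behaviour described for TxManager is essentially a bounded retry loop around an RPC call. A minimal sketch of such a loop; the attempt count and wait time are illustrative, not the repository's actual values:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// attemptRPC retries an ethereum-node RPC call a fixed number of times before
// giving up, in the spirit of the TxManager behaviour described above.
func attemptRPC(ctx context.Context, attempts int, wait time.Duration,
	call func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = call(); err == nil {
			return nil
		}
		// Wait before the next attempt, but abort early if the context is done.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(wait):
		}
	}
	return fmt.Errorf("rpc call failed after %d attempts: %w", attempts, err)
}

func main() {
	n := 0
	err := attemptRPC(context.Background(), 3, 10*time.Millisecond, func() error {
		n++
		if n < 3 {
			return fmt.Errorf("transient error")
		}
		return nil // succeeds on the third attempt
	})
	fmt.Println(err, "attempts:", n)
}
```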
Update coordinator to work better under real net
- cli / node
  - Update the SIGINT handler so that after 3 SIGINTs the process terminates unconditionally
- coordinator
  - Store stats without a pointer
  - In all functions that send a variable via a channel, check for context done to avoid a deadlock when the node is stopped (no process is reading from the channel, which has no queue)
  - Abstract `canForge` so that it can be used outside of the `Coordinator`
  - In `canForge`, check the blockNumber in the current and the next slot
  - Update tests due to smart contract changes in slot handling and minimum bid defaults
- TxManager
  - Add consts, vars and stats to allow evaluating `canForge`
  - Add a `canForge` method (not used yet)
  - Store batch and nonce status (last success and last pending)
  - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs)
  - Handle the (common) case of the receipt not being found right after the tx is sent
  - Don't start the main loop until an initial message with the stats and vars arrives (so that inside the loop the stats and vars are set to synchronizer values)
  - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This avoids sending consecutive stop-pipeline messages when multiple txs are detected to have failed consecutively. Also, later txs of the same pipeline after a discarded tx are discarded, and their nonces reused.
  - Robust handling of nonces (see the sketch after this message):
    - If geth returns "nonce too low", increase it
    - If geth returns "nonce too high", decrease it
    - If geth returns "underpriced", increase the gas price
    - If geth returns "replace underpriced", increase the gas price
  - Add support for resending transactions after a timeout
  - Store `BatchInfos` in a queue
- Pipeline
  - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline, including the failed batch number, so that on a restart non-failed batches are not repeated
  - When resetting the stateDB, reset from the local checkpoint if possible instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced.
  - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator.
  - Avoid forging before the rollup genesis block number is reached
  - Add config parameter `StartSlotBlocksDelay`: the number of blocks to wait before starting the pipeline once we reach a slot in which we can forge
  - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us
  - Add config parameter `ScheduleBatchBlocksAheadCheck`: the number of blocks ahead at which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, even though at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks between scheduling a batch and having it mined.
  - Add config parameter `SendBatchBlocksMarginCheck`: the number of margin blocks ahead at which the coordinator is also checked to be allowed to forge, apart from the next block, used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, even though at block 11 we canForge, the batch will be discarded if we can't forge at block 15.
  - Add config parameter `TxResendTimeout`: the timeout after which a non-mined ethereum transaction is resent (reusing the nonce) with a newly calculated gas price
  - Add config parameter `MaxGasPrice`: the maximum gas price allowed for ethereum transactions
  - Add config parameter `NoReuseNonce`: disables reusing nonces of pending transactions for new replacement transactions; useful for testing with Ganache
  - Extend BatchInfo with more useful information for debugging
- eth / ethereum client
  - Add the methods needed to build the auth object for transactions manually, so that the nonce, gas price, gas limit, etc. can be set by hand
  - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually)
- synchronizer
  - In stats, add `NextSlot`
  - In stats, store the full last batch instead of just the last batch number
  - Instead of calculating nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced)
  - After every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event
3 years ago
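The nonce and gas-price recovery rules listed for TxManager form a small decision table keyed on the error returned by geth. A hedged sketch of that table; the error substrings and the 10% gas-price bump are assumptions for illustration, not the repository's exact strings or increments:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"
)

// adjustAfterSendError mirrors the recovery rules listed above: bump the nonce
// when it is too low, lower it when it is too high, and raise the gas price
// when the transaction (or its replacement) is reported as underpriced.
func adjustAfterSendError(err error, nonce uint64, gasPrice *big.Int) (uint64, *big.Int) {
	msg := err.Error()
	switch {
	case strings.Contains(msg, "nonce too low"):
		nonce++
	case strings.Contains(msg, "nonce too high"):
		nonce--
	case strings.Contains(msg, "underpriced"):
		// Bump the gas price by ~10%; the real increment is a config choice.
		gasPrice = new(big.Int).Div(new(big.Int).Mul(gasPrice, big.NewInt(110)), big.NewInt(100))
	}
	return nonce, gasPrice
}

func main() {
	nonce, gasPrice := uint64(7), big.NewInt(1_000_000_000)
	nonce, gasPrice = adjustAfterSendError(fmt.Errorf("nonce too low"), nonce, gasPrice)
	fmt.Println(nonce, gasPrice) // 8 1000000000
}
```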
Allow serving API only via new cli command
Add a new command to cli/node, `serveapi`, that allows serving the API just by connecting to the PostgreSQL database. The mode flag should be passed in order to select whether we are connecting to a synchronizer database or a coordinator database. If `coord` is chosen as mode, the coordinator endpoints can be activated in order to allow inserting l2txs and authorizations into the L2DB.

Summary of the implementation details:
- New SQL table with 3 columns (plus an `item_id` pk). The table only contains a single row with `item_id` = 1 (see the sketch below). Columns:
  - state: historydb.StateAPI in JSON. This is the struct that is served via the `/state` API endpoint. The node periodically updates this struct and stores it in the DB; the api server queries it from the DB to serve it.
  - config: historydb.NodeConfig in JSON. This struct contains node configuration parameters that the API needs to be aware of. It's updated once every time the node starts.
  - constants: historydb.Constants in JSON. This struct contains all the hermez network constants gathered via the ethereum client by the node. It's written once every time the node starts.
- The HistoryDB contains methods to get and update each one of these columns individually.
- The HistoryDB contains all the methods that query the DB and prepare the objects that appear in the StateAPI endpoint.
- The configuration used for the `serveapi` cli/node command is defined in `config.APIServer`, and is a subset of `node.Config` in order to allow reusing the same configuration file as the node if desired.
- A new object is introduced in the api: `StateAPIUpdater`, which contains all the information the node needs to periodically update the StateAPI in the DB.
- Moved the types `SCConsts`, `SCVariables` and `SCVariablesPtr` from `synchronizer` to `common` for convenience.
3 years ago
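The single-row table described above can be pictured roughly as follows. This is a sketch only: the table and column names follow the description, but the actual schema, migrations and JSON handling in the repository may differ:

```go
package main

import (
	"database/sql"
	"encoding/json"
)

// Assumed shape of the single-row table: item_id is always 1, and each column
// holds one JSON blob (state, config, constants) as described above.
const nodeInfoTable = `
CREATE TABLE node_info (
    item_id   INTEGER PRIMARY KEY, -- always 1: the table holds a single row
    state     BYTEA,               -- historydb.StateAPI serialized as JSON
    config    BYTEA,               -- historydb.NodeConfig serialized as JSON
    constants BYTEA                -- historydb.Constants serialized as JSON
);
`

// updateStateAPI sketches how the node could periodically overwrite the state
// column; the API server would only read it back to serve /state.
func updateStateAPI(db *sql.DB, state interface{}) error {
	blob, err := json.Marshal(state)
	if err != nil {
		return err
	}
	_, err = db.Exec(`UPDATE node_info SET state = $1 WHERE item_id = 1;`, blob)
	return err
}

func main() {}
```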
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
4 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update missing parts, improve til, and more - Node - Updated configuration to initialize the interface to all the smart contracts - Common - Moved BlockData and BatchData types to common so that they can be shared among: historydb, til and synchronizer - Remove hash.go (it was never used) - Remove slot.go (it was never used) - Remove smartcontractparams.go (it was never used, and appropriate structs are defined in `eth/`) - Comment state / status method until requirements of this method are properly defined, and move it to Synchronizer - Synchronizer - Simplify `Sync` routine to only sync one block per call, and return useful information. - Use BlockData and BatchData from common - Check that events belong to the expected block hash - In L1Batch, query L1UserTxs from HistoryDB - Fill ERC20 token information - Test AddTokens with test.Client - HistryDB - Use BlockData and BatchData from common - Add `GetAllTokens` method - Uncomment and update GetL1UserTxs (with corresponding tests) - Til - Rename all instances of RegisterToken to AddToken (to follow the smart contract implementation naming) - Use BlockData and BatchData from common - Move testL1CoordinatorTxs and testL2Txs to a separate struct from BatchData in Context - Start Context with BatchNum = 1 (which the protocol defines to be the first batchNum) - In every Batch, set StateRoot and ExitRoot to a non-nil big.Int (zero). - In all L1Txs, if LoadAmount is not used, set it to 0; if Amount is not used, set it to 0; so that no *big.Int is nil. - In L1UserTx, don't set BatchNum, because when L1UserTxs are created and obtained by the synchronizer, the BatchNum is not known yet (it's a synchronizer job to set it) - In L1UserTxs, set `UserOrigin` and set `ToForgeL1TxsNum`.
4 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
4 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
4 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update coordinator to work better under real net - cli / node - Update handler of SIGINT so that after 3 SIGINTs, the process terminates unconditionally - coordinator - Store stats without pointer - In all functions that send a variable via channel, check for context done to avoid deadlock (due to no process reading from the channel, which has no queue) when the node is stopped. - Abstract `canForge` so that it can be used outside of the `Coordinator` - In `canForge` check the blockNumber in current and next slot. - Update tests due to smart contract changes in slot handling, and minimum bid defaults - TxManager - Add consts, vars and stats to allow evaluating `canForge` - Add `canForge` method (not used yet) - Store batch and nonces status (last success and last pending) - Track nonces internally instead of relying on the ethereum node (this is required to work with ganache when there are pending txs) - Handle the (common) case of the receipt not being found after the tx is sent. - Don't start the main loop until we get an initial messae fo the stats and vars (so that in the loop the stats and vars are set to synchronizer values) - When a tx fails, check and discard all the failed transactions before sending the message to stop the pipeline. This will avoid sending consecutive messages of stop the pipeline when multiple txs are detected to be failed consecutively. Also, future txs of the same pipeline after a discarded txs are discarded, and their nonces reused. - Robust handling of nonces: - If geth returns nonce is too low, increase it - If geth returns nonce too hight, decrease it - If geth returns underpriced, increase gas price - If geth returns replace underpriced, increase gas price - Add support for resending transactions after a timeout - Store `BatchInfos` in a queue - Pipeline - When an error is found, stop forging batches and send a message to the coordinator to stop the pipeline with information of the failed batch number so that in a restart, non-failed batches are not repated. - When doing a reset of the stateDB, if possible reset from the local checkpoint instead of resetting from the synchronizer. This allows resetting from a batch that is valid but not yet sent / synced. - Every time a pipeline is started, assign it a number from a counter. This allows the TxManager to ignore batches from stopped pipelines, via a message sent by the coordinator. - Avoid forging when we haven't reached the rollup genesis block number. - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the number of blocks of delay to wait before starting the pipeline when we reach a slot in which we can forge. - When detecting a reorg, only reset the pipeline if the batch from which the pipeline started changed and wasn't sent by us. - Add config parameter `ScheduleBatchBlocksAheadCheck`: ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the forger address is checked to be allowed to forge (apart from checking the next block), used to decide when to stop scheduling new batches (by stopping the pipeline). For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck is 5, eventhough at block 11 we canForge, the pipeline will be stopped if we can't forge at block 15. This value should be the expected number of blocks it takes between scheduling a batch and having it mined. 
- Add config parameter `SendBatchBlocksMarginCheck`: SendBatchBlocksMarginCheck is the number of margin blocks ahead in which the coordinator is also checked to be allowed to forge, apart from the next block; used to decide when to stop sending batches to the smart contract. For example, if we are at block 10 and SendBatchBlocksMarginCheck is 5, eventhough at block 11 we canForge, the batch will be discarded if we can't forge at block 15. - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout after which a non-mined ethereum transaction will be resent (reusing the nonce) with a newly calculated gas price - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price allowed for ethereum transactions - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces of pending transactions for new replacement transactions. This is useful for testing with Ganache. - Extend BatchInfo with more useful information for debugging - eth / ethereum client - Add necessary methods to create the auth object for transactions manually so that we can set the nonce, gas price, gas limit, etc manually - Update `RollupForgeBatch` to take an auth object as input (so that the coordinator can set parameters manually) - synchronizer - In stats, add `NextSlot` - In stats, store full last batch instead of just last batch number - Instead of calculating a nextSlot from scratch every time, update the current struct (only updating the forger info if we are Synced) - Afer every processed batch, check that the calculated StateDB MTRoot matches the StateRoot found in the forgeBatch event.
3 years ago
Update coordinator, call all api update functions - Common: - Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition - API: - Add UpdateNetworkInfoBlock to update just block information, to be used when the node is not yet synchronized - Node: - Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with configurable time intervals - Synchronizer: - When mapping events by TxHash, use an array to support the possibility of multiple calls of the same function happening in the same transaction (for example, a smart contract in a single transaction could call withdraw with delay twice, which would generate 2 withdraw events, and 2 deposit events). - In Stats, keep entire LastBlock instead of just the blockNum - In Stats, add lastL1BatchBlock - Test Stats and SCVars - Coordinator: - Enable writing the BatchInfo in every step of the pipeline to disk (with JSON text files) for debugging purposes. - Move the Pipeline functionality from the Coordinator to its own struct (Pipeline) - Implement shouldL1lL2Batch - In TxManager, implement logic to perform several attempts when doing ethereum node RPC calls before considering the error. (Both for calls to forgeBatch and transaction receipt) - In TxManager, reorganize the flow and note the specific points in which actions are made when err != nil - HistoryDB: - Implement GetLastL1BatchBlockNum: returns the blockNum of the latest forged l1Batch, to help the coordinator decide when to forge an L1Batch. - EthereumClient and test.Client: - Update EthBlockByNumber to return the last block when the passed number is -1.
4 years ago
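The "array per TxHash" point in the entry above can be illustrated with a short sketch; the withdrawEvent type and the hash below are made up and are not the synchronizer's real types, the point is only that a map of slices keeps every event when one transaction emits the same event more than once.

package main

import (
    "fmt"

    ethCommon "github.com/ethereum/go-ethereum/common"
)

// withdrawEvent is a made-up stand-in for an event decoded from a receipt log.
type withdrawEvent struct {
    Idx    uint64
    Amount int64
}

func main() {
    txHash := ethCommon.HexToHash("0x01")

    // A plain map[hash]event would keep only the last event of a transaction;
    // a map of slices preserves both withdrawals of the example above.
    byTxHash := make(map[ethCommon.Hash][]withdrawEvent)
    byTxHash[txHash] = append(byTxHash[txHash], withdrawEvent{Idx: 256, Amount: 10})
    byTxHash[txHash] = append(byTxHash[txHash], withdrawEvent{Idx: 257, Amount: 20})

    fmt.Println(len(byTxHash[txHash])) // 2: both events are preserved
}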
Allow serving API only via new cli command - Add new command to the cli/node: `serveapi` that allows serving the API just by connecting to the PostgreSQL database. The mode flag should be passed in order to select whether we are connecting to a synchronizer database or a coordinator database. If `coord` is chosen as mode, the coordinator endpoints can be activated in order to allow inserting l2txs and authorizations into the L2DB. Summary of the implementation details - New SQL table with 3 columns (plus `item_id` pk). The table only contains a single row with `item_id` = 1. Columns: - state: historydb.StateAPI in JSON. This is the struct that is served via the `/state` API endpoint. The node will periodically update this struct and store it in the DB. The API server will query it from the DB to serve it. - config: historydb.NodeConfig in JSON. This struct contains node configuration parameters that the API needs to be aware of. It's updated once every time the node starts. - constants: historydb.Constants in JSON. This struct contains all the hermez network constants gathered via the ethereum client by the node. It's written once every time the node starts. - The HistoryDB contains methods to get and update each one of these columns individually. - The HistoryDB contains all methods that query the DB and prepare objects that will appear in the StateAPI endpoint. - The configuration used for the `serveapi` cli/node command is defined in `config.APIServer`, and is a subset of `node.Config` in order to allow reusing the same configuration file of the node if desired. - A new object is introduced in the api: `StateAPIUpdater`, which contains all the information the node needs to periodically update the StateAPI in the DB. - Moved the types `SCConsts`, `SCVariables` and `SCVariablesPtr` from `synchronizer` to `common` for convenience.
3 years ago
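A rough sketch of the single-row table described above, using only database/sql and encoding/json. This is not the actual HistoryDB code: the node_info table and state column names follow the description but are assumptions, stateAPI is a placeholder for historydb.StateAPI, and the $1 placeholder assumes a PostgreSQL driver. The node-side write and the API-side read would then look roughly like this:

package stateapi

import (
    "database/sql"
    "encoding/json"
)

// stateAPI is a placeholder for historydb.StateAPI; any JSON-serializable
// struct behaves the same way for the purposes of this sketch.
type stateAPI struct {
    LastBlockNum int64 `json:"lastBlockNum"`
    LastBatchNum int64 `json:"lastBatchNum"`
}

// setStateAPI is what the node process would call periodically: serialize the
// state and store it in the single row (item_id = 1) of the assumed table.
func setStateAPI(db *sql.DB, s stateAPI) error {
    b, err := json.Marshal(s)
    if err != nil {
        return err
    }
    _, err = db.Exec("UPDATE node_info SET state = $1 WHERE item_id = 1;", b)
    return err
}

// getStateAPI is what an API-only process would call to serve /state.
func getStateAPI(db *sql.DB) (stateAPI, error) {
    var (
        b []byte
        s stateAPI
    )
    if err := db.QueryRow("SELECT state FROM node_info WHERE item_id = 1;").Scan(&b); err != nil {
        return s, err
    }
    err := json.Unmarshal(b, &s)
    return s, err
}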
package synchronizer

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "math/big"
    "os"
    "sort"
    "testing"
    "time"

    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/hermeznetwork/hermez-node/common"
    dbUtils "github.com/hermeznetwork/hermez-node/db"
    "github.com/hermeznetwork/hermez-node/db/historydb"
    "github.com/hermeznetwork/hermez-node/db/l2db"
    "github.com/hermeznetwork/hermez-node/db/statedb"
    "github.com/hermeznetwork/hermez-node/eth"
    "github.com/hermeznetwork/hermez-node/test"
    "github.com/hermeznetwork/hermez-node/test/til"
    "github.com/jinzhu/copier"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
var tokenConsts = map[common.TokenID]eth.ERC20Consts{}

type timer struct {
    time int64
}

func (t *timer) Time() int64 {
    currentTime := t.time
    t.time++
    return currentTime
}

func accountsCmp(accounts []common.Account) func(i, j int) bool {
    return func(i, j int) bool { return accounts[i].Idx < accounts[j].Idx }
}
// Check Sync output and HistoryDB state against expected values generated by
// til
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block,
    syncBlock *common.BlockData) {
    // Check Blocks
    dbBlocks, err := s.historyDB.GetAllBlocks()
    require.NoError(t, err)
    dbBlocks = dbBlocks[1:] // ignore block 0, added by default in the DB
    assert.Equal(t, blockNum, len(dbBlocks))
    assert.Equal(t, int64(blockNum), dbBlocks[blockNum-1].Num)
    assert.NotEqual(t, dbBlocks[blockNum-1].Hash, dbBlocks[blockNum-2].Hash)
    assert.Greater(t, dbBlocks[blockNum-1].Timestamp.Unix(), dbBlocks[blockNum-2].Timestamp.Unix())
    // Check Tokens
    assert.Equal(t, len(block.Rollup.AddedTokens), len(syncBlock.Rollup.AddedTokens))
    dbTokens, err := s.historyDB.GetAllTokens()
    require.NoError(t, err)
    dbTokens = dbTokens[1:] // ignore token 0, added by default in the DB
    for i, token := range block.Rollup.AddedTokens {
        dbToken := dbTokens[i]
        syncToken := syncBlock.Rollup.AddedTokens[i]
        assert.Equal(t, block.Block.Num, syncToken.EthBlockNum)
        assert.Equal(t, token.TokenID, syncToken.TokenID)
        assert.Equal(t, token.EthAddr, syncToken.EthAddr)
        tokenConst := tokenConsts[token.TokenID]
        assert.Equal(t, tokenConst.Name, syncToken.Name)
        assert.Equal(t, tokenConst.Symbol, syncToken.Symbol)
        assert.Equal(t, tokenConst.Decimals, syncToken.Decimals)
        var tokenCpy historydb.TokenWithUSD
        //nolint:gosec
        require.Nil(t, copier.Copy(&tokenCpy, &token))      // copy the common.Token fields into historydb.TokenWithUSD
        require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy the eth.ERC20Consts fields (Name, Symbol, Decimals) on top
        tokenCpy.ItemID = dbToken.ItemID                    // we don't care about ItemID
        assert.Equal(t, tokenCpy, dbToken)
    }
    // Check submitted L1UserTxs
    assert.Equal(t, len(block.Rollup.L1UserTxs), len(syncBlock.Rollup.L1UserTxs))
    dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
    require.NoError(t, err)
    // Ignore BatchNum in syncBlock.L1UserTxs because this value is set by
    // the HistoryDB. Also ignore EffectiveAmount & EffectiveDepositAmount
    // because this value is set by StateDB.ProcessTxs.
    for i := range syncBlock.Rollup.L1UserTxs {
        syncBlock.Rollup.L1UserTxs[i].BatchNum = block.Rollup.L1UserTxs[i].BatchNum
        assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveDepositAmount)
        assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveAmount)
    }
    assert.Equal(t, block.Rollup.L1UserTxs, syncBlock.Rollup.L1UserTxs)
    for _, tx := range block.Rollup.L1UserTxs {
        var dbTx *common.L1Tx
        // Find tx in DB output
        for _, _dbTx := range dbL1UserTxs {
            if *tx.ToForgeL1TxsNum == *_dbTx.ToForgeL1TxsNum &&
                tx.Position == _dbTx.Position {
                dbTx = new(common.L1Tx)
                *dbTx = _dbTx
                // NOTE: Overwrite EffectiveFromIdx in the L1UserTx
                // from the DB because we don't expect
                // EffectiveFromIdx to be set yet, as this tx
                // has not been forged yet
                dbTx.EffectiveFromIdx = 0
                break
            }
        }
        // If the tx has been forged in this block, this will be
        // reflected in the DB, and so the Effective values will be
        // already set
        if dbTx.BatchNum != nil {
            tx.EffectiveAmount = tx.Amount
            tx.EffectiveDepositAmount = tx.DepositAmount
        }
        assert.Equal(t, &tx, dbTx) //nolint:gosec
    }
    // Check Batches
    assert.Equal(t, len(block.Rollup.Batches), len(syncBlock.Rollup.Batches))
    dbBatches, err := s.historyDB.GetAllBatches()
    require.NoError(t, err)
    dbL1CoordinatorTxs, err := s.historyDB.GetAllL1CoordinatorTxs()
    require.NoError(t, err)
    dbL2Txs, err := s.historyDB.GetAllL2Txs()
    require.NoError(t, err)
    dbExits, err := s.historyDB.GetAllExits()
    require.NoError(t, err)
    // dbL1CoordinatorTxs := []common.L1Tx{}
    for i, batch := range block.Rollup.Batches {
        var dbBatch *common.Batch
        // Find batch in DB output
        for _, _dbBatch := range dbBatches {
            if batch.Batch.BatchNum == _dbBatch.BatchNum {
                dbBatch = new(common.Batch)
                *dbBatch = _dbBatch
                break
            }
        }
        syncBatch := syncBlock.Rollup.Batches[i]
        // We don't care about TotalFeesUSD. Use the syncBatch that
        // has a TotalFeesUSD inserted by the HistoryDB
        batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD
        assert.Equal(t, batch.CreatedAccounts, syncBatch.CreatedAccounts)
        batch.Batch.NumAccounts = len(batch.CreatedAccounts)
        // Test field by field to facilitate debugging of errors
        assert.Equal(t, len(batch.L1UserTxs), len(syncBatch.L1UserTxs))
        // NOTE: EffectiveFromIdx is set on the til L1UserTxs by the
        // `FillBlocksForgedL1UserTxs` function
        for j := range syncBatch.L1UserTxs {
            assert.NotEqual(t, 0, syncBatch.L1UserTxs[j].EffectiveFromIdx)
        }
        assert.Equal(t, batch.L1UserTxs, syncBatch.L1UserTxs)
        // NOTE: EffectiveFromIdx is set on the til L1CoordinatorTxs by the
        // `FillBlocksExtra` function
        for j := range syncBatch.L1CoordinatorTxs {
            assert.NotEqual(t, 0, syncBatch.L1CoordinatorTxs[j].EffectiveFromIdx)
        }
        assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs)
        assert.Equal(t, batch.L2Txs, syncBatch.L2Txs)
        // In the exit tree, we only check AccountIdx and Balance, because
        // it's what we have precomputed before.
        require.Equal(t, len(batch.ExitTree), len(syncBatch.ExitTree))
        for j := range batch.ExitTree {
            exit := &batch.ExitTree[j]
            assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx)
            assert.Equal(t, exit.Balance, syncBatch.ExitTree[j].Balance)
            *exit = syncBatch.ExitTree[j]
        }
        assert.Equal(t, batch.Batch, syncBatch.Batch)
        // Ignore updated accounts
        syncBatch.UpdatedAccounts = nil
        assert.Equal(t, batch, syncBatch)
        assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
        // Check forged L1UserTxs from DB, and check effective values
        // in sync output
        for j, tx := range batch.L1UserTxs {
            var dbTx *common.L1Tx
            // Find tx in DB output
            for _, _dbTx := range dbL1UserTxs {
                if *tx.BatchNum == *_dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L1Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec
            syncTx := &syncBlock.Rollup.Batches[i].L1UserTxs[j]
            assert.Equal(t, syncTx.DepositAmount, syncTx.EffectiveDepositAmount)
            assert.Equal(t, syncTx.Amount, syncTx.EffectiveAmount)
        }
        // Check L1CoordinatorTxs from DB
        for _, tx := range batch.L1CoordinatorTxs {
            var dbTx *common.L1Tx
            // Find tx in DB output
            for _, _dbTx := range dbL1CoordinatorTxs {
                if *tx.BatchNum == *_dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L1Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec
        }
        // Check L2Txs from DB
        for _, tx := range batch.L2Txs {
            var dbTx *common.L2Tx
            // Find tx in DB output
            for _, _dbTx := range dbL2Txs {
                if tx.BatchNum == _dbTx.BatchNum &&
                    tx.Position == _dbTx.Position {
                    dbTx = new(common.L2Tx)
                    *dbTx = _dbTx
                    break
                }
            }
            assert.Equal(t, &tx, dbTx) //nolint:gosec
        }
        // Check Exits from DB
        for _, exit := range batch.ExitTree {
            var dbExit *common.ExitInfo
            // Find exit in DB output
            for _, _dbExit := range dbExits {
                if exit.BatchNum == _dbExit.BatchNum &&
                    exit.AccountIdx == _dbExit.AccountIdx {
                    dbExit = new(common.ExitInfo)
                    *dbExit = _dbExit
                    break
                }
            }
            // Compare MerkleProof in JSON because an unmarshaled 0
            // big.Int leaves the internal big.Int array at nil,
            // which gives trouble when comparing it against a big.Int
            // whose internal array is non-nil but empty.
            mtp, err := json.Marshal(exit.MerkleProof)
            require.NoError(t, err)
            dbMtp, err := json.Marshal(dbExit.MerkleProof)
            require.NoError(t, err)
            assert.Equal(t, mtp, dbMtp)
            dbExit.MerkleProof = exit.MerkleProof
            assert.Equal(t, &exit, dbExit) //nolint:gosec
        }
    }
	// Compare accounts from HistoryDB with StateDB (they should match)
	dbAccounts, err := s.historyDB.GetAllAccounts()
	require.NoError(t, err)
	sdbAccounts, err := s.stateDB.TestGetAccounts()
	require.NoError(t, err)
	assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
}
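
// assertEqualAccountsHistoryDBStateDB compares the accounts returned by the
// HistoryDB with the accounts returned by the StateDB, checking that the
// Idx, TokenID, EthAddr and BJJ of each account match.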
func assertEqualAccountsHistoryDBStateDB(t *testing.T, hdbAccs, sdbAccs []common.Account) {
	assert.Equal(t, len(hdbAccs), len(sdbAccs))
	sort.SliceStable(hdbAccs, accountsCmp(hdbAccs))
	sort.SliceStable(sdbAccs, accountsCmp(sdbAccs))
	for i := range hdbAccs {
		hdbAcc := hdbAccs[i]
		sdbAcc := sdbAccs[i]
		assert.Equal(t, hdbAcc.Idx, sdbAcc.Idx)
		assert.Equal(t, hdbAcc.TokenID, sdbAcc.TokenID)
		assert.Equal(t, hdbAcc.EthAddr, sdbAcc.EthAddr)
		assert.Equal(t, hdbAcc.BJJ, sdbAcc.BJJ)
	}
}
// ethAddTokens adds the tokens from the blocks to the blockchain
func ethAddTokens(blocks []common.BlockData, client *test.Client) {
	for _, block := range blocks {
		for _, token := range block.Rollup.AddedTokens {
			consts := eth.ERC20Consts{
				Name:     fmt.Sprintf("Token %d", token.TokenID),
				Symbol:   fmt.Sprintf("TK%d", token.TokenID),
				Decimals: 18,
			}
			tokenConsts[token.TokenID] = consts
			client.CtlAddERC20(token.EthAddr, consts)
		}
	}
}
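
// chainID is the chain ID used by the test ethereum client and the til sets;
// deleteme collects the temporary directories created by the tests so that
// TestMain can remove them once all the tests have run.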
var chainID uint16 = 0
var deleteme = []string{}

func TestMain(m *testing.M) {
	exitVal := m.Run()
	for _, dir := range deleteme {
		if err := os.RemoveAll(dir); err != nil {
			panic(err)
		}
	}
	os.Exit(exitVal)
}
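
// newTestModules initializes the StateDB (in a temporary directory that will
// be removed by TestMain), the HistoryDB (wiping any previous data) and the
// L2DB used by the synchronizer tests.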
func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db.L2DB) {
	// Init State DB
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	deleteme = append(deleteme, dir)
	stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
		Type: statedb.TypeSynchronizer, NLevels: 32})
	require.NoError(t, err)
	// Init History DB
	pass := os.Getenv("POSTGRES_PASS")
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	historyDB := historydb.NewHistoryDB(db, db, nil)
	// Clear DB
	test.WipeDB(historyDB.DB())
	// Init L2 DB
	l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
	return stateDB, historyDB, l2DB
}
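
// newBigInt parses a base-10 string into a *big.Int, panicking if the string
// is not a valid number.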
func newBigInt(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic(fmt.Errorf("Can't set big.Int from %s", s))
	}
	return v
}
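
// TestSyncGeneral exercises the synchronizer over a chain generated with til:
// sync of the rollup genesis block, two blocks with L1/L2 transactions and
// batches, a block with withdrawals, a block with smart contract variable
// updates, and finally a reorg followed by a re-sync of the new chain.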
func TestSyncGeneral(t *testing.T) {
	//
	// Setup
	//
	stateDB, historyDB, l2DB := newTestModules(t)
	// Init eth client
	var timer timer
	clientSetup := test.NewClientSetupExample()
	clientSetup.ChainID = big.NewInt(int64(chainID))
	bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
	client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
	// Create Synchronizer
	s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
		StatsRefreshPeriod: 0 * time.Second,
	})
	require.NoError(t, err)
	ctx := context.Background()
	//
	// First Sync from an initial state
	//
	stats := s.Stats()
	assert.Equal(t, false, stats.Synced())
	// Test Sync for rollup genesis block
	syncBlock, discards, err := s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.NotNil(t, syncBlock)
	require.Nil(t, syncBlock.Rollup.Vars)
	require.Nil(t, syncBlock.Auction.Vars)
	require.Nil(t, syncBlock.WDelayer.Vars)
	assert.Equal(t, int64(1), syncBlock.Block.Num)
	stats = s.Stats()
	assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
	assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
	assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
	vars := s.SCVars()
	assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
	assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
	assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
	dbBlocks, err := s.historyDB.GetAllBlocks()
	require.NoError(t, err)
	assert.Equal(t, 2, len(dbBlocks))
	assert.Equal(t, int64(1), dbBlocks[1].Num)
	// Sync again and expect no new blocks
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.Nil(t, syncBlock)
	//
	// Generate blockchain and smart contract data, and fill the test smart contracts
	//
	// Generate blockchain data with til
	set1 := `
Type: Blockchain
AddToken(1)
AddToken(2)
AddToken(3)
CreateAccountDeposit(1) C: 2000 // Idx=256+2=258
CreateAccountDeposit(2) A: 2000 // Idx=256+3=259
CreateAccountDeposit(1) D: 500 // Idx=256+4=260
CreateAccountDeposit(2) B: 500 // Idx=256+5=261
CreateAccountDeposit(2) C: 500 // Idx=256+6=262
CreateAccountCoordinator(1) A // Idx=256+0=256
CreateAccountCoordinator(1) B // Idx=256+1=257
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{5}
> batchL1 // forge defined L1UserTxs{5}, freeze L1UserTxs{nil}
> block // blockNum=2
CreateAccountDepositTransfer(1) E-A: 1000, 200 // Idx=256+7=263
ForceTransfer(1) C-B: 80
ForceExit(1) A: 100
ForceExit(1) B: 80
ForceTransfer(1) A-D: 100
Transfer(1) C-A: 100 (126)
Exit(1) C: 50 (100)
Exit(1) D: 30 (100)
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{3}
> batchL1 // forge L1UserTxs{3}, freeze defined L1UserTxs{nil}
> block // blockNum=3
`
	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	tilCfgExtra := til.ConfigExtra{
		BootCoordAddr: bootCoordAddr,
		CoordUser:     "A",
	}
	blocks, err := tc.GenerateBlocks(set1)
	require.NoError(t, err)
	// Sanity check
	require.Equal(t, 2, len(blocks))
	// blocks 0 (blockNum=2)
	i := 0
	require.Equal(t, 2, int(blocks[i].Block.Num))
	require.Equal(t, 3, len(blocks[i].Rollup.AddedTokens))
	require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
	require.Equal(t, 2, len(blocks[i].Rollup.Batches))
	require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
	// Set StateRoots for batches manually (til doesn't set them)
	blocks[i].Rollup.Batches[0].Batch.StateRoot =
		newBigInt("11432094872416618651837327395264042968926668786266585816625577088890451620254")
	blocks[i].Rollup.Batches[1].Batch.StateRoot =
		newBigInt("16914212635847451457076355431350059348585556180740555407203882688922702410093")
	// blocks 1 (blockNum=3)
	i = 1
	require.Equal(t, 3, int(blocks[i].Block.Num))
	require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
	require.Equal(t, 2, len(blocks[i].Rollup.Batches))
	require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
	// Set StateRoots for batches manually (til doesn't set them)
	blocks[i].Rollup.Batches[0].Batch.StateRoot =
		newBigInt("13535760140937349829640752733057594576151546047374619177689224612061148090678")
	blocks[i].Rollup.Batches[1].Batch.StateRoot =
		newBigInt("19413739476363469870744893742469056615496274423228302914851564791727474664804")
	// Generate extra required data
	ethAddTokens(blocks, client)
	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
	require.NoError(t, err)
	tc.FillBlocksL1UserTxsBatchNum(blocks)
	err = tc.FillBlocksForgedL1UserTxs(blocks)
	require.NoError(t, err)
	// Add block data to the smart contracts
	err = client.CtlAddBlocks(blocks)
	require.NoError(t, err)
	//
	// Sync to synchronize the current state from the test smart contracts,
	// and check the outcome
	//
	// Block 2
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.NotNil(t, syncBlock)
	assert.Nil(t, syncBlock.Rollup.Vars)
	assert.Nil(t, syncBlock.Auction.Vars)
	assert.Nil(t, syncBlock.WDelayer.Vars)
	assert.Equal(t, int64(2), syncBlock.Block.Num)
	stats = s.Stats()
	assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
	assert.Equal(t, int64(3), stats.Eth.LastBlock.Num)
	assert.Equal(t, int64(2), stats.Sync.LastBlock.Num)
	checkSyncBlock(t, s, 2, &blocks[0], syncBlock)
	// Block 3
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.NotNil(t, syncBlock)
	assert.Nil(t, syncBlock.Rollup.Vars)
	assert.Nil(t, syncBlock.Auction.Vars)
	assert.Nil(t, syncBlock.WDelayer.Vars)
	assert.Equal(t, int64(3), syncBlock.Block.Num)
	stats = s.Stats()
	assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
	assert.Equal(t, int64(3), stats.Eth.LastBlock.Num)
	assert.Equal(t, int64(3), stats.Sync.LastBlock.Num)
	checkSyncBlock(t, s, 3, &blocks[1], syncBlock)
	// Block 4
	// Generate 2 withdraws manually
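	// (one instant withdrawal for the exit of idx 256 at batchNum 4, and one
	// non-instant withdrawal, which becomes a delayed withdraw request, for
	// the exit of idx 258 at batchNum 3; these are the two exits checked
	// against the HistoryDB below)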
	_, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256,
		big.NewInt(100), []*big.Int{}, true)
	require.NoError(t, err)
	_, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258,
		big.NewInt(50), []*big.Int{}, false)
	require.NoError(t, err)
	client.CtlMineBlock()
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.NotNil(t, syncBlock)
	assert.Nil(t, syncBlock.Rollup.Vars)
	assert.Nil(t, syncBlock.Auction.Vars)
	assert.Nil(t, syncBlock.WDelayer.Vars)
	assert.Equal(t, int64(4), syncBlock.Block.Num)
	stats = s.Stats()
	assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
	assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
	assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
	vars = s.SCVars()
	assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
	assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
	assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
	dbExits, err := s.historyDB.GetAllExits()
	require.NoError(t, err)
	foundA1, foundC1 := false, false
	for _, exit := range dbExits {
		if exit.AccountIdx == 256 && exit.BatchNum == 4 {
			foundA1 = true
			assert.Equal(t, int64(4), *exit.InstantWithdrawn)
		}
		if exit.AccountIdx == 258 && exit.BatchNum == 3 {
			foundC1 = true
			assert.Equal(t, int64(4), *exit.DelayedWithdrawRequest)
		}
	}
	assert.True(t, foundA1)
	assert.True(t, foundC1)
	// Block 5
	// Update variables manually
	rollupVars, auctionVars, wDelayerVars, err := s.historyDB.GetSCVars()
	require.NoError(t, err)
	rollupVars.ForgeL1L2BatchTimeout = 42
	_, err = client.RollupUpdateForgeL1L2BatchTimeout(rollupVars.ForgeL1L2BatchTimeout)
	require.NoError(t, err)
	auctionVars.OpenAuctionSlots = 17
	_, err = client.AuctionSetOpenAuctionSlots(auctionVars.OpenAuctionSlots)
	require.NoError(t, err)
	wDelayerVars.WithdrawalDelay = 99
	_, err = client.WDelayerChangeWithdrawalDelay(wDelayerVars.WithdrawalDelay)
	require.NoError(t, err)
	client.CtlMineBlock()
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	require.Nil(t, discards)
	require.NotNil(t, syncBlock)
	assert.NotNil(t, syncBlock.Rollup.Vars)
	assert.NotNil(t, syncBlock.Auction.Vars)
	assert.NotNil(t, syncBlock.WDelayer.Vars)
	assert.Equal(t, int64(5), syncBlock.Block.Num)
	stats = s.Stats()
	assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
	assert.Equal(t, int64(5), stats.Eth.LastBlock.Num)
	assert.Equal(t, int64(5), stats.Sync.LastBlock.Num)
	vars = s.SCVars()
	assert.NotEqual(t, clientSetup.RollupVariables, vars.Rollup)
	assert.NotEqual(t, clientSetup.AuctionVariables, vars.Auction)
	assert.NotEqual(t, clientSetup.WDelayerVariables, vars.WDelayer)
	dbRollupVars, dbAuctionVars, dbWDelayerVars, err := s.historyDB.GetSCVars()
	require.NoError(t, err)
	// Set EthBlockNum for Vars to the blockNum in which they were updated (should be 5)
	rollupVars.EthBlockNum = syncBlock.Block.Num
	auctionVars.EthBlockNum = syncBlock.Block.Num
	wDelayerVars.EthBlockNum = syncBlock.Block.Num
	assert.Equal(t, rollupVars, dbRollupVars)
	assert.Equal(t, auctionVars, dbAuctionVars)
	assert.Equal(t, wDelayerVars, dbWDelayerVars)
	//
	// Reorg test
	//
	// Redo blocks 2-5 (as a reorg), only leaving:
	// - 2 create account transactions
	// - 2 add tokens
	// We add a 6th block so that the synchronizer can detect the reorg
	set2 := `
Type: Blockchain
AddToken(1)
AddToken(2)
CreateAccountDeposit(1) C: 2000 // Idx=256+1=257
CreateAccountCoordinator(1) A // Idx=256+0=256
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{1}
> batchL1 // forge defined L1UserTxs{1}, freeze L1UserTxs{nil}
> block // blockNum=2
> block // blockNum=3
> block // blockNum=4
> block // blockNum=5
> block // blockNum=6
`
	tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	tilCfgExtra = til.ConfigExtra{
		BootCoordAddr: bootCoordAddr,
		CoordUser:     "A",
	}
	blocks, err = tc.GenerateBlocks(set2)
	require.NoError(t, err)
	// Set StateRoots for batches manually (til doesn't set them)
	blocks[0].Rollup.Batches[0].Batch.StateRoot =
		newBigInt("14095767774967159269372103336737817266053275274769794195030162905513860477094")
	blocks[0].Rollup.Batches[1].Batch.StateRoot =
		newBigInt("2095674348545184674850951945506660952512376416769035169971006930847780339914")
	for i := 0; i < 4; i++ {
		client.CtlRollback()
	}
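	// After rolling back 4 blocks, the last block in the test client should
	// be block 1 again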
	block := client.CtlLastBlock()
	require.Equal(t, int64(1), block.Num)
	// Generate extra required data
	ethAddTokens(blocks, client)
	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
	require.NoError(t, err)
	tc.FillBlocksL1UserTxsBatchNum(blocks)
	// Add block data to the smart contracts
	err = client.CtlAddBlocks(blocks)
	require.NoError(t, err)
	// First sync detects the reorg and discards 4 blocks
	syncBlock, discards, err = s.Sync(ctx, nil)
	require.NoError(t, err)
	expectedDiscards := int64(4)
	require.Equal(t, &expectedDiscards, discards)
	require.Nil(t, syncBlock)
	stats = s.Stats()
	assert.Equal(t, false, stats.Synced())
	assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
	vars = s.SCVars()
	assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
	assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
	assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
	// At this point, the DB only has data up to block 1
	dbBlock, err := s.historyDB.GetLastBlock()
	require.NoError(t, err)
	assert.Equal(t, int64(1), dbBlock.Num)
	// Accounts in HistoryDB and StateDB must be empty
	dbAccounts, err := s.historyDB.GetAllAccounts()
	require.NoError(t, err)
	sdbAccounts, err := s.stateDB.TestGetAccounts()
	require.NoError(t, err)
	assert.Equal(t, 0, len(dbAccounts))
	assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
	// Sync blocks 2-6
	for i := 0; i < 5; i++ {
		syncBlock, discards, err = s.Sync(ctx, nil)
		require.NoError(t, err)
		require.Nil(t, discards)
		require.NotNil(t, syncBlock)
		assert.Nil(t, syncBlock.Rollup.Vars)
		assert.Nil(t, syncBlock.Auction.Vars)
		assert.Nil(t, syncBlock.WDelayer.Vars)
		assert.Equal(t, int64(2+i), syncBlock.Block.Num)
		stats = s.Stats()
		assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
		assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
		assert.Equal(t, int64(2+i), stats.Sync.LastBlock.Num)
		if i == 4 {
			assert.Equal(t, true, stats.Synced())
		} else {
			assert.Equal(t, false, stats.Synced())
		}
		vars = s.SCVars()
		assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
		assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
		assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
	}
	dbBlock, err = s.historyDB.GetLastBlock()
	require.NoError(t, err)
	assert.Equal(t, int64(6), dbBlock.Num)
	// After the reorg, HistoryDB and StateDB should only contain the 2
	// accounts created in the new chain
	dbAccounts, err = s.historyDB.GetAllAccounts()
	require.NoError(t, err)
	sdbAccounts, err = s.stateDB.TestGetAccounts()
	require.NoError(t, err)
	assert.Equal(t, 2, len(dbAccounts))
	assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
}
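
// TestSyncForgerCommitment checks that the synchronizer tracks the
// ForgerCommitment flag of the current slot correctly, both for a live
// synchronizer and for a synchronizer that is re-created (from the same DBs)
// at every block.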
func TestSyncForgerCommitment(t *testing.T) {
	stateDB, historyDB, l2DB := newTestModules(t)
	// Init eth client
	var timer timer
	clientSetup := test.NewClientSetupExample()
	clientSetup.ChainID = big.NewInt(int64(chainID))
	clientSetup.AuctionConstants.GenesisBlockNum = 2
	clientSetup.AuctionConstants.BlocksPerSlot = 4
	clientSetup.AuctionVariables.SlotDeadline = 2
	bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
	client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
	// Create Synchronizer
	s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
		StatsRefreshPeriod: 0 * time.Second,
	})
	require.NoError(t, err)
	ctx := context.Background()
	set := `
Type: Blockchain
// Slot = 0
> block // 2
> block // 3
> block // 4
> block // 5
// Slot = 1
> block // 6
> batch
> block // 7
> block // 8
> block // 9
// Slot = 2
> block // 10
> block // 11
> batch
> block // 12
> block // 13
`
	// For each block, true when the slot that the following block belongs to
	// has forgerCommitment
	commitment := map[int64]bool{
		2:  false,
		3:  false,
		4:  false,
		5:  false,
		6:  false,
		7:  true,
		8:  true,
		9:  false,
		10: false,
		11: false,
		12: false,
		13: false,
	}
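	// Generate the blocks with til: the set above contains no transactions,
	// only batches and blocks, which is enough to exercise the slot /
	// forgerCommitment logic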
	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(set)
	assert.NoError(t, err)
	tilCfgExtra := til.ConfigExtra{
		BootCoordAddr: bootCoordAddr,
		CoordUser:     "A",
	}
	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
	require.NoError(t, err)
	// for i := range blocks {
	// 	for j := range blocks[i].Rollup.Batches {
	// 		blocks[i].Rollup.Batches[j].Batch.SlotNum = int64(i) / 4
	// 	}
	// }
	// Sync until the synchronizer is up to date with the current chain
	for {
		syncBlock, discards, err := s.Sync(ctx, nil)
		require.NoError(t, err)
		require.Nil(t, discards)
		if syncBlock == nil {
			break
		}
	}
	stats := s.Stats()
	require.Equal(t, int64(1), stats.Sync.LastBlock.Num)
	// Store the ForgerCommitment observed at every block by the live synchronizer
	syncCommitment := map[int64]bool{}
	// Store the ForgerCommitment observed at every block by a synchronizer that is restarted
	syncRestartedCommitment := map[int64]bool{}
	for _, block := range blocks {
		// Add block data to the smart contracts
		err = client.CtlAddBlocks([]common.BlockData{block})
		require.NoError(t, err)
		syncBlock, discards, err := s.Sync(ctx, nil)
		require.NoError(t, err)
		require.Nil(t, discards)
		if syncBlock == nil {
			break
		}
		stats := s.Stats()
		require.True(t, stats.Synced())
		syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
		s2, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
			StatsRefreshPeriod: 0 * time.Second,
		})
		require.NoError(t, err)
		stats = s2.Stats()
		require.True(t, stats.Synced())
		syncRestartedCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
	}
	assert.Equal(t, commitment, syncCommitment)
	assert.Equal(t, commitment, syncRestartedCommitment)
}