Integrate purger into node

- Common
	- Add the `IdxNonce` type, used to track account nonces so that
	  outdated l2txs in the pool can be invalidated (see the sketch
	  below)
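
A minimal sketch of what this type can look like, assuming it lives in `common` next to the existing `Idx` and `Nonce` types (db tags omitted):

```go
package common

// IdxNonce pairs an account index with that account's current nonce.
// Any pending pool l2tx from Idx with a nonce below Nonce is no longer
// valid and can be invalidated.
type IdxNonce struct {
	Idx   Idx
	Nonce Nonce
}
```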
- Config
	- Update the coordinator config with all the new configuration
	  parameters used by the coordinator (see the `PurgerCfg` sketch
	  below)
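
The four delays wired into `coordinator.PurgerCfg` in the diff below suggest a shape along these lines (field types assumed):

```go
package coordinator

// PurgerCfg groups the delays that control how often pool l2txs are
// purged and invalidated: batch-based delays apply while the node is
// forging, block-based delays while it is not.
type PurgerCfg struct {
	PurgeBatchDelay      int64 // batches between purges while forging
	InvalidateBatchDelay int64 // batches between invalidations while forging
	PurgeBlockDelay      int64 // blocks between purges while not forging
	InvalidateBlockDelay int64 // blocks between invalidations while not forging
}
```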
- Coordinator
	- Introduce the `Purger` to track how often to purge and do the job when
	  needed according to a configuration.
	- Implement the methods to invalidate l2txs due to the l2tx
	  selection in batches.  For now these methods are unused in favour
	  of the `Purger` methods, which check ALL the l2txs in the pool.
	- Call the invalidation and purging methods of the purger both when
	  the node is forging (in the pipeline, when starting a new batch)
	  and when it is not forging (in the coordinator, when notified of
	  a new synced block); see the sketch below
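
A rough illustration of the "do the job when needed" logic; the bookkeeping fields and method bodies are assumptions, not code from this commit:

```go
package coordinator

// Purger tracks when the last purge/invalidation happened so that each
// is only re-run after the configured number of blocks or batches.
type Purger struct {
	cfg                 PurgerCfg
	lastPurgeBlock      int64
	lastPurgeBatch      int64
	lastInvalidateBlock int64
	lastInvalidateBatch int64
}

// CanPurge reports whether the configured block or batch delay since
// the last purge has elapsed.
func (p *Purger) CanPurge(blockNum, batchNum int64) bool {
	return blockNum >= p.lastPurgeBlock+p.cfg.PurgeBlockDelay ||
		batchNum >= p.lastPurgeBatch+p.cfg.PurgeBatchDelay
}

// CanInvalidate is the equivalent check for invalidation passes.
func (p *Purger) CanInvalidate(blockNum, batchNum int64) bool {
	return blockNum >= p.lastInvalidateBlock+p.cfg.InvalidateBlockDelay ||
		batchNum >= p.lastInvalidateBatch+p.cfg.InvalidateBatchDelay
}
```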
- L2DB:
	- Implement `GetPendingUniqueFromIdxs` to get all the unique idxs
	  from pending transactions (used to fetch their nonces and then
	  invalidate txs)
	- Redo `CheckNonces` with a single SQL query, using
	  `common.IdxNonce` instead of `common.Account` (see the sketch
	  below)
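
The single-query version can join the pool against a VALUES list of (idx, nonce) pairs; a hedged sketch, where the table and column names are assumptions:

```go
package l2db

import (
	"fmt"
	"strings"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/jmoiron/sqlx"
)

// checkNoncesSketch invalidates, in one statement, every pending pool
// tx whose nonce is below its sender account's current nonce.
func checkNoncesSketch(db *sqlx.DB, updated []common.IdxNonce,
	batchNum common.BatchNum) error {
	if len(updated) == 0 {
		return nil
	}
	// Build one VALUES row per account: ($2, $3), ($4, $5), ...
	vals := make([]string, 0, len(updated))
	args := make([]interface{}, 0, 1+2*len(updated))
	args = append(args, batchNum)
	for i, a := range updated {
		vals = append(vals, fmt.Sprintf("($%d::bigint, $%d::bigint)", 2*i+2, 2*i+3))
		args = append(args, a.Idx, a.Nonce)
	}
	query := `UPDATE tx_pool SET state = 'invl', batch_num = $1
		FROM (VALUES ` + strings.Join(vals, ", ") + `) AS updated (idx, nonce)
		WHERE tx_pool.from_idx = updated.idx
		AND tx_pool.nonce < updated.nonce;`
	_, err := db.Exec(query, args...)
	return err
}
```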
- StateDB:
	- Expose `GetIdx` so that errors can be checked when invalidating
	  pool txs
- Synchronizer:
	- Test that forged L1UserTxs are processed by the TxProcessor
	- Improve the checks of `Effective` values
- TxSelector:
	- Expose the internal `LocalStateDB` so the coordinator can check
	  account nonces when not forging (see the sketch below)
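
Putting the pieces together, the not-forging path can look roughly like this (only the names mentioned in the commit message above are real; the rest is illustrative):

```go
package coordinator

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/l2db"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

// invalidateFromSyncSketch: on each synced block while not forging,
// read the nonce of every account that still has pending txs from the
// TxSelector's LocalStateDB and invalidate the stale txs in one pass.
func invalidateFromSyncSketch(l2DB *l2db.L2DB, sdb *statedb.LocalStateDB,
	batchNum common.BatchNum) error {
	idxs, err := l2DB.GetPendingUniqueFromIdxs()
	if err != nil {
		return err
	}
	idxsNonces := make([]common.IdxNonce, 0, len(idxs))
	for _, idx := range idxs {
		acc, err := sdb.GetAccount(idx)
		if err != nil {
			continue // account not present in the local state yet
		}
		idxsNonces = append(idxsNonces, common.IdxNonce{Idx: idx, Nonce: acc.Nonce})
	}
	return l2DB.CheckNonces(idxsNonces, batchNum)
}
```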
Author: Eduard S
Date:   2020-12-03 14:58:26 +01:00
Parent: 8de7fe537a
Commit: 900d1fb6ce
15 changed files with 402 additions and 101 deletions

@@ -170,8 +170,19 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
 		coord, err = coordinator.NewCoordinator(
 			coordinator.Config{
-				ForgerAddress: coordCfg.ForgerAddress,
-				ConfirmBlocks: coordCfg.ConfirmBlocks,
+				ForgerAddress:          coordCfg.ForgerAddress,
+				ConfirmBlocks:          coordCfg.ConfirmBlocks,
+				L1BatchTimeoutPerc:     coordCfg.L1BatchTimeoutPerc,
+				EthClientAttempts:      coordCfg.EthClient.Attempts,
+				EthClientAttemptsDelay: coordCfg.EthClient.AttemptsDelay.Duration,
+				TxManagerCheckInterval: coordCfg.EthClient.IntervalCheckLoop.Duration,
+				DebugBatchPath:         coordCfg.Debug.BatchPath,
+				Purger: coordinator.PurgerCfg{
+					PurgeBatchDelay:      coordCfg.L2DB.PurgeBatchDelay,
+					InvalidateBatchDelay: coordCfg.L2DB.InvalidateBatchDelay,
+					PurgeBlockDelay:      coordCfg.L2DB.PurgeBlockDelay,
+					InvalidateBlockDelay: coordCfg.L2DB.InvalidateBlockDelay,
+				},
 			},
 			historyDB,
 			l2DB,
@@ -327,7 +338,9 @@ func (n *Node) syncLoopFn(lastBlock *common.Block) (*common.Block, time.Duration
 		// case: reorg
 		log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
 		if n.mode == ModeCoordinator {
-			n.coord.SendMsg(coordinator.MsgSyncReorg{})
+			n.coord.SendMsg(coordinator.MsgSyncReorg{
+				Stats: *stats,
+			})
 		}
 		if n.nodeAPI != nil {
 			rollup, auction, wDelayer := n.sync.SCVars()
@@ -351,8 +364,9 @@ func (n *Node) syncLoopFn(lastBlock *common.Block) (*common.Block, time.Duration
 					WDelayer: blockData.WDelayer.Vars,
 				})
 			}
-			n.coord.SendMsg(coordinator.MsgSyncStats{
-				Stats: *stats,
+			n.coord.SendMsg(coordinator.MsgSyncBlock{
+				Stats:   *stats,
+				Batches: blockData.Rollup.Batches,
 			})
 		}
 		if n.nodeAPI != nil {
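
For reference, the message shapes these hunks imply; field types are inferred from the call sites, not copied from the commit:

```go
package coordinator

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/synchronizer"
)

// MsgSyncBlock replaces MsgSyncStats: besides the synchronizer stats it
// carries the batches forged in the newly synced block.
type MsgSyncBlock struct {
	Stats   synchronizer.Stats
	Batches []common.BatchData
}

// MsgSyncReorg now also carries the stats at the point of the reorg.
type MsgSyncReorg struct {
	Stats synchronizer.Stats
}
```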