Update coordinator to work better under a real network

- cli / node
    - Update the SIGINT handler so that after 3 SIGINTs the process
      terminates unconditionally (see the sketch after this list)
- coordinator
    - Store stats by value instead of behind a pointer
    - In every function that sends a value over a channel, also check for
      context cancellation, to avoid a deadlock when the node is stopped
      (the channels are unbuffered, so a send blocks forever once the
      reader goroutine has exited); see the `SendMsg` sketch after this
      list
    - Abstract `canForge` so that it can be used outside of the `Coordinator`
    - In `canForge`, check the block number against both the current and
      the next slot (sketch after this list)
    - Update tests to reflect the smart contract changes in slot handling
      and in the minimum bid defaults
    - TxManager
        - Add consts, vars and stats to allow evaluating `canForge`
        - Add `canForge` method (not used yet)
        - Store the batch and nonce status (last successful and last
          pending)
        - Track nonces internally instead of relying on the Ethereum node
          (required to work with ganache when there are pending txs); see
          the nonce-tracking sketch after this list
        - Handle the (common) case of the receipt not being found right
          after the tx is sent (sketch after this list)
        - Don't start the main loop until we get an initial message with
          the stats and vars (so that inside the loop the stats and vars
          always hold synchronizer values); see the sketch after this
          list
- eth / ethereum client
    - Add the methods needed to build the auth object for transactions
      manually, so that the nonce, gas price, gas limit, etc. can be set
      explicitly (sketch after this list)
    - Update `RollupForgeBatch` to take an auth object as input (so that the
      coordinator can set parameters manually)
- synchronizer
    - In stats, add `NextSlot`
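
A minimal sketch of the new SIGINT behavior (illustrative wiring, not the
actual cli code): the first SIGINT cancels the node's root context for a
graceful shutdown, and the third one terminates the process unconditionally.

```go
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stopCh := make(chan os.Signal, 1)
	signal.Notify(stopCh, syscall.SIGINT)
	go func() {
		n := 0
		for range stopCh {
			n++
			if n == 1 {
				cancel() // first SIGINT: request graceful shutdown
			}
			if n >= 3 {
				os.Exit(1) // third SIGINT: terminate unconditionally
			}
		}
	}()
	<-ctx.Done() // the node would run here until shutdown
}
```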
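
The context-aware channel send is visible in the diff below: `SendMsg` now
takes a `ctx`. A minimal sketch of the pattern (the `msgCh` field name is an
assumption):

```go
package coordinator

import "context"

type Coordinator struct {
	msgCh chan interface{} // unbuffered: a send blocks until someone reads
}

// SendMsg delivers a message to the coordinator's main loop. Selecting on
// ctx.Done() means a stopped node (whose reader goroutine has exited)
// unblocks the sender instead of deadlocking it.
func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
	select {
	case c.msgCh <- msg:
	case <-ctx.Done():
	}
}
```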
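
A hedged sketch of the two-slot `canForge` check; the `Slot` type, its
fields, and the deadline rule are simplifications, not the actual
hermez-node definitions:

```go
package coordinator

import ethCommon "github.com/ethereum/go-ethereum/common"

// Slot is an illustrative stand-in for the auction slot data.
type Slot struct {
	StartBlock  int64
	EndBlock    int64             // first block of the following slot
	Forger      ethCommon.Address // winner of the slot's auction
	BatchForged bool              // whether a batch was already forged in the slot
}

// canForgeAt applies the per-slot rule: the winner may always forge inside
// its slot; past the in-slot deadline, if nothing has been forged yet,
// anyone may forge.
func canForgeAt(addr ethCommon.Address, blockNum int64, slot Slot, deadline int64) bool {
	if blockNum < slot.StartBlock || blockNum >= slot.EndBlock {
		return false
	}
	if addr == slot.Forger {
		return true
	}
	return !slot.BatchForged && blockNum >= slot.StartBlock+deadline
}

// canForge checks the block number against both the current and the next
// slot, since the forge tx may be mined one block later than planned.
func canForge(addr ethCommon.Address, blockNum int64, current, next Slot, deadline int64) bool {
	return canForgeAt(addr, blockNum, current, deadline) ||
		canForgeAt(addr, blockNum+1, next, deadline)
}
```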
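
Internal nonce tracking, sketched for a hypothetical `TxManager` (field
names are assumptions): the account nonce is fetched from the node once and
then assigned locally, so pending (unmined) txs under ganache don't corrupt
the accounting.

```go
package txmanager

import (
	"context"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

type TxManager struct {
	client      *ethclient.Client
	account     ethCommon.Address
	nextNonce   uint64 // next nonce to hand out
	initialized bool
}

// NextNonce returns the nonce for the next transaction. Only the first call
// asks the Ethereum node; afterwards nonces are incremented locally, which
// keeps working when earlier txs are still pending.
func (t *TxManager) NextNonce(ctx context.Context) (uint64, error) {
	if !t.initialized {
		n, err := t.client.PendingNonceAt(ctx, t.account)
		if err != nil {
			return 0, err
		}
		t.nextNonce = n
		t.initialized = true
	}
	n := t.nextNonce
	t.nextNonce++
	return n, nil
}
```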
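
The receipt-not-found case can be handled by treating `ethereum.NotFound`
from `TransactionReceipt` as "not mined yet" rather than as a failure; a
sketch:

```go
package txmanager

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum"
	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// checkReceipt polls for the receipt of a sent tx. Right after sending, the
// node commonly has no receipt yet and returns ethereum.NotFound; a nil
// receipt with nil error tells the caller to simply retry later.
func checkReceipt(ctx context.Context, client *ethclient.Client,
	txHash ethCommon.Hash) (*types.Receipt, error) {
	receipt, err := client.TransactionReceipt(ctx, txHash)
	if errors.Is(err, ethereum.NotFound) {
		return nil, nil // tx not mined yet; not an error
	}
	if err != nil {
		return nil, err
	}
	return receipt, nil
}
```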
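
Deferring the main loop until the first stats/vars message, sketched with
placeholder types (`statsVars` and the channel wiring are assumptions):

```go
package txmanager

import (
	"context"
	"time"
)

type statsVars struct {
	Stats struct{} // placeholder for the synchronizer stats
	Vars  struct{} // placeholder for the smart contract variables
}

func runTxManager(ctx context.Context, statsVarsCh <-chan statsVars) {
	var sv statsVars
	// Block here until the initial message arrives, so the loop below never
	// observes zero-valued stats/vars.
	select {
	case <-ctx.Done():
		return
	case sv = <-statsVarsCh:
	}
	for {
		select {
		case <-ctx.Done():
			return
		case sv = <-statsVarsCh: // keep stats/vars up to date
		case <-time.After(time.Second):
			_ = sv // periodic work: check receipts, evaluate canForge, ...
		}
	}
}
```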
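
Building the transaction auth object manually with go-ethereum, so nonce,
gas price, and gas limit are set explicitly instead of queried from the
node; a sketch (the helper and its inputs are illustrative):

```go
package eth

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func newAuth(hexKey string, chainID *big.Int, nonce uint64,
	gasPrice *big.Int, gasLimit uint64) (*bind.TransactOpts, error) {
	key, err := crypto.HexToECDSA(hexKey)
	if err != nil {
		return nil, err
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, chainID)
	if err != nil {
		return nil, err
	}
	// Set everything explicitly; the contract binding then uses these
	// values instead of asking the node.
	auth.Nonce = new(big.Int).SetUint64(nonce)
	auth.GasPrice = gasPrice
	auth.GasLimit = gasLimit
	return auth, nil
}
```

The updated `RollupForgeBatch` can then take such a `*bind.TransactOpts` as
input, letting the coordinator control these parameters per transaction.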
Eduard S
2021-01-15 19:37:39 +01:00
parent 168432c559
commit 70482605c4
20 changed files with 664 additions and 308 deletions


@@ -429,10 +429,10 @@ func (a *NodeAPI) Run(ctx context.Context) error {
 	return nil
 }
 
-func (n *Node) handleNewBlock(stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
+func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
 	batches []common.BatchData) {
 	if n.mode == ModeCoordinator {
-		n.coord.SendMsg(coordinator.MsgSyncBlock{
+		n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
 			Stats:   *stats,
 			Vars:    vars,
 			Batches: batches,
@@ -461,9 +461,9 @@ func (n *Node) handleNewBlock(stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
 	}
 }
 
-func (n *Node) handleReorg(stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
+func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
 	if n.mode == ModeCoordinator {
-		n.coord.SendMsg(coordinator.MsgSyncReorg{
+		n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
 			Stats: *stats,
 			Vars:  vars,
 		})
@@ -491,7 +491,7 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
 		// case: reorg
 		log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
 		vars := n.sync.SCVars()
-		n.handleReorg(stats, vars)
+		n.handleReorg(ctx, stats, vars)
 		return nil, time.Duration(0), nil
 	} else if blockData != nil {
 		// case: new block
@@ -500,7 +500,7 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
 			Auction:  blockData.Auction.Vars,
 			WDelayer: blockData.WDelayer.Vars,
 		}
-		n.handleNewBlock(stats, vars, blockData.Rollup.Batches)
+		n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
 		return &blockData.Block, time.Duration(0), nil
 	} else {
 		// case: no block
@@ -519,7 +519,7 @@ func (n *Node) StartSynchronizer() {
 	// the last synced one) is synchronized
 	stats := n.sync.Stats()
 	vars := n.sync.SCVars()
-	n.handleNewBlock(stats, vars, []common.BatchData{})
+	n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
 	n.wg.Add(1)
 	go func() {