commit 39eb715b98 (branch feature/update-txman3)
Author: Eduard S, 3 years ago

    WIP3

6 changed files with 60 additions and 42 deletions
  1. config/config.go (+3 -0)
  2. coordinator/coordinator.go (+3 -0)
  3. coordinator/txmanager.go (+5 -2)
  4. go.sum (+2 -0)
  5. node/node.go (+1 -0)
  6. synchronizer/synchronizer.go (+46 -40)

config/config.go (+3 -0)

@@ -141,6 +141,9 @@ type Coordinator struct {
 	// ethereum transaction will be resent (reusing the nonce) with
 	// a newly calculated gas price
 	TxResendTimeout Duration `validate:"required"`
+	// NoReuseNonce disables reusing nonces of pending transactions for
+	// new replacement transactions
+	NoReuseNonce bool
 	// Keystore is the ethereum keystore where private keys are kept
 	Keystore struct {
 		// Path to the keystore
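The new flag sits under the coordinator's EthClient section of the node config; the node/node.go hunk below copies it into the coordinator's runtime config. A minimal sketch of toggling it programmatically, assuming the module path github.com/hermeznetwork/hermez-node and the field layout shown in this diff:

package main

import (
	"github.com/hermeznetwork/hermez-node/config"
	"github.com/hermeznetwork/hermez-node/coordinator"
)

func main() {
	// Field paths taken from this commit (config/config.go and node/node.go).
	var cfg config.Node
	cfg.Coordinator.EthClient.NoReuseNonce = true // never replace a pending tx by reusing its nonce
	coordCfg := coordinator.Config{
		EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
	}
	_ = coordCfg // would be passed on when building the coordinator
}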

coordinator/coordinator.go (+3 -0)

@@ -82,6 +82,9 @@ type Config struct {
 	// transaction will be resent (reusing the nonce) with a newly
 	// calculated gas price
 	EthTxResendTimeout time.Duration
+	// EthNoReuseNonce disables reusing nonces of pending transactions for
+	// new replacement transactions
+	EthNoReuseNonce bool
 	// MaxGasPrice is the maximum gas price allowed for ethereum
 	// transactions
 	MaxGasPrice *big.Int

coordinator/txmanager.go (+5 -2)

@@ -499,7 +499,8 @@ func (t *TxManager) Run(ctx context.Context) {
 				continue
 			}
 			now := time.Now()
-			if confirm == nil && now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
+			if !t.cfg.EthNoReuseNonce && confirm == nil &&
+				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
 				log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
 					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
 				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
@@ -573,7 +574,9 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	t.accNextNonce = accNonce
+	if !t.cfg.EthNoReuseNonce {
+		t.accNextNonce = accNonce
+	}
 	return nil
 }
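Both hunks gate existing behavior behind the new flag: with EthNoReuseNonce set, a stuck forgeBatch transaction is no longer resent in place (a resend reuses the old nonce with a recalculated gas price), and the account's next-nonce counter is not rewound after bad batch infos are removed. A condensed, runnable sketch of the resend guard, written as a hypothetical standalone helper that is not part of the commit:

package main

import (
	"fmt"
	"time"
)

// shouldResend distills the guard added to TxManager.Run above: an
// unconfirmed tx is replaced (same nonce, new gas price) only when
// nonce reuse is allowed and the resend timeout has elapsed.
func shouldResend(noReuseNonce, unconfirmed bool, sentAt time.Time, timeout time.Duration) bool {
	return !noReuseNonce && unconfirmed && time.Since(sentAt) > timeout
}

func main() {
	sentAt := time.Now().Add(-3 * time.Minute)
	fmt.Println(shouldResend(false, true, sentAt, 2*time.Minute)) // true: resend with old nonce
	fmt.Println(shouldResend(true, true, sentAt, 2*time.Minute))  // false: flag disables replacement
}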

go.sum (+2 -0)

@@ -602,6 +602,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=

node/node.go (+1 -0)

@@ -294,6 +294,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			SyncRetryInterval:      cfg.Coordinator.SyncRetryInterval.Duration,
 			EthClientAttempts:      cfg.Coordinator.EthClient.Attempts,
 			EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
+			EthNoReuseNonce:        cfg.Coordinator.EthClient.NoReuseNonce,
 			EthTxResendTimeout:     cfg.Coordinator.EthClient.TxResendTimeout.Duration,
 			MaxGasPrice:            cfg.Coordinator.EthClient.MaxGasPrice,
 			TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,

synchronizer/synchronizer.go (+46 -40)

@@ -333,23 +333,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
 	return nil
 }
 
-// firstBatchBlockNum is the blockNum of first batch in that block, if any
-func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
-	slot := common.Slot{
-		SlotNum:          s.stats.Sync.Auction.CurrentSlot.SlotNum,
-		ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
-	}
+// updateCurrentSlot updates the slot with information of the current slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+// hasBatch is true when the last synced block contained at least one batch.
+func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
 	// We want the next block because the current one is already mined
 	blockNum := s.stats.Sync.LastBlock.Num + 1
 	slotNum := s.consts.Auction.SlotNum(blockNum)
+	firstBatchBlockNum := s.stats.Sync.LastBlock.Num
 	if reset {
 		// Using this query only to know if there
 		dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
 		if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-			return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
+			return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
 		} else if tracerr.Unwrap(err) == sql.ErrNoRows {
-			firstBatchBlockNum = nil
+			hasBatch = false
 		} else {
-			firstBatchBlockNum = &dbFirstBatchBlockNum
+			hasBatch = true
+			firstBatchBlockNum = dbFirstBatchBlockNum
 		}
 		slot.ForgerCommitment = false
 	} else if slotNum > slot.SlotNum {
@@ -360,11 +362,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
 	slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 	// If Synced, update the current coordinator
 	if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-		if err := s.setSlotCoordinator(&slot); err != nil {
-			return nil, tracerr.Wrap(err)
+		if err := s.setSlotCoordinator(slot); err != nil {
+			return tracerr.Wrap(err)
 		}
-		if firstBatchBlockNum != nil &&
-			s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
+		if hasBatch &&
+			s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
 			int64(s.vars.Auction.SlotDeadline) {
 			slot.ForgerCommitment = true
 		}
@@ -373,57 +375,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
 		// BEGIN SANITY CHECK
 		canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
 		if err != nil {
-			return nil, tracerr.Wrap(err)
+			return tracerr.Wrap(err)
 		}
 		if !canForge {
-			return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+			return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 				"differs from smart contract: %+v", slot))
 		}
 		// END SANITY CHECK
 	}
-	return &slot, nil
+	return nil
 }
 
-func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
+// updateNextSlot updates the slot with information of the next slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
 	// We want the next block because the current one is already mined
 	blockNum := s.stats.Sync.LastBlock.Num + 1
 	slotNum := s.consts.Auction.SlotNum(blockNum) + 1
-	slot := common.Slot{
-		SlotNum:          slotNum,
-		ForgerCommitment: false,
-	}
+	slot.SlotNum = slotNum
+	slot.ForgerCommitment = false
 	slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 	// If Synced, update the current coordinator
 	if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-		if err := s.setSlotCoordinator(&slot); err != nil {
-			return nil, tracerr.Wrap(err)
+		if err := s.setSlotCoordinator(slot); err != nil {
+			return tracerr.Wrap(err)
 		}
 		// TODO: Remove this SANITY CHECK once this code is tested enough
 		// BEGIN SANITY CHECK
 		canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
 		if err != nil {
-			return nil, tracerr.Wrap(err)
+			return tracerr.Wrap(err)
 		}
 		if !canForge {
-			return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+			return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 				"differs from smart contract: %+v", slot))
 		}
 		// END SANITY CHECK
 	}
-	return &slot, nil
+	return nil
 }
 
-func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
-	current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
-	if err != nil {
+// updateCurrentNextSlotIfSync updates the current and next slot. Information
+// about the forger address that is allowed to forge is only updated if we are
+// Synced.
+func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
+	current := s.stats.Sync.Auction.CurrentSlot
+	next := s.stats.Sync.Auction.NextSlot
+	if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
 		return tracerr.Wrap(err)
 	}
-	next, err := s.getNextSlot()
-	if err != nil {
+	if err := s.updateNextSlot(&next); err != nil {
 		return tracerr.Wrap(err)
 	}
-	s.stats.UpdateCurrentNextSlot(current, next)
+	s.stats.UpdateCurrentNextSlot(&current, &next)
 	return nil
 }
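The hasBatch refactor feeds the slot-deadline rule above: a coordinator keeps its ForgerCommitment only if the slot's first batch lands within the first SlotDeadline blocks of the slot. A toy, runnable illustration of that check, with assumed slot arithmetic (relativeBlock modeled as a block's offset inside its slot; the real computation is consts.Auction.RelativeBlock):

package main

import "fmt"

// relativeBlock is an assumed model of the auction arithmetic: slots
// span blocksPerSlot blocks starting at the genesis block, and a
// block's relative position is its offset inside its slot.
func relativeBlock(blockNum, genesis, blocksPerSlot int64) int64 {
	return (blockNum - genesis) % blocksPerSlot
}

func main() {
	// Toy values: slots of 40 blocks, deadline in the first 20.
	const genesis, blocksPerSlot, slotDeadline int64 = 1000, 40, 20
	firstBatchBlockNum := int64(1015) // first batch of the current slot
	hasBatch := true
	forgerCommitment := hasBatch &&
		relativeBlock(firstBatchBlockNum, genesis, blocksPerSlot) < slotDeadline
	fmt.Println(forgerCommitment) // true: the batch landed before the slot deadline
}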
@@ -634,11 +640,11 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 			&rollupData.Batches[batchesLen-1].Batch,
 			lastL1BatchBlock, lastForgeL1TxsNum)
 	}
-	var firstBatchBlockNum *int64
+	hasBatch := false
 	if len(rollupData.Batches) > 0 {
-		firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
+		hasBatch = true
 	}
-	if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
 		return nil, nil, tracerr.Wrap(err)
 	}
@@ -757,15 +763,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		s.vars.WDelayer = *wDelayer
 	}
 
-	batchNum, err := s.historyDB.GetLastBatchNum()
+	batch, err := s.historyDB.GetLastBatch()
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
 		return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
 	}
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		batchNum = 0
+		batch = &common.Batch{}
 	}
 
-	err = s.stateDB.Reset(batchNum)
+	err = s.stateDB.Reset(batch.BatchNum)
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
 	}
@@ -787,9 +793,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		lastForgeL1TxsNum = &n
 	}
 
-	s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
+	s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
 
-	if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
 		return tracerr.Wrap(err)
 	}
 	return nil
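resetState now loads the whole last batch rather than just its number, so s.stats.UpdateSync can be handed the batch itself. When the HistoryDB holds no batches yet, a zero-value common.Batch stands in, and BatchNum 0 resets the StateDB to its genesis checkpoint. A minimal runnable sketch of that sql.ErrNoRows fallback idiom, using simplified stand-in types:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

// Batch stands in for common.Batch; only BatchNum matters here.
type Batch struct{ BatchNum int64 }

// getLastBatch mimics historyDB.GetLastBatch: sql.ErrNoRows signals
// that the database holds no batches yet.
func getLastBatch(empty bool) (*Batch, error) {
	if empty {
		return nil, sql.ErrNoRows
	}
	return &Batch{BatchNum: 42}, nil
}

func main() {
	batch, err := getLastBatch(true)
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		panic(err)
	}
	if errors.Is(err, sql.ErrNoRows) {
		batch = &Batch{} // zero value: BatchNum == 0, i.e. reset to genesis
	}
	fmt.Println(batch.BatchNum) // 0
}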
