Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 03:16:45 +01:00)

Compare commits: feature/up...feature/up (10 commits)

b38fea17d6, 0ffd69ad2c, aca106a2ee, e737aebd28, 104b277de0,
39eb715b98, 33634a00b1, d284baf8c4, deede9541b, e2006403d7
@@ -38,7 +38,7 @@ var (
 )
 
 func retSQLErr(err error, c *gin.Context) {
-	log.Warn("HTTP API SQL request error", "err", err)
+	log.Warnw("HTTP API SQL request error", "err", err)
 	if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
 		// https://www.postgresql.org/docs/current/errcodes-appendix.html
 		if sqlErr.Code == "23505" {
@@ -59,7 +59,7 @@ func retSQLErr(err error, c *gin.Context) {
 }
 
 func retBadReq(err error, c *gin.Context) {
-	log.Warn("HTTP API Bad request error", "err", err)
+	log.Warnw("HTTP API Bad request error", "err", err)
 	c.JSON(http.StatusBadRequest, errorMsg{
 		Message: err.Error(),
 	})
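The only change in these two handlers is log.Warn becoming log.Warnw. Assuming the node's log package wraps zap's SugaredLogger (as hermez-node's does), the difference is plain-message versus structured logging; a minimal standalone sketch:

    package main

    import (
    	"errors"

    	"go.uber.org/zap"
    )

    func main() {
    	log := zap.NewExample().Sugar()
    	err := errors.New("pq: duplicate key value")
    	// Warn treats all arguments as one message and concatenates them:
    	log.Warn("HTTP API SQL request error", "err", err)
    	// Warnw takes a message plus structured key-value pairs, which is
    	// what these call sites were already passing:
    	log.Warnw("HTTP API SQL request error", "err", err)
    }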
@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
 // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
 // it can just roll back the internal copy.
 func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-	return bb.localStateDB.Reset(batchNum, fromSynchronizer)
+	return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
 }
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
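A note on the pattern: tracerr.Wrap records a stack trace at the wrap site, and (as with the upstream tracerr library this is assumed to follow) returns nil for a nil error, so wrapping a plain return like this is safe. A small sketch of the API as used throughout this changeset:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/hermeznetwork/tracerr"
    )

    func failingOp() error { return errors.New("disk full") }

    func main() {
    	// Wrap annotates the error with the stack trace of this call site;
    	// Unwrap recovers the original error for comparisons such as the
    	// *pq.Error type assertion in the API handlers above.
    	err := tracerr.Wrap(failingOp())
    	fmt.Println(tracerr.Unwrap(err)) // "disk full"
    }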
@@ -39,12 +39,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
 TokenHEZName = "Hermez Network Token"
 
 [Coordinator]
-# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
 # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
-ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
 # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
 ConfirmBlocks = 10
-L1BatchTimeoutPerc = 0.999
+L1BatchTimeoutPerc = 0.6
+StartSlotBlocksDelay = 2
+ScheduleBatchBlocksAheadCheck = 3
+SendBatchBlocksMarginCheck = 1
 ProofServerPollInterval = "1s"
 ForgeRetryInterval = "500ms"
 SyncRetryInterval = "1s"
@@ -83,8 +86,11 @@ ReceiptLoopInterval = "500ms"
 CheckLoopInterval = "500ms"
 Attempts = 4
 AttemptsDelay = "500ms"
+TxResendTimeout = "2m"
+NoReuseNonce = false
 CallGasLimit = 300000
 GasPriceDiv = 100
+MaxGasPrice = "5000000000"
 
 [Coordinator.EthClient.Keystore]
 Path = "/tmp/iden3-test/hermez/ethkeystore"
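The L1BatchTimeoutPerc drop from 0.999 to 0.6 changes how early a forced L1Batch is scheduled relative to the on-chain timeout (per the field comment in the config diffs below, it is the portion of the timeout range that triggers the schedule). A worked example with an illustrative timeout value:

    package main

    import "fmt"

    func main() {
    	const l1BatchTimeout = 100 // blocks allowed between L1 batches (illustrative)
    	lastL1BatchBlock := int64(5000)
    	for _, perc := range []float64{0.999, 0.6} {
    		deadline := lastL1BatchBlock + int64(float64(l1BatchTimeout)*perc)
    		fmt.Printf("perc=%.3f: schedule a forced L1Batch by block %d\n", perc, deadline)
    	}
    	// With 0.6 the coordinator schedules the forced L1Batch with a wide
    	// safety margin before the on-chain timeout instead of at the last moment.
    }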
@@ -27,6 +27,24 @@ type Batch struct {
 	TotalFeesUSD *float64 `meddler:"total_fees_usd"`
 }
 
+// NewEmptyBatch creates a new empty batch
+func NewEmptyBatch() *Batch {
+	return &Batch{
+		BatchNum:           0,
+		EthBlockNum:        0,
+		ForgerAddr:         ethCommon.Address{},
+		CollectedFees:      make(map[TokenID]*big.Int),
+		FeeIdxsCoordinator: make([]Idx, 0),
+		StateRoot:          big.NewInt(0),
+		NumAccounts:        0,
+		LastIdx:            0,
+		ExitRoot:           big.NewInt(0),
+		ForgeL1TxsNum:      nil,
+		SlotNum:            0,
+		TotalFeesUSD:       nil,
+	}
+}
+
 // BatchNum identifies a batch
 type BatchNum int64
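NewEmptyBatch spells out every zero value, which also guarantees that the map and *big.Int fields are non-nil and immediately usable. A minimal illustration (the struct is narrowed down to two fields for the sketch):

    package main

    import "math/big"

    type TokenID uint32

    type Batch struct {
    	CollectedFees map[TokenID]*big.Int
    	StateRoot     *big.Int
    }

    func main() {
    	var zero Batch
    	_ = zero
    	// zero.CollectedFees[0] = big.NewInt(1) // would panic: assignment to nil map
    	// zero.StateRoot.Cmp(...)              // would panic: nil pointer

    	ready := Batch{
    		CollectedFees: make(map[TokenID]*big.Int),
    		StateRoot:     big.NewInt(0),
    	}
    	ready.CollectedFees[0] = big.NewInt(1)  // safe
    	_ = ready.StateRoot.Cmp(big.NewInt(0))  // safe: non-nil big.Int
    }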
@@ -75,3 +93,23 @@ func NewBatchData() *BatchData {
 		Batch: Batch{},
 	}
 }
 
+// BatchSync is a subset of Batch that contains the fields needed by the
+// synchronizer and coordinator
+// type BatchSync struct {
+// 	BatchNum    BatchNum          `meddler:"batch_num"`
+// 	EthBlockNum int64             `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
+// 	ForgerAddr  ethCommon.Address `meddler:"forger_addr"`
+// 	StateRoot   *big.Int          `meddler:"state_root,bigint"`
+// 	SlotNum     int64             `meddler:"slot_num"` // Slot in which the batch is forged
+// }
+//
+// func NewBatchSync() *BatchSync {
+// 	return &BatchSync{
+// 		BatchNum:    0,
+// 		EthBlockNum: 0,
+// 		ForgerAddr:  ethCommon.Address,
+// 		StateRoot:   big.NewInt(0),
+// 		SlotNum:     0,
+// 	}
+// }
@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
 	if blockNum >= c.GenesisBlockNum {
 		return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 	}
-	return -1
+	// This result will be negative
+	return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 }
 
 // SlotBlocks returns the first and the last block numbers included in that slot
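The replaced `return -1` hid a subtlety the new code exposes: with Go's truncating integer division, blocks less than one full slot before genesis still map to slot 0, and only earlier blocks produce a negative slot number. Worked example with illustrative constants:

    package main

    import "fmt"

    func main() {
    	const genesis, blocksPerSlot = int64(1000), int64(40)
    	for _, blockNum := range []int64{990, 960, 959, 1039, 1040} {
    		// Go integer division truncates toward zero, so -10/40 == 0.
    		fmt.Println(blockNum, "->", (blockNum-genesis)/blocksPerSlot)
    	}
    	// Output: 990 -> 0 (truncation!), 960 -> -1, 959 -> -1, 1039 -> 0, 1040 -> 1
    }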
@@ -3,6 +3,7 @@ package config
 import (
 	"fmt"
 	"io/ioutil"
+	"math/big"
 	"time"
 
 	"github.com/BurntSushi/toml"
@@ -51,6 +52,27 @@ type Coordinator struct {
 	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
 	// timeout that will trigger a schedule to forge an L1Batch
 	L1BatchTimeoutPerc float64 `validate:"required"`
+	// StartSlotBlocksDelay is the number of blocks of delay to wait before
+	// starting the pipeline when we reach a slot in which we can forge.
+	StartSlotBlocksDelay int64
+	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
+	// the forger address is checked to be allowed to forge (apart from
+	// checking the next block), used to decide when to stop scheduling new
+	// batches (by stopping the pipeline).
+	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
+	// is 5, even though at block 11 we canForge, the pipeline will be
+	// stopped if we can't forge at block 15.
+	// This value should be the expected number of blocks it takes between
+	// scheduling a batch and having it mined.
+	ScheduleBatchBlocksAheadCheck int64
+	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
+	// which the coordinator is also checked to be allowed to forge, apart
+	// from the next block; used to decide when to stop sending batches to
+	// the smart contract.
+	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
+	// 5, even though at block 11 we canForge, the batch will be discarded
+	// if we can't forge at block 15.
+	SendBatchBlocksMarginCheck int64
 	// ProofServerPollInterval is the waiting interval between polling the
 	// ProofServer while waiting for a particular status
 	ProofServerPollInterval Duration `validate:"required"`
@@ -101,6 +123,9 @@ type Coordinator struct {
 	// calls, except for methods where a particular gas limit is
 	// hardcoded because it's known to be a big value
 	CallGasLimit uint64 `validate:"required"`
+	// MaxGasPrice is the maximum gas price allowed for ethereum
+	// transactions
+	MaxGasPrice *big.Int `validate:"required"`
 	// GasPriceDiv is the gas price division
 	GasPriceDiv uint64 `validate:"required"`
 	// CheckLoopInterval is the waiting interval between receipt
@@ -112,6 +137,13 @@ type Coordinator struct {
 	// AttemptsDelay is the delay between attempts to do an eth client
 	// RPC call
 	AttemptsDelay Duration `validate:"required"`
+	// TxResendTimeout is the timeout after which a non-mined
+	// ethereum transaction will be resent (reusing the nonce) with
+	// a newly calculated gas price
+	TxResendTimeout Duration `validate:"required"`
+	// NoReuseNonce disables reusing nonces of pending transactions for
+	// new replacement transactions
+	NoReuseNonce bool
 	// Keystore is the ethereum keystore where private keys are kept
 	Keystore struct {
 		// Path to the keystore
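How the three new knobs fit together, as a condensed sketch of the scheduling checks added to the coordinator later in this changeset (the auction state and names are simplified stand-ins):

    package main

    import "fmt"

    func canForgeAt(block int64) bool { return block < 1015 } // illustrative auction state

    func main() {
    	const startSlotBlocksDelay = 2
    	const scheduleBatchBlocksAheadCheck = 3
    	nextBlock, relativeBlock := int64(1011), int64(1)

    	// Only start the pipeline once we are a few blocks into our slot...
    	start := canForgeAt(nextBlock) && relativeBlock >= startSlotBlocksDelay
    	// ...and keep scheduling batches only while we can still forge a few
    	// blocks ahead, the expected time for a scheduled batch to be mined.
    	keepScheduling := canForgeAt(nextBlock) &&
    		canForgeAt(nextBlock+scheduleBatchBlocksAheadCheck)
    	fmt.Println(start, keepScheduling) // false (too early in the slot), true
    }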
@@ -47,6 +47,8 @@ type Debug struct {
 	MineBlockNum int64
 	// SendBlockNum is the blockNum when the batch was sent to ethereum
 	SendBlockNum int64
+	// ResendNum is the number of times the tx has been resent
+	ResendNum int
 	// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
 	// was scheduled
 	LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
 	// StartToSendDelay is the delay between starting a batch and sending
 	// it to ethereum, in seconds
 	StartToSendDelay float64
+	// StartToMineDelay is the delay between starting a batch and having
+	// it mined, in seconds
+	StartToMineDelay float64
+	// SendToMineDelay is the delay between sending a batch tx and having
+	// it mined, in seconds
+	SendToMineDelay float64
 }
 
 // BatchInfo contains the Batch information
 type BatchInfo struct {
+	PipelineNum int
 	BatchNum    common.BatchNum
 	ServerProof prover.Client
 	ZKInputs    *common.ZKInputs
@@ -83,7 +92,13 @@ type BatchInfo struct {
 	ForgeBatchArgs *eth.RollupForgeBatchArgs
 	// FeesInfo
 	EthTx *types.Transaction
+	// SendTimestamp is the time when the batch was sent to ethereum
+	SendTimestamp time.Time
 	Receipt *types.Receipt
+	// Fail is true if:
+	// - The receipt status is failed
+	// - A previous parent batch is failed
+	Fail bool
 	Debug Debug
 }
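The three Debug delay fields are pairwise differences of the same three timestamps. A tiny sketch with illustrative times:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	start := time.Now()                // batch forging starts
    	send := start.Add(3 * time.Second) // forge-batch tx sent to ethereum
    	mine := send.Add(15 * time.Second) // tx mined

    	startToSend := send.Sub(start).Seconds() // Debug.StartToSendDelay
    	startToMine := mine.Sub(start).Seconds() // Debug.StartToMineDelay
    	sendToMine := mine.Sub(send).Seconds()   // Debug.SendToMineDelay
    	fmt.Println(startToSend, startToMine, sendToMine) // 3 18 15
    }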
@@ -3,8 +3,8 @@ package coordinator
 import (
 	"context"
 	"fmt"
+	"math/big"
 	"os"
-	"strings"
 	"sync"
 	"time"
@@ -42,6 +42,29 @@ type Config struct {
 	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
 	// timeout that will trigger a schedule to forge an L1Batch
 	L1BatchTimeoutPerc float64
+	// StartSlotBlocksDelay is the number of blocks of delay to wait before
+	// starting the pipeline when we reach a slot in which we can forge.
+	StartSlotBlocksDelay int64
+	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
+	// the forger address is checked to be allowed to forge (apart from
+	// checking the next block), used to decide when to stop scheduling new
+	// batches (by stopping the pipeline).
+	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
+	// is 5, even though at block 11 we canForge, the pipeline will be
+	// stopped if we can't forge at block 15.
+	// This value should be the expected number of blocks it takes between
+	// scheduling a batch and having it mined.
+	ScheduleBatchBlocksAheadCheck int64
+	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
+	// which the coordinator is also checked to be allowed to forge, apart
+	// from the next block; used to decide when to stop sending batches to
+	// the smart contract.
+	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
+	// 5, even though at block 11 we canForge, the batch will be discarded
+	// if we can't forge at block 15.
+	// This value should be the expected number of blocks it takes between
+	// sending a batch and having it mined.
+	SendBatchBlocksMarginCheck int64
 	// EthClientAttempts is the number of attempts to do an eth client RPC
 	// call before giving up
 	EthClientAttempts int
@@ -54,6 +77,16 @@ type Config struct {
 	// EthClientAttemptsDelay is the delay between attempts to do an eth client
 	// RPC call
 	EthClientAttemptsDelay time.Duration
+	// EthTxResendTimeout is the timeout after which a non-mined ethereum
+	// transaction will be resent (reusing the nonce) with a newly
+	// calculated gas price
+	EthTxResendTimeout time.Duration
+	// EthNoReuseNonce disables reusing nonces of pending transactions for
+	// new replacement transactions
+	EthNoReuseNonce bool
+	// MaxGasPrice is the maximum gas price allowed for ethereum
+	// transactions
+	MaxGasPrice *big.Int
 	// TxManagerCheckInterval is the waiting interval between receipt
 	// checks of ethereum transactions in the TxManager
 	TxManagerCheckInterval time.Duration
@@ -61,6 +94,8 @@ type Config struct {
 	// in JSON in every step/update of the pipeline
 	DebugBatchPath string
 	Purger         PurgerCfg
+	// VerifierIdx is the index of the verifier contract registered in the
+	// smart contract
 	VerifierIdx       uint8
 	TxProcessorConfig txprocessor.Config
 }
@@ -74,10 +109,17 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
 	}
 }
 
+type fromBatch struct {
+	BatchNum   common.BatchNum
+	ForgerAddr ethCommon.Address
+	StateRoot  *big.Int
+}
+
 // Coordinator implements the Coordinator type
 type Coordinator struct {
 	// State
-	pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
+	pipelineNum       int       // Pipeline sequential number. The first pipeline is 1
+	pipelineFromBatch fromBatch // batch from which we started the pipeline
 	provers []prover.Client
 	consts  synchronizer.SCConsts
 	vars    synchronizer.SCVariables
@@ -97,6 +139,7 @@ type Coordinator struct {
 	cancel context.CancelFunc
 
 	pipeline *Pipeline
+	lastNonFailedBatchNum common.BatchNum
 
 	purger    *Purger
 	txManager *TxManager
@@ -139,7 +182,12 @@ func NewCoordinator(cfg Config,
 
 	ctx, cancel := context.WithCancel(context.Background())
 	c := Coordinator{
-		pipelineBatchNum: -1,
+		pipelineNum: 0,
+		pipelineFromBatch: fromBatch{
+			BatchNum:   0,
+			ForgerAddr: ethCommon.Address{},
+			StateRoot:  big.NewInt(0),
+		},
 		provers: serverProofs,
 		consts:  *scConsts,
 		vars:    *initSCVars,
@@ -183,8 +231,9 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
 }
 
 func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
-	return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
-		c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
+	c.pipelineNum++
+	return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
+		c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
 }
 
 // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -205,6 +254,9 @@ type MsgSyncReorg struct {
 // MsgStopPipeline indicates a signal to reset the pipeline
 type MsgStopPipeline struct {
 	Reason string
+	// FailedBatchNum indicates the first batchNum that failed in the
+	// pipeline. If FailedBatchNum is 0, it should be ignored.
+	FailedBatchNum common.BatchNum
 }
 
 // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -215,27 +267,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
 	}
 }
 
+func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
+	if update.Rollup != nil {
+		vars.Rollup = *update.Rollup
+	}
+	if update.Auction != nil {
+		vars.Auction = *update.Auction
+	}
+	if update.WDelayer != nil {
+		vars.WDelayer = *update.WDelayer
+	}
+}
+
 func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		c.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		c.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		c.vars.WDelayer = *vars.WDelayer
-	}
+	updateSCVars(&c.vars, vars)
 }
 
 func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
 	currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
+	if blockNum < auctionConstants.GenesisBlockNum {
+		log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
+			"genesis", auctionConstants.GenesisBlockNum)
+		return false
+	}
 	var slot *common.Slot
 	if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
 		slot = currentSlot
 	} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
 		slot = nextSlot
 	} else {
-		log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
+		log.Warnw("canForge: requested blockNum is outside current and next slot",
 			"blockNum", blockNum, "currentSlot", currentSlot,
 			"nextSlot", nextSlot,
 		)
@@ -244,16 +305,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
 	anyoneForge := false
 	if !slot.ForgerCommitment &&
 		auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
-		log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
+		log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
 			"block", blockNum)
 		anyoneForge = true
 	}
 	if slot.Forger == addr || anyoneForge {
 		return true
 	}
+	log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
 	return false
 }
 
+func (c *Coordinator) canForgeAt(blockNum int64) bool {
+	return canForge(&c.consts.Auction, &c.vars.Auction,
+		&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
+		c.cfg.ForgerAddress, blockNum)
+}
+
 func (c *Coordinator) canForge() bool {
 	blockNum := c.stats.Eth.LastBlock.Num + 1
 	return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -262,12 +330,24 @@ func (c *Coordinator) canForge() bool {
 }
 
 func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
-	canForge := c.canForge()
+	nextBlock := c.stats.Eth.LastBlock.Num + 1
+	canForge := c.canForgeAt(nextBlock)
+	if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
+		canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
+	}
 	if c.pipeline == nil {
-		if canForge {
+		relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
+		if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
+			log.Debugw("Coordinator: delaying pipeline start due to "+
+				"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
+				relativeBlock, c.cfg.StartSlotBlocksDelay)
+		} else if canForge {
 			log.Infow("Coordinator: forging state begin", "block",
-				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
-			batchNum := common.BatchNum(stats.Sync.LastBatch)
+				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
+			batchNum := stats.Sync.LastBatch.BatchNum
+			if c.lastNonFailedBatchNum > batchNum {
+				batchNum = c.lastNonFailedBatchNum
+			}
 			var err error
 			if c.pipeline, err = c.newPipeline(ctx); err != nil {
 				return tracerr.Wrap(err)
@@ -276,7 +356,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 				c.pipeline = nil
 				return tracerr.Wrap(err)
 			}
-			c.pipelineBatchNum = batchNum
+			// c.pipelineBatchNum = batchNum
 		}
 	} else {
 		if !canForge {
@@ -293,18 +373,17 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 		// 		return err
 		// 	}
 		// }
-		if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
-			if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
-				return tracerr.Wrap(err)
-			}
-		}
-		_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
-			stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
-		if err != nil {
-			return tracerr.Wrap(err)
-		}
-		_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
-		if err != nil {
+		// if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)) {
+		// 	if err := c.txSelector.Reset(stats.Sync.LastBatch.BatchNum); err != nil {
+		// 		return tracerr.Wrap(err)
+		// 	}
+		// }
+		if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
+			stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
+			return tracerr.Wrap(err)
+		}
+		if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
+			int64(stats.Sync.LastBatch.BatchNum)); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -331,33 +410,42 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
 	if c.pipeline != nil {
 		c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
 	}
-	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
-		// There's been a reorg and the batch from which the pipeline
-		// was started was in a block that was discarded. The batch
-		// may not be in the main chain, so we stop the pipeline as a
-		// precaution (it will be started again once the node is in
-		// sync).
-		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
-			"sync.LastBatch", c.stats.Sync.LastBatch,
-			"c.pipelineBatchNum", c.pipelineBatchNum)
-		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
+	if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
+		c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0 {
+		// There's been a reorg and the batch state root from which the
+		// pipeline was started has changed (probably because it was in
+		// a block that was discarded), and it was sent by a different
+		// coordinator than us. That batch may never be in the main
+		// chain, so we stop the pipeline (it will be started again
+		// once the node is in sync).
+		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
+			"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
+			"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
+			"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
+		c.txManager.DiscardPipeline(ctx, c.pipelineNum)
+		if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
 	return nil
 }
 
-func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
-	if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
+// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
+// the next pipeline will start from the last state of the synchronizer;
+// otherwise, it will start from failedBatchNum-1.
+func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
+	batchNum := c.stats.Sync.LastBatch.BatchNum
+	if failedBatchNum != 0 {
+		batchNum = failedBatchNum - 1
+	}
+	if err := c.l2DB.Reorg(batchNum); err != nil {
 		return tracerr.Wrap(err)
 	}
 	if c.pipeline != nil {
 		c.pipeline.Stop(c.ctx)
 		c.pipeline = nil
 	}
-	if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
-		// TODO: Check that we are in a slot in which we can't forge
-	}
+	c.lastNonFailedBatchNum = batchNum
 	return nil
 }
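A worked example of the new failedBatchNum handling: if batch 7 failed inside the pipeline, the coordinator rolls the L2 pool back to batch 6 and remembers it (lastNonFailedBatchNum), so the next pipeline re-forges from there rather than from the synchronizer's older last batch. Minimal model of the arithmetic:

    package main

    import "fmt"

    func nextStartBatch(syncLastBatch, failedBatchNum int64) int64 {
    	batchNum := syncLastBatch
    	if failedBatchNum != 0 {
    		batchNum = failedBatchNum - 1
    	}
    	return batchNum
    }

    func main() {
    	fmt.Println(nextStartBatch(4, 7)) // 6: restart just before the failed batch
    	fmt.Println(nextStartBatch(4, 0)) // 4: no failure, follow the synchronizer
    }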
@@ -373,7 +461,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
 		}
 	case MsgStopPipeline:
 		log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
-		if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
+		if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
 			return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
 		}
 	default:
@@ -2,6 +2,7 @@ package coordinator
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"math/big"
@@ -11,6 +12,7 @@ import (
 	"time"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
 	"github.com/hermeznetwork/hermez-node/batchbuilder"
 	"github.com/hermeznetwork/hermez-node/common"
 	dbUtils "github.com/hermeznetwork/hermez-node/db"
@@ -261,8 +263,8 @@ func TestCoordinatorFlow(t *testing.T) {
 	var stats synchronizer.Stats
 	stats.Eth.LastBlock = *ethClient.CtlLastBlock()
 	stats.Sync.LastBlock = stats.Eth.LastBlock
-	stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
-	stats.Sync.LastBatch = stats.Eth.LastBatch
+	stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
+	stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
 	canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
 	require.NoError(t, err)
 	var slot common.Slot
@@ -279,7 +281,7 @@ func TestCoordinatorFlow(t *testing.T) {
 	// Copy stateDB to synchronizer if there was a new batch
 	source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
 	dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
-	if stats.Sync.LastBatch != 0 {
+	if stats.Sync.LastBatch.BatchNum != 0 {
 		if _, err := os.Stat(dest); os.IsNotExist(err) {
 			log.Infow("Making pebble checkpoint for sync",
 				"source", source, "dest", dest)
@@ -566,3 +568,8 @@ func TestCoordinatorStress(t *testing.T) {
 // TODO: Test forgeBatch
 // TODO: Test waitServerProof
 // TODO: Test handleReorg
+
+func TestFoo(t *testing.T) {
+	a := tracerr.Wrap(fmt.Errorf("AAA: %w", core.ErrNonceTooLow))
+	fmt.Println(errors.Is(a, core.ErrNonceTooLow))
+}
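What TestFoo above exercises: errors.Is walks both fmt.Errorf's %w wrapping and tracerr's wrapping (assuming, as the test itself checks, that tracerr errors expose the underlying error), so a sentinel like core.ErrNonceTooLow stays detectable through both layers. A standalone sketch using only the standard library:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNonceTooLow = errors.New("nonce too low") // stand-in for core.ErrNonceTooLow

    func main() {
    	wrapped := fmt.Errorf("send tx: %w", errNonceTooLow)
    	fmt.Println(errors.Is(wrapped, errNonceTooLow)) // true
    	// Is compares error identity along the chain, not message text:
    	fmt.Println(errors.Is(wrapped, errors.New("nonce too low"))) // false
    }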
@@ -2,6 +2,7 @@ package coordinator
 
 import (
 	"context"
+	"database/sql"
 	"fmt"
 	"math/big"
 	"sync"
@@ -24,19 +25,30 @@ type statsVars struct {
 	Vars  synchronizer.SCVariablesPtr
 }
 
+type state struct {
+	batchNum                     common.BatchNum
+	lastScheduledL1BatchBlockNum int64
+	lastForgeL1TxsNum            int64
+}
+
 // Pipeline manages the forging of batches with parallel server proofs
 type Pipeline struct {
+	num    int
 	cfg    Config
 	consts synchronizer.SCConsts
 
 	// state
-	batchNum                     common.BatchNum
-	lastScheduledL1BatchBlockNum int64
-	lastForgeL1TxsNum            int64
+	state state
+	// batchNum common.BatchNum
+	// lastScheduledL1BatchBlockNum int64
+	// lastForgeL1TxsNum int64
 	started bool
+	rw            sync.RWMutex
+	errAtBatchNum common.BatchNum
 
 	proversPool *ProversPool
 	provers     []prover.Client
+	coord       *Coordinator
 	txManager   *TxManager
 	historyDB   *historydb.HistoryDB
 	l2DB        *l2db.L2DB
@@ -53,14 +65,28 @@ type Pipeline struct {
 	cancel context.CancelFunc
 }
 
+func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
+	p.rw.Lock()
+	defer p.rw.Unlock()
+	p.errAtBatchNum = batchNum
+}
+
+func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
+	p.rw.RLock()
+	defer p.rw.RUnlock()
+	return p.errAtBatchNum
+}
+
 // NewPipeline creates a new Pipeline
 func NewPipeline(ctx context.Context,
 	cfg Config,
+	num int, // Pipeline sequential number
 	historyDB *historydb.HistoryDB,
 	l2DB *l2db.L2DB,
 	txSelector *txselector.TxSelector,
 	batchBuilder *batchbuilder.BatchBuilder,
 	purger *Purger,
+	coord *Coordinator,
 	txManager *TxManager,
 	provers []prover.Client,
 	scConsts *synchronizer.SCConsts,
@@ -79,6 +105,7 @@ func NewPipeline(ctx context.Context,
 		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
 	}
 	return &Pipeline{
+		num:         num,
 		cfg:         cfg,
 		historyDB:   historyDB,
 		l2DB:        l2DB,
@@ -87,6 +114,7 @@ func NewPipeline(ctx context.Context,
 		provers:     provers,
 		proversPool: proversPool,
 		purger:      purger,
+		coord:       coord,
 		txManager:   txManager,
 		consts:      *scConsts,
 		statsVarsCh: make(chan statsVars, queueLen),
@@ -104,33 +132,67 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
 // reset pipeline state
 func (p *Pipeline) reset(batchNum common.BatchNum,
 	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
-	p.batchNum = batchNum
-	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
+	p.state = state{
+		batchNum:                     batchNum,
+		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
+		lastScheduledL1BatchBlockNum: 0,
+	}
 	p.stats = *stats
 	p.vars = *vars
-	p.lastScheduledL1BatchBlockNum = 0
 
-	err := p.txSelector.Reset(p.batchNum)
+	// Reset the StateDB in TxSelector and BatchBuilder from the
+	// synchronizer only if the checkpoint we reset from either:
+	// a. Doesn't exist in the TxSelector/BatchBuilder
+	// b. The batch has already been synced by the synchronizer and has a
+	//    different MTRoot than the BatchBuilder
+	// Otherwise, reset from the local checkpoint.
+
+	// First attempt to reset from the local checkpoint, if such a checkpoint exists
+	existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	err = p.batchBuilder.Reset(p.batchNum, true)
+	fromSynchronizerTxSelector := !existsTxSelector
+	if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
+		return tracerr.Wrap(err)
+	}
+	existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
+	fromSynchronizerBatchBuilder := !existsBatchBuilder
+	if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
+		return tracerr.Wrap(err)
+	}
+
+	// After the reset, check that if the batch exists in the historyDB, its
+	// stateRoot matches the local one; if not, force a reset from the
+	// synchronizer
+	batch, err := p.historyDB.GetBatch(p.state.batchNum)
+	if tracerr.Unwrap(err) == sql.ErrNoRows {
+		// nothing to do
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
+		localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
+		if batch.StateRoot.Cmp(localStateRoot) != 0 {
+			log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
+				"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
+			// The StateRoot from the synchronizer doesn't match the StateRoot
+			// from the batchBuilder, force a reset from the synchronizer
+			if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
+				return tracerr.Wrap(err)
+			}
+			if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
+				return tracerr.Wrap(err)
+			}
+		}
+	}
 	return nil
 }
 
 func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		p.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		p.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		p.vars.WDelayer = *vars.WDelayer
-	}
+	updateSCVars(&p.vars, vars)
 }
 
 // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
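The new reset logic boils down to three cases; a condensed sketch of the decision (mirroring the code above, with the checks reduced to booleans):

    package main

    import "fmt"

    // resetSource decides where Pipeline.reset restores state from.
    func resetSource(haveLocalCheckpoint, syncedRootMatches bool) string {
    	if !haveLocalCheckpoint {
    		return "synchronizer" // case a: no local checkpoint to roll back to
    	}
    	if !syncedRootMatches {
    		return "synchronizer" // case b: local MT root diverged from the synced batch
    	}
    	return "local checkpoint" // cheap path: just roll back the internal copy
    }

    func main() {
    	fmt.Println(resetSource(false, true)) // synchronizer
    	fmt.Println(resetSource(true, false)) // synchronizer
    	fmt.Println(resetSource(true, true))  // local checkpoint
    }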
@@ -143,7 +205,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
 	} else if err != nil {
 		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
 			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
-				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
+				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
 				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
 		} else {
 			log.Errorw("forgeBatch", "err", err)
@@ -199,15 +261,32 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				p.stats = statsVars.Stats
 				p.syncSCVars(statsVars.Vars)
 			case <-time.After(waitDuration):
-				batchNum = p.batchNum + 1
+				// Once errAtBatchNum != 0, we stop forging
+				// batches because there's been an error and we
+				// wait for the pipeline to be stopped.
+				if p.getErrAtBatchNum() != 0 {
+					waitDuration = p.cfg.ForgeRetryInterval
+					continue
+				}
+				batchNum = p.state.batchNum + 1
 				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
 				if p.ctx.Err() != nil {
 					continue
+				} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
+					waitDuration = p.cfg.ForgeRetryInterval
+					continue
 				} else if err != nil {
-					waitDuration = p.cfg.SyncRetryInterval
+					p.setErrAtBatchNum(batchNum)
+					waitDuration = p.cfg.ForgeRetryInterval
+					p.coord.SendMsg(p.ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf(
+							"Pipeline.handleForgeBatch: %v", err),
+						FailedBatchNum: batchNum,
+					})
 					continue
 				}
-				p.batchNum = batchNum
+				p.state.batchNum = batchNum
 				select {
 				case batchChSentServerProof <- batchInfo:
 				case <-p.ctx.Done():
@@ -225,16 +304,28 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				p.wg.Done()
 				return
 			case batchInfo := <-batchChSentServerProof:
+				// Once errAtBatchNum != 0, we stop forging
+				// batches because there's been an error and we
+				// wait for the pipeline to be stopped.
+				if p.getErrAtBatchNum() != 0 {
+					continue
+				}
 				err := p.waitServerProof(p.ctx, batchInfo)
-				// We are done with this serverProof, add it back to the pool
-				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
-				batchInfo.ServerProof = nil
 				if p.ctx.Err() != nil {
 					continue
 				} else if err != nil {
 					log.Errorw("waitServerProof", "err", err)
+					p.setErrAtBatchNum(batchInfo.BatchNum)
+					p.coord.SendMsg(p.ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf(
+							"Pipeline.waitServerProof: %v", err),
+						FailedBatchNum: batchInfo.BatchNum,
+					})
					continue
 				}
+				// We are done with this serverProof, add it back to the pool
+				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
+				// batchInfo.ServerProof = nil
 				p.txManager.AddBatch(p.ctx, batchInfo)
 			}
 		}
@@ -284,8 +375,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
+	// Structure to accumulate data and metadata of the batch
+	batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
 	batchInfo.Debug.StartTimestamp = time.Now()
 	batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
 
@@ -300,22 +391,19 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	var auths [][]byte
 	var coordIdxs []common.Idx
 
+	// TODO: If there are no txs and we are behind the timeout, skip
+	// forging a batch and return a particular error that can be handled
+	// in the loop where handleForgeBatch is called to retry after an
+	// interval
+
 	// 1. Decide if we forge L2Tx or L1+L2Tx
 	if p.shouldL1L2Batch(batchInfo) {
 		batchInfo.L1Batch = true
-		defer func() {
-			// If there's no error, update the parameters related
-			// to the last L1Batch forged
-			if err == nil {
-				p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
-				p.lastForgeL1TxsNum++
-			}
-		}()
-		if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
+		if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
 			return nil, tracerr.Wrap(errLastL1BatchNotSynced)
 		}
 		// 2a: L1+L2 txs
-		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
+		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
@@ -324,6 +412,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
+
+		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
+		p.state.lastForgeL1TxsNum++
 	} else {
 		// 2b: only L2 txs
 		coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
@@ -399,12 +490,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
 func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
 	// Take the lastL1BatchBlockNum as the biggest between the last
 	// scheduled one, and the synchronized one.
-	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
+	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
 	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
 		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
 	}
 	// Set Debug information
-	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
+	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
 	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
 	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
 	batchInfo.Debug.L1BatchBlockScheduleDeadline =
@@ -77,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
 	//
 	// Scheduled L1Batch
 	//
-	pipeline.lastScheduledL1BatchBlockNum = startBlock
+	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
 	stats.Sync.LastL1BatchBlock = startBlock - 10
 
 	// We are one block before the timeout range * 0.5
@@ -172,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
 	// users with positive balances
 	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
 	syncStats := sync.Stats()
-	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
+	batchNum := syncStats.Sync.LastBatch.BatchNum
 	syncSCVars := sync.SCVars()
 
 	pipeline, err := coord.newPipeline(ctx)
@@ -2,6 +2,7 @@ package coordinator
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/big"
 	"time"
@@ -9,6 +10,7 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
@@ -35,12 +37,22 @@ type TxManager struct {
 	vars        synchronizer.SCVariables
 	statsVarsCh chan statsVars
 
-	queue []*BatchInfo
+	discardPipelineCh chan int // int refers to the pipelineNum
+
+	minPipelineNum int
+	queue          Queue
 	// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
 	lastSuccessBatch common.BatchNum
-	lastPendingBatch common.BatchNum
-	lastSuccessNonce uint64
-	lastPendingNonce uint64
+	// lastPendingBatch common.BatchNum
+	// accNonce is the account nonce in the last mined block (due to mined txs)
+	accNonce uint64
+	// accNextNonce is the nonce that we should use to send the next tx.
+	// In some cases this will be a reused nonce of an already pending tx.
+	accNextNonce uint64
+	// accPendingNonce is the pending nonce of the account due to pending txs
+	// accPendingNonce uint64
+
+	lastSentL1BatchBlockNum int64
 }
 
 // NewTxManager creates a new TxManager
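The accNonce/accNextNonce split supports the new resend behavior described in the config comments earlier: a stuck (non-mined) tx is replaced by resending with the same nonce and a recalculated gas price, unless NoReuseNonce is set. A simplified standalone model of that bookkeeping (an illustrative sketch, not the TxManager's actual method):

    package main

    import "fmt"

    type txManager struct {
    	accNonce     uint64 // nonce at the last mined block
    	accNextNonce uint64 // nonce to use for the next send
    	noReuseNonce bool
    }

    func (t *txManager) nonceForSend(resendingStuckTx bool, stuckNonce uint64) uint64 {
    	if resendingStuckTx && !t.noReuseNonce {
    		return stuckNonce // replacement tx: same nonce, bumped gas price
    	}
    	n := t.accNextNonce
    	t.accNextNonce++
    	return n
    }

    func main() {
    	t := &txManager{accNonce: 10, accNextNonce: 10}
    	fmt.Println(t.nonceForSend(false, 0)) // 10: fresh tx
    	fmt.Println(t.nonceForSend(true, 10)) // 10: resend replaces the pending tx
    	fmt.Println(t.nonceForSend(false, 0)) // 11: next fresh tx
    }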
@@ -54,19 +66,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
+	accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
 	if err != nil {
 		return nil, err
 	}
-	lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
-	if err != nil {
-		return nil, err
-	}
-	if lastSuccessNonce != lastPendingNonce {
-		return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
-			lastSuccessNonce, lastPendingNonce))
-	}
-	log.Infow("TxManager started", "nonce", lastSuccessNonce)
+	// accPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
+	// if err != nil {
+	// 	return nil, err
+	// }
+	// if accNonce != accPendingNonce {
+	// 	return nil, tracerr.Wrap(fmt.Errorf("currentNonce (%v) != accPendingNonce (%v)",
+	// 		accNonce, accPendingNonce))
+	// }
+	log.Infow("TxManager started", "nonce", accNonce)
 	return &TxManager{
 		cfg:       *cfg,
 		ethClient: ethClient,
@@ -74,6 +86,7 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 		coord:       coord,
 		batchCh:     make(chan *BatchInfo, queueLen),
 		statsVarsCh: make(chan statsVars, queueLen),
+		discardPipelineCh: make(chan int, queueLen),
 		account: accounts.Account{
 			Address: *address,
 		},
@@ -82,8 +95,11 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 
 		vars: *initSCVars,
 
-		lastSuccessNonce: lastSuccessNonce,
-		lastPendingNonce: lastPendingNonce,
+		minPipelineNum: 0,
+		queue:          NewQueue(),
+		accNonce:       accNonce,
+		accNextNonce:   accNonce,
+		// accPendingNonce: accPendingNonce,
 	}, nil
 }
@@ -104,16 +120,17 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
 	}
 }
 
+// DiscardPipeline is a thread safe method to notify about a discarded pipeline
+// due to a reorg
+func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
+	select {
+	case t.discardPipelineCh <- pipelineNum:
+	case <-ctx.Done():
+	}
+}
+
 func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		t.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		t.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		t.vars.WDelayer = *vars.WDelayer
-	}
+	updateSCVars(&t.vars, vars)
 }
 
 // NewAuth generates a new auth object for an ethereum transaction
@@ -123,6 +140,7 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	inc := new(big.Int).Set(gasPrice)
+	// TODO: Replace this by a value of percentage
 	const gasPriceDiv = 100
 	inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
 	gasPrice.Add(gasPrice, inc)
@@ -141,29 +159,75 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 	return auth, nil
 }
 
-func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
-	// TODO: Check if we can forge in the next blockNum, abort if we can't
-	batchInfo.Debug.Status = StatusSent
-	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
-	batchInfo.Debug.SendTimestamp = time.Now()
-	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
-		batchInfo.Debug.StartTimestamp).Seconds()
+func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
+	nextBlock := t.stats.Eth.LastBlock.Num + 1
+	if !t.canForgeAt(nextBlock) {
+		return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
+	}
+	if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
+		return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
+	}
+	margin := t.cfg.SendBatchBlocksMarginCheck
+	if margin != 0 {
+		if !t.canForgeAt(nextBlock + margin) {
+			return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
+				margin, nextBlock))
+		}
+		if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
+			return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
+				margin, nextBlock))
+		}
+	}
+	return nil
+}
+
+func addPerc(v *big.Int, p int64) *big.Int {
+	r := new(big.Int).Set(v)
+	r.Mul(r, big.NewInt(p))
+	// nolint reason: to calculate percentages we divide by 100
+	r.Div(r, big.NewInt(100)) //nolint:gomnd
+	return r.Add(v, r)
+}
+
+func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
 	var ethTx *types.Transaction
 	var err error
 	auth, err := t.NewAuth(ctx)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
-	t.lastPendingNonce++
+	auth.Nonce = big.NewInt(int64(t.accNextNonce))
+	if resend {
+		auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
+	}
 	for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
+		if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
+			return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
+				auth.GasPrice, t.cfg.MaxGasPrice))
+		}
+		// RollupForgeBatch() calls ethclient.SendTransaction()
 		ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
-		if err != nil {
-			// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
-			// 	log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
-			// 		"block", t.stats.Eth.LastBlock.Num+1)
-			// 	return tracerr.Wrap(err)
-			// }
+		if errors.Is(err, core.ErrNonceTooLow) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
+				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
+			auth.Nonce.Add(auth.Nonce, big.NewInt(1))
+			attempt--
+		} else if errors.Is(err, core.ErrNonceTooHigh) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
+				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
+			auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
+			attempt--
+		} else if errors.Is(err, core.ErrUnderpriced) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
+				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
+			auth.GasPrice = addPerc(auth.GasPrice, 10)
+			attempt--
+		} else if errors.Is(err, core.ErrReplaceUnderpriced) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
+				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
+			auth.GasPrice = addPerc(auth.GasPrice, 10)
+			attempt--
+		} else if err != nil {
 			log.Errorw("TxManager ethClient.RollupForgeBatch",
 				"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
 				"batchNum", batchInfo.BatchNum)
@@ -179,10 +243,30 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
 	}
+	if !resend {
+		t.accNextNonce = auth.Nonce.Uint64() + 1
+	}
 	batchInfo.EthTx = ethTx
-	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
+	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
+	now := time.Now()
+	batchInfo.SendTimestamp = now
+
+	if resend {
+		batchInfo.Debug.ResendNum++
+	}
+	batchInfo.Debug.Status = StatusSent
+	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
+	batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
+	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
+		batchInfo.Debug.StartTimestamp).Seconds()
 	t.cfg.debugBatchStore(batchInfo)
-	t.lastPendingBatch = batchInfo.BatchNum
+	// t.lastPendingBatch = batchInfo.BatchNum
+	if !resend {
+		if batchInfo.L1Batch {
+			t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
+		}
+	}
 	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -225,13 +309,19 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
 func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
 	receipt := batchInfo.Receipt
 	if receipt != nil {
+		if batchInfo.EthTx.Nonce()+1 > t.accNonce {
+			t.accNonce = batchInfo.EthTx.Nonce() + 1
+		}
 		if receipt.Status == types.ReceiptStatusFailed {
 			batchInfo.Debug.Status = StatusFailed
 			t.cfg.debugBatchStore(batchInfo)
 			_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
-			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
+			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
 				"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
 				"err", err)
+			if batchInfo.BatchNum <= t.lastSuccessBatch {
+				t.lastSuccessBatch = batchInfo.BatchNum - 1
+			}
 			return nil, tracerr.Wrap(fmt.Errorf(
 				"ethereum transaction receipt status is failed: %w", err))
 		} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -239,6 +329,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
 			batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
 			batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
 				batchInfo.Debug.StartBlockNum
+			if batchInfo.Debug.StartToMineDelay == 0 {
+				if block, err := t.ethClient.EthBlockByNumber(ctx,
+					receipt.BlockNumber.Int64()); err != nil {
+					log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
+				} else {
+					batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
+						batchInfo.Debug.SendTimestamp).Seconds()
+					batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
+						batchInfo.Debug.StartTimestamp).Seconds()
+				}
+			}
 			t.cfg.debugBatchStore(batchInfo)
 			if batchInfo.BatchNum > t.lastSuccessBatch {
 				t.lastSuccessBatch = batchInfo.BatchNum
@@ -250,9 +351,72 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
 	return nil, nil
 }
 
+// TODO:
+// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
+
+// Queue of BatchInfos
+type Queue struct {
+	list []*BatchInfo
+	// nonceByBatchNum map[common.BatchNum]uint64
+	next int
+}
+
+// NewQueue returns a new queue
+func NewQueue() Queue {
+	return Queue{
+		list: make([]*BatchInfo, 0),
+		// nonceByBatchNum: make(map[common.BatchNum]uint64),
+		next: 0,
+	}
+}
+
+// Len is the length of the queue
+func (q *Queue) Len() int {
+	return len(q.list)
+}
+
+// At returns the BatchInfo at position (or nil if position is out of bounds)
+func (q *Queue) At(position int) *BatchInfo {
+	if position >= len(q.list) {
+		return nil
+	}
+	return q.list[position]
+}
+
+// Next returns the next BatchInfo (or nil if queue is empty)
+func (q *Queue) Next() (int, *BatchInfo) {
+	if len(q.list) == 0 {
+		return 0, nil
+	}
+	defer func() { q.next = (q.next + 1) % len(q.list) }()
+	return q.next, q.list[q.next]
+}
+
+// Remove removes the BatchInfo at position
+func (q *Queue) Remove(position int) {
+	// batchInfo := q.list[position]
+	// delete(q.nonceByBatchNum, batchInfo.BatchNum)
+	q.list = append(q.list[:position], q.list[position+1:]...)
+	if len(q.list) == 0 {
+		q.next = 0
+	} else {
+		q.next = position % len(q.list)
+	}
+}
+
+// Push adds a new BatchInfo
+func (q *Queue) Push(batchInfo *BatchInfo) {
+	q.list = append(q.list, batchInfo)
+	// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
+}
+
+// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
+// 	nonce, ok := q.nonceByBatchNum[batchNum]
+// 	return nonce, ok
+// }
+
 // Run the TxManager
 func (t *TxManager) Run(ctx context.Context) {
-	next := 0
 	waitDuration := longWaitDuration
 
 	var statsVars statsVars
@@ -263,7 +427,7 @@ func (t *TxManager) Run(ctx context.Context) {
 	t.stats = statsVars.Stats
 	t.syncSCVars(statsVars.Vars)
 	log.Infow("TxManager: received initial statsVars",
-		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
+		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
 
 	for {
 		select {
@@ -273,8 +437,27 @@ func (t *TxManager) Run(ctx context.Context) {
 		case statsVars := <-t.statsVarsCh:
 			t.stats = statsVars.Stats
 			t.syncSCVars(statsVars.Vars)
+		case pipelineNum := <-t.discardPipelineCh:
+			t.minPipelineNum = pipelineNum + 1
+			if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
+				continue
+			} else if err != nil {
+				log.Errorw("TxManager: removeBadBatchInfos", "err", err)
+				continue
+			}
 		case batchInfo := <-t.batchCh:
-			if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
+			if batchInfo.PipelineNum < t.minPipelineNum {
+				log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
+					"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
+			}
+			if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
+				log.Warnw("TxManager: shouldSend", "err", err,
+					"batch", batchInfo.BatchNum)
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
+				continue
+			}
+			if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
 				continue
 			} else if err != nil {
 				// If we reach here it's because our ethNode has
@@ -282,19 +465,20 @@ func (t *TxManager) Run(ctx context.Context) {
 				// ethereum. This could be due to the ethNode
 				// failure, or an invalid transaction (that
 				// can't be mined)
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
+				log.Warnw("TxManager: forgeBatch send failed", "err", err,
+					"batch", batchInfo.BatchNum)
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch send: %v", err)})
 				continue
 			}
-			t.queue = append(t.queue, batchInfo)
+			t.queue.Push(batchInfo)
 			waitDuration = t.cfg.TxManagerCheckInterval
 		case <-time.After(waitDuration):
-			if len(t.queue) == 0 {
+			queuePosition, batchInfo := t.queue.Next()
+			if batchInfo == nil {
 				waitDuration = longWaitDuration
 				continue
 			}
-			current := next
-			next = (current + 1) % len(t.queue)
-			batchInfo := t.queue[current]
 			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
 				continue
 			} else if err != nil { //nolint:staticcheck
@@ -304,7 +488,8 @@ func (t *TxManager) Run(ctx context.Context) {
 				// if it was not mined, mined and successful or
 				// mined and failed. This could be due to the
 				// ethNode failure.
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
 			}
 
 			confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -312,32 +497,108 @@ func (t *TxManager) Run(ctx context.Context) {
 				continue
 			} else if err != nil { //nolint:staticcheck
 				// Transaction was rejected
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
+				if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
+					continue
+				} else if err != nil {
+					log.Errorw("TxManager: removeBadBatchInfos", "err", err)
+					continue
 				}
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
+				continue
 			}
-			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
-				log.Debugw("TxManager tx for RollupForgeBatch confirmed",
+			now := time.Now()
+			if !t.cfg.EthNoReuseNonce && confirm == nil &&
+				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
+				log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
+					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
+				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
+					continue
+				} else if err != nil {
+					// If we reach here it's because our ethNode has
+					// been unable to send the transaction to
+					// ethereum. This could be due to the ethNode
+					// failure, or an invalid transaction (that
+					// can't be mined)
+					log.Warnw("TxManager: forgeBatch resend failed", "err", err,
 						"batch", batchInfo.BatchNum)
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
+					t.coord.SendMsg(ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
+					continue
 				}
 			}
 
+			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
+				log.Debugw("TxManager: forgeBatch tx confirmed",
+					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
+				t.queue.Remove(queuePosition)
+			}
 		}
 	}
 }
 
-// nolint reason: this function will be used in the future
-//nolint:unused
-func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
+func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
+	next := 0
+	// batchNum := 0
+	for {
+		batchInfo := t.queue.At(next)
+		if batchInfo == nil {
+			break
+		}
+		if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
+			return nil
+		} else if err != nil {
+			// Our ethNode is giving an error different
+			// than "not found" when getting the receipt
+			// for the transaction, so we can't figure out
+			// if it was not mined, mined and successful or
+			// mined and failed. This could be due to the
+			// ethNode failure.
+			next++
+			continue
+		}
+		confirm, err := t.handleReceipt(ctx, batchInfo)
+		if ctx.Err() != nil {
+			return nil
+		} else if err != nil {
+			// Transaction was rejected
+			if t.minPipelineNum <= batchInfo.PipelineNum {
+				t.minPipelineNum = batchInfo.PipelineNum + 1
+			}
+			t.queue.Remove(next)
+			continue
+		}
+		// If tx is pending but is from a cancelled pipeline, remove it
+		// from the queue
+		if confirm == nil {
+			if batchInfo.PipelineNum < t.minPipelineNum {
+				// batchNum++
+				t.queue.Remove(next)
+				continue
+			}
+		}
+		next++
+	}
+	accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
+	if err != nil {
+		return err
+	}
+	if !t.cfg.EthNoReuseNonce {
+		t.accNextNonce = accNonce
+	}
+	return nil
+}
+
+func (t *TxManager) canForgeAt(blockNum int64) bool {
 	return canForge(&t.consts.Auction, &t.vars.Auction,
-		&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
+		&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
 		t.cfg.ForgerAddress, blockNum)
 }
+
+func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
+	lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
+	if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
+		lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
+	}
+	return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
+}
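The new Queue drives the receipt-checking branch of Run: Next returns the item under an internal cursor and advances it modulo the length, so successive timer ticks walk the pending forge transactions round-robin, and Remove re-clamps the cursor after a deletion. A self-contained sketch of that cursor behaviour, re-typed here for illustration with strings standing in for *BatchInfo (this is not repository code):

package main

import "fmt"

// toyQueue mirrors the cursor logic of the coordinator's Queue.
type toyQueue struct {
	list []string
	next int
}

// Next returns the position and item under the cursor, then advances the
// cursor modulo the length, so repeated calls cycle over all items.
func (q *toyQueue) Next() (int, string) {
	if len(q.list) == 0 {
		return 0, ""
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

// Remove deletes the item at position and re-clamps the cursor so it lands
// on the element that slid into the freed slot (or wraps to the start).
func (q *toyQueue) Remove(position int) {
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

func main() {
	q := toyQueue{list: []string{"batch1", "batch2", "batch3"}}
	for i := 0; i < 4; i++ {
		pos, item := q.Next()
		fmt.Println(pos, item) // 0 batch1, 1 batch2, 2 batch3, 0 batch1
	}
	q.Remove(1)           // drop batch2; cursor re-clamped to position 1
	fmt.Println(q.Next()) // 1 batch3
}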
coordinator/txmanager_test.go | 15 (new file)
@@ -0,0 +1,15 @@
+package coordinator
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAddPerc(t *testing.T) {
+	assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
+	assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
+	assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
+	assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
+}
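Worth noting: addPerc works in integer arithmetic, so the percentage term truncates toward zero before being added back. For the gas-price bumps above (wei-denominated values) the rounding loss is negligible, but for very small inputs the bump can vanish entirely. Two more cases in the spirit of TestAddPerc, with illustrative values that are not part of the repository's test:

	assert.Equal(t, "16", addPerc(big.NewInt(15), 10).String()) // 15*10/100 = 1 (1.5 truncated), so 16
	assert.Equal(t, "99", addPerc(big.NewInt(99), 1).String())  // 99*1/100 = 0, so no bump at all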
@@ -166,6 +166,19 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
 	return nil
 }
 
+// GetBatch returns the batch with the given batchNum
+func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
+	var batch common.Batch
+	err := meddler.QueryRow(
+		hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
+		batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
+		batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
+		batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
+		batchNum,
+	)
+	return &batch, err
+}
+
 // GetBatchAPI returns the batch with the given batchNum
 func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
 	batch := &BatchAPI{}
@@ -320,6 +333,18 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
 	return batchNum, tracerr.Wrap(row.Scan(&batchNum))
 }
 
+// GetLastBatch returns the last forged batch
+func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
+	var batch common.Batch
+	err := meddler.QueryRow(
+		hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
+		batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
+		batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
+		batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
+	)
+	return &batch, err
+}
+
 // GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
 func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
 	row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
@@ -200,6 +200,10 @@ func TestBatches(t *testing.T) {
 	fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
 	assert.NoError(t, err)
 	assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
+	// Test GetLastBatch
+	fetchedLastBatch, err := historyDB.GetLastBatch()
+	assert.NoError(t, err)
+	assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
 	// Test GetLastL1TxsNum
 	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
 	assert.NoError(t, err)
@@ -208,6 +212,12 @@ func TestBatches(t *testing.T) {
 	fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
 	assert.NoError(t, err)
 	assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
+	// Test GetBatch
+	fetchedBatch, err := historyDB.GetBatch(1)
+	require.NoError(t, err)
+	assert.Equal(t, &batches[0], fetchedBatch)
+	_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
+	assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
 }
 
 func TestBids(t *testing.T) {
@@ -425,12 +425,13 @@ func (k *KVDB) MakeCheckpoint() error {
 	}
 
 	// if checkpoint BatchNum already exists in disk, delete it
-	if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
+	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
 		if err := os.RemoveAll(checkpointPath); err != nil {
 			return tracerr.Wrap(err)
 		}
-	} else if err != nil && !os.IsNotExist(err) {
-		return tracerr.Wrap(err)
 	}
 
 	// execute Checkpoint
@@ -451,12 +452,25 @@ func (k *KVDB) MakeCheckpoint() error {
 	return nil
 }
 
+// CheckpointExists returns true if the checkpoint exists
+func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
+	source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+	if _, err := os.Stat(source); os.IsNotExist(err) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
 // DeleteCheckpoint removes if exists the checkpoint of the given batchNum
 func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
 	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
 
 	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
 		return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
+	} else if err != nil {
+		return tracerr.Wrap(err)
 	}
 
 	return os.RemoveAll(checkpointPath)
@@ -520,6 +534,8 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
 	if _, err := os.Stat(source); os.IsNotExist(err) {
 		// if kvdb does not have checkpoint at batchNum, return err
 		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
+	} else if err != nil {
+		return tracerr.Wrap(err)
 	}
 	// By locking we allow calling MakeCheckpointFromTo from multiple
 	// places at the same time for the same stateDB. This allows the
@@ -533,12 +549,13 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
 
 func pebbleMakeCheckpoint(source, dest string) error {
 	// Remove dest folder (if it exists) before doing the checkpoint
-	if _, err := os.Stat(dest); !os.IsNotExist(err) {
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
 		if err := os.RemoveAll(dest); err != nil {
 			return tracerr.Wrap(err)
 		}
-	} else if err != nil && !os.IsNotExist(err) {
-		return tracerr.Wrap(err)
 	}
 
 	sto, err := pebble.NewPebbleStorage(source, false)
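The KVDB changes above all replace `!os.IsNotExist(err)` guards with an explicit three-way branch, so an unexpected os.Stat failure (for example a permission error) now propagates instead of being treated as "the path exists". A minimal sketch of the corrected idiom, with an illustrative helper name that is not part of the repository:

package kvutil

import "os"

// removeIfExists mirrors the three-branch os.Stat pattern adopted above:
// absence is fine, unexpected Stat errors propagate, and only a
// confirmed-present path is removed.
func removeIfExists(path string) error {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return nil // nothing to delete
	} else if err != nil {
		return err // e.g. permission denied: don't assume existence
	}
	return os.RemoveAll(path)
}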
@@ -498,11 +498,17 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
 	}, nil
 }
 
+// CheckpointExists returns true if the checkpoint exists
+func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
+	return l.db.CheckpointExists(batchNum)
+}
+
 // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
 // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
 func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	if fromSynchronizer {
+		log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
 		if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
 			return tracerr.Wrap(err)
 		}
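LocalStateDB now forwards the KVDB's CheckpointExists, which lets callers decide between rolling back to a local checkpoint and re-copying the synchronizer state. A hypothetical caller-side guard (the wrapper and its logic are illustrative only, not taken from the repository):

package coordutil

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

// resetTo is a hypothetical wrapper: reuse a local checkpoint when one
// exists, otherwise rebuild the state from the synchronizer's copy.
func resetTo(l *statedb.LocalStateDB, batchNum common.BatchNum) error {
	exists, err := l.CheckpointExists(batchNum)
	if err != nil {
		return err
	}
	return l.Reset(batchNum, !exists)
}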
go.mod | 3
@@ -6,6 +6,7 @@ require (
 	github.com/BurntSushi/toml v0.3.1
 	github.com/dghubble/sling v1.3.0
 	github.com/ethereum/go-ethereum v1.9.25
+	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
 	github.com/getkin/kin-openapi v0.22.0
 	github.com/gin-contrib/cors v1.3.1
 	github.com/gin-gonic/gin v1.5.0
@@ -23,6 +24,8 @@ require (
 	github.com/mitchellh/mapstructure v1.3.0
 	github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
 	github.com/russross/meddler v1.0.0
+	github.com/sirupsen/logrus v1.5.0 // indirect
+	github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
 	github.com/stretchr/testify v1.6.1
 	github.com/urfave/cli/v2 v2.2.0
 	go.uber.org/zap v1.16.0

go.sum | 10
@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
 github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -84,6 +86,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
+github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
+github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -169,6 +173,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -596,6 +602,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -614,6 +622,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
node/node.go | 14
@@ -2,6 +2,7 @@ package node
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"sync"
@@ -293,6 +294,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			SyncRetryInterval:      cfg.Coordinator.SyncRetryInterval.Duration,
 			EthClientAttempts:      cfg.Coordinator.EthClient.Attempts,
 			EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
+			EthNoReuseNonce:        cfg.Coordinator.EthClient.NoReuseNonce,
+			EthTxResendTimeout:     cfg.Coordinator.EthClient.TxResendTimeout.Duration,
+			MaxGasPrice:            cfg.Coordinator.EthClient.MaxGasPrice,
 			TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
 			DebugBatchPath:         cfg.Coordinator.Debug.BatchPath,
 			Purger: coordinator.PurgerCfg{
@@ -479,11 +483,15 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
 		if stats.Synced() {
 			if err := n.nodeAPI.api.UpdateNetworkInfo(
 				stats.Eth.LastBlock, stats.Sync.LastBlock,
-				common.BatchNum(stats.Eth.LastBatch),
+				common.BatchNum(stats.Eth.LastBatchNum),
 				stats.Sync.Auction.CurrentSlot.SlotNum,
 			); err != nil {
 				log.Errorw("API.UpdateNetworkInfo", "err", err)
 			}
+		} else {
+			n.nodeAPI.api.UpdateNetworkInfoBlock(
+				stats.Eth.LastBlock, stats.Sync.LastBlock,
+			)
 		}
 	}
 }
@@ -565,10 +573,14 @@ func (n *Node) StartSynchronizer() {
 				if n.ctx.Err() != nil {
 					continue
 				}
+				if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
+					log.Warnw("Synchronizer.Sync", "err", err)
+				} else {
 					log.Errorw("Synchronizer.Sync", "err", err)
 				}
 			}
 		}
+		}
 	}()
 
 	n.wg.Add(1)
@@ -25,12 +25,12 @@ type Stats struct {
 		Updated       time.Time
 		FirstBlockNum int64
 		LastBlock     common.Block
-		LastBatch     int64
+		LastBatchNum  int64
 	}
 	Sync struct {
 		Updated   time.Time
 		LastBlock common.Block
-		LastBatch int64
+		LastBatch common.Batch
 		// LastL1BatchBlock is the last ethereum block in which an
 		// l1Batch was forged
 		LastL1BatchBlock int64
@@ -77,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
 }
 
 // UpdateSync updates the synchronizer stats
-func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
+func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
 	lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
 	now := time.Now()
 	s.rw.Lock()
 	s.Sync.LastBlock = *lastBlock
 	if lastBatch != nil {
-		s.Sync.LastBatch = int64(*lastBatch)
+		s.Sync.LastBatch = *lastBatch
 	}
 	if lastL1BatchBlock != nil {
 		s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -107,14 +107,14 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	lastBatch, err := ethClient.RollupLastForgedBatch()
+	lastBatchNum, err := ethClient.RollupLastForgedBatch()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
 	s.rw.Lock()
 	s.Eth.Updated = now
 	s.Eth.LastBlock = *lastBlock
-	s.Eth.LastBatch = lastBatch
+	s.Eth.LastBatchNum = lastBatchNum
 	s.rw.Unlock()
 	return nil
 }
@@ -139,6 +139,10 @@ func (s *StatsHolder) CopyStats() *Stats {
 		sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
 			common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
 	}
+	if s.Sync.LastBatch.StateRoot != nil {
+		sCopy.Sync.LastBatch.StateRoot =
+			common.CopyBigInt(s.Sync.LastBatch.StateRoot)
+	}
 	s.rw.RUnlock()
 	return &sCopy
 }
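The new branch in CopyStats matters because Stats is handed to other goroutines as a snapshot: *big.Int fields are pointers, so a plain struct copy would still alias the synchronizer's live value. A minimal, self-contained demonstration of the aliasing that the deep copy avoids (illustrative code, not from the repository):

package main

import (
	"fmt"
	"math/big"
)

type snapshot struct{ StateRoot *big.Int }

func main() {
	orig := snapshot{StateRoot: big.NewInt(7)}
	shallow := orig // copies the pointer, not the value
	deep := snapshot{StateRoot: new(big.Int).Set(orig.StateRoot)}
	orig.StateRoot.SetInt64(8) // mutate the original in place
	fmt.Println(shallow.StateRoot, deep.StateRoot) // prints: 8 7
}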
@@ -152,9 +156,9 @@ func (s *StatsHolder) blocksPerc() float64 {
|
|||||||
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
|
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
|
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
|
||||||
return float64(batchNum) * 100.0 /
|
return float64(batchNum) * 100.0 /
|
||||||
float64(s.Eth.LastBatch)
|
float64(s.Eth.LastBatchNum)
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartBlockNums sets the first block used to start tracking the smart
|
// StartBlockNums sets the first block used to start tracking the smart
|
||||||
@@ -329,23 +333,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// firstBatchBlockNum is the blockNum of first batch in that block, if any
|
// updateCurrentSlot updates the slot with information of the current slot.
|
||||||
func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
|
// The information abouth which coordinator is allowed to forge is only updated
|
||||||
slot := common.Slot{
|
// when we are Synced.
|
||||||
SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
|
// hasBatch is true when the last synced block contained at least one batch.
|
||||||
ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
|
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
|
||||||
}
|
|
||||||
// We want the next block because the current one is already mined
|
// We want the next block because the current one is already mined
|
||||||
blockNum := s.stats.Sync.LastBlock.Num + 1
|
blockNum := s.stats.Sync.LastBlock.Num + 1
|
||||||
slotNum := s.consts.Auction.SlotNum(blockNum)
|
slotNum := s.consts.Auction.SlotNum(blockNum)
|
||||||
|
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
|
||||||
if reset {
|
if reset {
|
||||||
|
// Using this query only to know if there
|
||||||
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
|
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
|
||||||
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
|
return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
|
||||||
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
|
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||||
firstBatchBlockNum = nil
|
hasBatch = false
|
||||||
} else {
|
} else {
|
||||||
firstBatchBlockNum = &dbFirstBatchBlockNum
|
hasBatch = true
|
||||||
|
firstBatchBlockNum = dbFirstBatchBlockNum
|
||||||
}
|
}
|
||||||
slot.ForgerCommitment = false
|
slot.ForgerCommitment = false
|
||||||
} else if slotNum > slot.SlotNum {
|
} else if slotNum > slot.SlotNum {
|
||||||
@@ -356,11 +362,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
|
|||||||
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
||||||
// If Synced, update the current coordinator
|
// If Synced, update the current coordinator
|
||||||
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
||||||
if err := s.setSlotCoordinator(&slot); err != nil {
|
if err := s.setSlotCoordinator(slot); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if firstBatchBlockNum != nil &&
|
if hasBatch &&
|
||||||
s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
|
s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
|
||||||
int64(s.vars.Auction.SlotDeadline) {
|
int64(s.vars.Auction.SlotDeadline) {
|
||||||
slot.ForgerCommitment = true
|
slot.ForgerCommitment = true
|
||||||
}
|
}
|
||||||
@@ -369,57 +375,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
|
|||||||
// BEGIN SANITY CHECK
|
// BEGIN SANITY CHECK
|
||||||
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
|
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if !canForge {
|
if !canForge {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
||||||
"differs from smart contract: %+v", slot))
|
"differs from smart contract: %+v", slot))
|
||||||
}
|
}
|
||||||
// END SANITY CHECK
|
// END SANITY CHECK
|
||||||
}
|
}
|
||||||
return &slot, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
|
// updateNextSlot updates the slot with information of the next slot.
|
||||||
|
// The information abouth which coordinator is allowed to forge is only updated
|
||||||
|
// when we are Synced.
|
||||||
|
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
|
 	// We want the next block because the current one is already mined
 	blockNum := s.stats.Sync.LastBlock.Num + 1
 	slotNum := s.consts.Auction.SlotNum(blockNum) + 1
-	slot := common.Slot{
-		SlotNum:          slotNum,
-		ForgerCommitment: false,
-	}
+	slot.SlotNum = slotNum
+	slot.ForgerCommitment = false
 	slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 	// If Synced, update the current coordinator
 	if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-		if err := s.setSlotCoordinator(&slot); err != nil {
-			return nil, tracerr.Wrap(err)
+		if err := s.setSlotCoordinator(slot); err != nil {
+			return tracerr.Wrap(err)
 		}

 		// TODO: Remove this SANITY CHECK once this code is tested enough
 		// BEGIN SANITY CHECK
 		canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
 		if err != nil {
-			return nil, tracerr.Wrap(err)
+			return tracerr.Wrap(err)
 		}
 		if !canForge {
-			return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+			return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 				"differs from smart contract: %+v", slot))
 		}
 		// END SANITY CHECK
 	}
-	return &slot, nil
+	return nil
 }

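The getNextSlot → updateNextSlot change replaces a constructor that returned a fresh *common.Slot with a function that mutates a caller-owned slot, so fields filled by earlier syncs survive when the slot has not advanced. A hedged sketch of the pattern, with a reduced Slot type and illustrative slot arithmetic (both are assumptions, not the real definitions):

package main

import "fmt"

type Slot struct {
	SlotNum          int64
	StartBlock       int64
	EndBlock         int64
	ForgerCommitment bool
}

// updateNextSlot mutates the slot in place instead of allocating a new
// one; only the fields recomputed here change.
func updateNextSlot(slot *Slot, blockNum int64) error {
	slot.SlotNum = blockNum/40 + 1 // illustrative SlotNum arithmetic
	slot.ForgerCommitment = false
	slot.StartBlock = slot.SlotNum * 40
	slot.EndBlock = slot.StartBlock + 40
	return nil
}

func main() {
	var next Slot
	if err := updateNextSlot(&next, 120); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", next)
}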
-func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
-	current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
-	if err != nil {
+// updateCurrentNextSlotIfSync updates the current and next slot. Information
+// about forger address that is allowed to forge is only updated if we are
+// Synced.
+func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
+	current := s.stats.Sync.Auction.CurrentSlot
+	next := s.stats.Sync.Auction.NextSlot
+	if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
 		return tracerr.Wrap(err)
 	}
-	next, err := s.getNextSlot()
-	if err != nil {
+	if err := s.updateNextSlot(&next); err != nil {
 		return tracerr.Wrap(err)
 	}
-	s.stats.UpdateCurrentNextSlot(current, next)
+	s.stats.UpdateCurrentNextSlot(&current, &next)
 	return nil
 }

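Note the shape of the new function: it copies the slots already stored in the sync stats, updates the copies, and commits both back only after every step succeeded, so an error cannot leave the stats half-updated. A sketch of that copy-modify-commit shape, with hypothetical minimal types standing in for the real stats:

package main

import "fmt"

type Slot struct{ SlotNum int64 }

type Stats struct {
	CurrentSlot Slot
	NextSlot    Slot
}

// UpdateCurrentNextSlot commits both slots atomically from the
// caller's point of view.
func (s *Stats) UpdateCurrentNextSlot(current, next *Slot) {
	s.CurrentSlot = *current
	s.NextSlot = *next
}

func updateSlots(stats *Stats) error {
	// Work on copies of the stored slots...
	current := stats.CurrentSlot
	next := stats.NextSlot
	current.SlotNum++ // stand-ins for updateCurrentSlot/updateNextSlot
	next.SlotNum = current.SlotNum + 1
	// ...and write them back only after every step succeeded.
	stats.UpdateCurrentNextSlot(&current, &next)
	return nil
}

func main() {
	stats := &Stats{}
	if err := updateSlots(stats); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *stats)
}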
@@ -458,9 +468,9 @@ func (s *Synchronizer) init() error {
 		"ethLastBlock", s.stats.Eth.LastBlock,
 	)
 	log.Infow("Sync init batch",
-		"syncLastBatch", s.stats.Sync.LastBatch,
-		"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
-		"ethLastBatch", s.stats.Eth.LastBatch,
+		"syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
+		"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
+		"ethLastBatch", s.stats.Eth.LastBatchNum,
 	)
 	return nil
 }
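log.Infow follows the zap SugaredLogger convention of a message followed by alternating key/value pairs, which is why the hunk above can swap a whole struct (LastBatch) for a single field (LastBatch.BatchNum) without touching any format string. A sketch using go.uber.org/zap directly (assuming the repo's log package wraps it; values shown are placeholders):

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	log := logger.Sugar()

	// Keys and values alternate; values can be any type, so logging a
	// scalar field instead of the full struct is a drop-in change.
	log.Infow("Sync init batch",
		"syncLastBatch", 42,
		"syncBatchesPerc", "97.6",
		"ethLastBatch", 43,
	)
}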
@@ -627,14 +637,14 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 			}
 		}
 		s.stats.UpdateSync(ethBlock,
-			&rollupData.Batches[batchesLen-1].Batch.BatchNum,
+			&rollupData.Batches[batchesLen-1].Batch,
 			lastL1BatchBlock, lastForgeL1TxsNum)
 	}
-	var firstBatchBlockNum *int64
+	hasBatch := false
 	if len(rollupData.Batches) > 0 {
-		firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
+		hasBatch = true
 	}
-	if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
 		return nil, nil, tracerr.Wrap(err)
 	}

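Sync2 previously threaded the first batch's block number through as a *int64, using nil to mean "no batch in this block"; the refactor collapses that into a hasBatch bool, since the block number can be recovered from the stats the synchronizer already holds. A before/after sketch of the signal being passed (types reduced to the one field involved):

package main

import "fmt"

type batch struct{ EthBlockNum int64 }

// Before: optionality encoded with a pointer.
func firstBatchBlockNum(batches []batch) *int64 {
	if len(batches) > 0 {
		return &batches[0].EthBlockNum
	}
	return nil
}

// After: a plain flag; the block number is read elsewhere
// (e.g. from stats.Sync.LastBatch) when actually needed.
func hasBatch(batches []batch) bool {
	return len(batches) > 0
}

func main() {
	blocks := []batch{{EthBlockNum: 1234}}
	if p := firstBatchBlockNum(blocks); p != nil {
		fmt.Println("before:", *p)
	}
	fmt.Println("after:", hasBatch(blocks))
}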
@@ -646,8 +656,8 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 	for _, batchData := range rollupData.Batches {
 		log.Debugw("Synced batch",
 			"syncLastBatch", batchData.Batch.BatchNum,
-			"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
-			"ethLastBatch", s.stats.Eth.LastBatch,
+			"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
+			"ethLastBatch", s.stats.Eth.LastBatchNum,
 		)
 	}

@@ -753,15 +763,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		s.vars.WDelayer = *wDelayer
 	}

-	batchNum, err := s.historyDB.GetLastBatchNum()
+	batch, err := s.historyDB.GetLastBatch()
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-		return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
+		return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatch: %w", err))
 	}
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		batchNum = 0
+		batch = &common.Batch{}
 	}

-	err = s.stateDB.Reset(batchNum)
+	err = s.stateDB.Reset(batch.BatchNum)
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
 	}
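The pattern around GetLastBatch treats sql.ErrNoRows as "no batch forged yet" rather than as a failure, and tracerr.Unwrap is needed because the DB layer returns errors wrapped with stack traces. A minimal sketch of the same shape, using the standard errors package in place of tracerr and a stand-in for historyDB.GetLastBatch:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

type Batch struct{ BatchNum int64 }

// getLastBatch stands in for historyDB.GetLastBatch and simulates an
// empty table by returning a wrapped sql.ErrNoRows.
func getLastBatch() (*Batch, error) {
	return nil, fmt.Errorf("query: %w", sql.ErrNoRows)
}

func main() {
	batch, err := getLastBatch()
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		panic(fmt.Errorf("historyDB.GetLastBatch: %w", err))
	}
	if errors.Is(err, sql.ErrNoRows) {
		// No batch yet: fall back to the zero batch so that
		// stateDB.Reset(batch.BatchNum) resets to batch 0.
		batch = &Batch{}
	}
	fmt.Printf("resetting state to batch %d\n", batch.BatchNum)
}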
@@ -783,9 +793,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		lastForgeL1TxsNum = &n
 	}

-	s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
+	s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)

-	if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
 		return tracerr.Wrap(err)
 	}
 	return nil
@@ -919,9 +929,15 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 			return nil, tracerr.Wrap(err)
 		}
 		if s.stateDB.CurrentBatch() != batchNum {
-			return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
+			return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
+				"evtForgeBatch.BatchNum = (%v)",
 				s.stateDB.CurrentBatch(), batchNum))
 		}
+		if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
+			return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
+				"forgeBatchArgs.NewStRoot (%v)",
+				s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
+		}

 		// Transform processed PoolL2 txs to L2 and store in BatchData
 		l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
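The added check compares the locally computed Merkle tree root against NewStRoot from the forgeBatch calldata after each synced batch. Since both roots are *big.Int values, they must be compared with Cmp; == would compare the pointers. A reduced sketch with placeholder roots standing in for stateDB.MT.Root().BigInt() and forgeBatchArgs.NewStRoot:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Stand-ins for the local state root and the contract's root.
	localRoot := big.NewInt(123456789)
	contractRoot := big.NewInt(123456789)

	// Cmp returns 0 on equality; == on *big.Int compares pointers.
	if localRoot.Cmp(contractRoot) != 0 {
		panic(fmt.Errorf("stateDB.MTRoot (%v) != forgeBatchArgs.NewStRoot (%v)",
			localRoot, contractRoot))
	}
	fmt.Println("state root matches the contract")
}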
@@ -89,12 +89,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {

 // Reset tells the TxSelector to get its internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
-	err := txsel.localAccountsDB.Reset(batchNum, true)
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	return nil
+func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
+	return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
 }

 func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
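Collapsing the error check into a single return tracerr.Wrap(...) relies on the wrapper being nil-safe (wrapping nil yields nil), so the happy path needs no branch. The equivalent shape with a hypothetical nil-safe wrapper standing in for tracerr.Wrap:

package main

import (
	"errors"
	"fmt"
)

// wrap mimics tracerr.Wrap: it is a no-op on nil, which is what makes
// the single-line `return wrap(f())` form safe.
func wrap(err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("traced: %w", err)
}

func reset(fail bool) error {
	inner := func() error {
		if fail {
			return errors.New("reset failed")
		}
		return nil
	}
	return wrap(inner())
}

func main() {
	fmt.Println(reset(false)) // <nil>
	fmt.Println(reset(true))  // traced: reset failed
}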