Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 03:16:45 +01:00)
WIP4
@@ -87,9 +87,10 @@ CheckLoopInterval = "500ms"
 Attempts = 4
 AttemptsDelay = "500ms"
 TxResendTimeout = "2m"
+NoReuseNonce = false
 CallGasLimit = 300000
 GasPriceDiv = 100
-MaxGasPrice = "0"
+MaxGasPrice = "5000000000"
 
 [Coordinator.EthClient.Keystore]
 Path = "/tmp/iden3-test/hermez/ethkeystore"
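For context on how such [Coordinator.EthClient] keys can be consumed, here is a minimal, self-contained Go sketch that parses the section as it reads after this change. The struct layout, the string-typed duration and gas fields, and the use of BurntSushi/toml are assumptions for illustration only, not the actual hermez-node config types.

// Illustrative only: parses the EthClient keys shown in the hunk above into
// plain Go types. Not the real hermez-node configuration structs.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

type ethClientCfg struct {
	Attempts        int
	AttemptsDelay   string // e.g. "500ms"; parsed with time.ParseDuration below
	TxResendTimeout string // e.g. "2m"
	NoReuseNonce    bool
	CallGasLimit    uint64
	GasPriceDiv     uint64
	MaxGasPrice     string // wei, kept as a string here to avoid overflow assumptions
}

func main() {
	const src = `
Attempts = 4
AttemptsDelay = "500ms"
TxResendTimeout = "2m"
NoReuseNonce = false
CallGasLimit = 300000
GasPriceDiv = 100
MaxGasPrice = "5000000000"
`
	var cfg ethClientCfg
	if _, err := toml.Decode(src, &cfg); err != nil {
		log.Fatal(err)
	}
	delay, err := time.ParseDuration(cfg.AttemptsDelay)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Attempts, delay, cfg.NoReuseNonce, cfg.MaxGasPrice)
}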
@@ -24,6 +24,12 @@ type statsVars struct {
 	Vars synchronizer.SCVariablesPtr
 }
 
+type state struct {
+	batchNum                     common.BatchNum
+	lastScheduledL1BatchBlockNum int64
+	lastForgeL1TxsNum            int64
+}
+
 // Pipeline manages the forging of batches with parallel server proofs
 type Pipeline struct {
 	num int
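The hunk above introduces the new state struct. The following standalone sketch (simplified types and illustrative names, not the real Pipeline) shows the pattern this commit moves to: the pipeline's mutable fields are grouped into one value so that a reset reinstalls them in a single assignment rather than field by field.

// Minimal sketch of the refactor pattern; int64 stands in for common.BatchNum.
package main

import "fmt"

type state struct {
	batchNum                     int64
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum            int64
}

type pipeline struct {
	state state
}

// reset mirrors Pipeline.reset in this commit: the whole state is replaced
// at once instead of assigning three separate fields.
func (p *pipeline) reset(batchNum, lastForgeL1TxsNum int64) {
	p.state = state{
		batchNum:                     batchNum,
		lastForgeL1TxsNum:            lastForgeL1TxsNum,
		lastScheduledL1BatchBlockNum: 0,
	}
}

func main() {
	p := &pipeline{}
	p.reset(42, 7)
	fmt.Printf("%+v\n", p.state)
}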
@@ -31,10 +37,11 @@ type Pipeline struct {
 	consts synchronizer.SCConsts
 
 	// state
-	batchNum                     common.BatchNum
-	lastScheduledL1BatchBlockNum int64
-	lastForgeL1TxsNum            int64
-	started                      bool
+	state state
+	// batchNum                     common.BatchNum
+	// lastScheduledL1BatchBlockNum int64
+	// lastForgeL1TxsNum            int64
+	started bool
 
 	proversPool *ProversPool
 	provers     []prover.Client
@@ -107,17 +114,19 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
 // reset pipeline state
 func (p *Pipeline) reset(batchNum common.BatchNum,
 	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
-	p.batchNum = batchNum
-	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
+	p.state = state{
+		batchNum:                     batchNum,
+		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
+		lastScheduledL1BatchBlockNum: 0,
+	}
 	p.stats = *stats
 	p.vars = *vars
-	p.lastScheduledL1BatchBlockNum = 0
 
-	err := p.txSelector.Reset(p.batchNum)
+	err := p.txSelector.Reset(p.state.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	err = p.batchBuilder.Reset(p.batchNum, true)
+	err = p.batchBuilder.Reset(p.state.batchNum, true)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -138,7 +147,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
 	} else if err != nil {
 		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
 			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
-				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
+				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
 				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
 		} else {
 			log.Errorw("forgeBatch", "err", err)
@@ -194,7 +203,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
				p.stats = statsVars.Stats
				p.syncSCVars(statsVars.Vars)
			case <-time.After(waitDuration):
-				batchNum = p.batchNum + 1
+				batchNum = p.state.batchNum + 1
				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
				if p.ctx.Err() != nil {
					continue
@@ -202,7 +211,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
					waitDuration = p.cfg.SyncRetryInterval
					continue
				}
-				p.batchNum = batchNum
+				p.state.batchNum = batchNum
				select {
				case batchChSentServerProof <- batchInfo:
				case <-p.ctx.Done():
@@ -302,15 +311,15 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
		// If there's no error, update the parameters related
		// to the last L1Batch forged
		if err == nil {
-			p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
-			p.lastForgeL1TxsNum++
+			p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
+			p.state.lastForgeL1TxsNum++
		}
	}()
-	if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
+	if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
		return nil, tracerr.Wrap(errLastL1BatchNotSynced)
	}
	// 2a: L1+L2 txs
-	l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
+	l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
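The forgeBatch hunk above relies on a deferred closure over the named return error to advance the L1-batch bookkeeping only when the batch was prepared successfully. A minimal standalone sketch of that pattern, with illustrative names and simplified types (not the real forgeBatch):

package main

import (
	"errors"
	"fmt"
)

type state struct {
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum            int64
}

type pipeline struct {
	state        state
	lastBlockNum int64
}

func (p *pipeline) forgeBatch(fail bool) (err error) {
	defer func() {
		// Only advance the L1-batch bookkeeping if no error occurred.
		if err == nil {
			p.state.lastScheduledL1BatchBlockNum = p.lastBlockNum + 1
			p.state.lastForgeL1TxsNum++
		}
	}()
	if fail {
		return errors.New("forge failed")
	}
	return nil
}

func main() {
	p := &pipeline{lastBlockNum: 100}
	_ = p.forgeBatch(true)
	fmt.Printf("after failure: %+v\n", p.state)
	_ = p.forgeBatch(false)
	fmt.Printf("after success: %+v\n", p.state)
}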
@@ -394,12 +403,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
 func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
	// Take the lastL1BatchBlockNum as the biggest between the last
	// scheduled one, and the synchronized one.
-	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
+	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
	}
	// Set Debug information
-	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
+	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
	batchInfo.Debug.L1BatchBlockScheduleDeadline =
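shouldL1L2Batch above picks the reference block for the L1-batch deadline as the larger of the last scheduled and the last synchronized L1-batch block, and the debug info reports the delta against the next block. A simplified standalone sketch of that computation (names are illustrative):

package main

import "fmt"

// lastL1BatchBlockNum returns the larger of the last scheduled and the last
// synchronized L1-batch block numbers, mirroring the comparison above.
func lastL1BatchBlockNum(lastScheduled, lastSynced int64) int64 {
	ref := lastScheduled
	if lastSynced > ref {
		ref = lastSynced
	}
	return ref
}

func main() {
	// The delta reported in the debug info is currentBlock+1 - reference.
	currentBlock := int64(1000)
	ref := lastL1BatchBlockNum(990, 995)
	fmt.Println("reference:", ref, "delta:", currentBlock+1-ref)
}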
@@ -77,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
	//
	// Scheduled L1Batch
	//
-	pipeline.lastScheduledL1BatchBlockNum = startBlock
+	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
	stats.Sync.LastL1BatchBlock = startBlock - 10
 
	// We are are one block before the timeout range * 0.5