separated the policy to decide if we forge a batch or not
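The commit below extracts the forge-or-skip policy from Pipeline.forgeBatch into two helpers: preForgeBatchCheck, which enforces ForgeDelay early in forgeBatch, and postForgeBatchCheck, which enforces ForgeNoTxsDelay when the selected batch turns out to be empty. The call sites in forgeBatch now just invoke the helpers and wrap any returned error with tracerr.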
@@ -389,6 +389,14 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
 	return nil
 }
 
+// preForgeBatchCheck checks whether the ForgeDelay has been reached before forging the batch
+func (p *Pipeline) preForgeBatchCheck(slotCommitted bool, now time.Time) error {
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
+		return errForgeBeforeDelay
+	}
+	return nil
+}
+
 // forgeBatch forges the batchNum batch.
 func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
 	// remove transactions from the pool that have been there for too long
@@ -421,8 +429,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	}
 
 	// If we haven't reached the ForgeDelay, skip forging the batch
-	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
-		return nil, tracerr.Wrap(errForgeBeforeDelay)
+	if err = p.preForgeBatchCheck(slotCommitted, now); err != nil {
+		return nil, tracerr.Wrap(err)
 	}
 
 	// 1. Decide if we forge L2Tx or L1+L2Tx
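With the delay check factored out and returning a plain sentinel error, the pre-forge policy can be exercised in isolation. The following is a minimal test sketch, not code from the repository: it assumes the test sits in the same package as Pipeline (hence access to the unexported lastForgeTime and cfg fields) and that cfg is a plain struct field whose ForgeDelay can be assigned directly; the test name and values are illustrative.

package coordinator

import (
	"testing"
	"time"
)

// TestPreForgeBatchCheck is a sketch: it sets the fields that
// preForgeBatchCheck reads and checks the three interesting cases.
func TestPreForgeBatchCheck(t *testing.T) {
	now := time.Now()
	p := &Pipeline{lastForgeTime: now.Add(-time.Second)} // last forge 1s ago
	p.cfg.ForgeDelay = 10 * time.Second                  // assumed struct field

	// Slot committed and ForgeDelay not yet elapsed: forging must wait.
	if err := p.preForgeBatchCheck(true, now); err != errForgeBeforeDelay {
		t.Fatalf("want errForgeBeforeDelay, got %v", err)
	}
	// Slot not committed: the delay check does not apply.
	if err := p.preForgeBatchCheck(false, now); err != nil {
		t.Fatalf("want nil, got %v", err)
	}
	// ForgeDelay elapsed: forging may proceed.
	p.lastForgeTime = now.Add(-20 * time.Second)
	if err := p.preForgeBatchCheck(true, now); err != nil {
		t.Fatalf("want nil, got %v", err)
	}
}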
@@ -454,35 +462,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	// If there are no txs to forge, no l1UserTxs in the open queue to
 	// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
 	// batch.
-	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
-		noTxs := false
-		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
-			if batchInfo.L1Batch {
-				// Query the number of unforged L1UserTxs
-				// (either in an open queue or in a frozen
-				// not-yet-forged queue).
-				count, err := p.historyDB.GetUnforgedL1UserTxsCount()
-				if err != nil {
-					return nil, tracerr.Wrap(err)
-				}
-				// If there are future L1UserTxs, we forge a
-				// batch to advance the queues to be able to
-				// forge the L1UserTxs in the future.
-				// Otherwise, skip.
-				if count == 0 {
-					noTxs = true
-				}
-			} else {
-				noTxs = true
-			}
-		}
-		if noTxs {
-			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
-				return nil, tracerr.Wrap(err)
-			}
-			return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
-		}
-	}
+	if err = p.postForgeBatchCheck(slotCommitted, now, l1UserTxsExtra, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
+		return nil, tracerr.Wrap(err)
+	}
 
 	if batchInfo.L1Batch {
 		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
@@ -539,6 +521,41 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	return batchInfo, nil
 }
 
+// postForgeBatchCheck checks whether there are no txs to forge, no l1UserTxs in the open queue to freeze, and the ForgeNoTxsDelay has not been reached
+func (p *Pipeline) postForgeBatchCheck(slotCommitted bool, now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
+	poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) error {
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
+		noTxs := false
+		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
+			if batchInfo.L1Batch {
+				// Query the number of unforged L1UserTxs
+				// (either in an open queue or in a frozen
+				// not-yet-forged queue).
+				count, err := p.historyDB.GetUnforgedL1UserTxsCount()
+				if err != nil {
+					return err
+				}
+				// If there are future L1UserTxs, we forge a
+				// batch to advance the queues to be able to
+				// forge the L1UserTxs in the future.
+				// Otherwise, skip.
+				if count == 0 {
+					noTxs = true
+				}
+			} else {
+				noTxs = true
+			}
+		}
+		if noTxs {
+			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
+				return err
+			}
+			return errForgeNoTxsBeforeDelay
+		}
+	}
+	return nil
+}
+
 // waitServerProof gets the generated zkProof & sends it to the SmartContract
 func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
 	proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
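The decision tree inside postForgeBatchCheck can be hard to read through the noTxs flag. As a reading aid only, the skip condition reduces to the pure function below; shouldSkipEmptyBatch and its parameter names are hypothetical, nTxs stands for len(l1UserTxsExtra)+len(l1CoordTxs)+len(poolL2Txs), and the slotCommitted/ForgeNoTxsDelay guard plus the txSelector.Reset side effect stay in the real helper.

// shouldSkipEmptyBatch mirrors postForgeBatchCheck's noTxs decision as a
// pure function (illustrative sketch, not code from the repository).
func shouldSkipEmptyBatch(isL1Batch bool, nTxs, unforgedL1UserTxs int) bool {
	if nTxs > 0 {
		// There is something to forge: never skip.
		return false
	}
	if isL1Batch {
		// An empty L1 batch still advances the L1UserTx queues, so it is
		// only skippable when no future L1UserTxs are waiting anywhere.
		return unforgedL1UserTxs == 0
	}
	// Nothing to forge in an L2-only batch: skip.
	return true
}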