|
|
@@ -389,6 +389,14 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
 	return nil
 }
 
+// preForgeBatchCheck checks if we have reached the ForgeDelay before forging the batch
+func (p *Pipeline) preForgeBatchCheck(slotCommitted bool, now time.Time) error {
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
+		return errForgeBeforeDelay
+	}
+	return nil
+}
+
 // forgeBatch forges the batchNum batch.
 func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
 	// remove transactions from the pool that have been there for too long
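For context outside the diff itself, here is a minimal, self-contained sketch of how the extracted check behaves. The Pipeline and Config stubs below are simplified stand-ins for illustration, not the real coordinator types.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Hypothetical stand-ins for the coordinator's real types.
var errForgeBeforeDelay = errors.New("forge delay not reached")

type Config struct{ ForgeDelay time.Duration }

type Pipeline struct {
	cfg           Config
	lastForgeTime time.Time
}

// Same logic as the extracted preForgeBatchCheck: once the slot is
// committed, forging is throttled until ForgeDelay has elapsed since the
// last forged batch.
func (p *Pipeline) preForgeBatchCheck(slotCommitted bool, now time.Time) error {
	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
		return errForgeBeforeDelay
	}
	return nil
}

func main() {
	p := &Pipeline{cfg: Config{ForgeDelay: 10 * time.Second}, lastForgeTime: time.Now()}
	fmt.Println(p.preForgeBatchCheck(true, time.Now()))                      // forge delay not reached
	fmt.Println(p.preForgeBatchCheck(true, time.Now().Add(11*time.Second))) // <nil>
	fmt.Println(p.preForgeBatchCheck(false, time.Now()))                    // <nil>: uncommitted slot is not throttled
}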
|
|
|
@@ -421,8 +429,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	}
 
 	// If we haven't reached the ForgeDelay, skip forging the batch
-	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
-		return nil, tracerr.Wrap(errForgeBeforeDelay)
+	if err = p.preForgeBatchCheck(slotCommitted, now); err != nil {
+		return nil, tracerr.Wrap(err)
 	}
 
 	// 1. Decide if we forge L2Tx or L1+L2Tx
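Note that the call site wraps whatever the check returns, so errForgeBeforeDelay stays recognizable to callers further up the pipeline. A sketch of that sentinel pattern using only the standard library (the real code uses tracerr.Wrap, which is assumed here to keep the wrapped error recoverable):

package main

import (
	"errors"
	"fmt"
)

var errForgeBeforeDelay = errors.New("forge delay not reached")

// wrap stands in for tracerr.Wrap: it annotates the error while keeping the
// sentinel reachable via errors.Is.
func wrap(err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("forgeBatch: %w", err)
}

func main() {
	err := wrap(errForgeBeforeDelay)
	// The caller can still distinguish "skip, too early" from a real failure.
	fmt.Println(errors.Is(err, errForgeBeforeDelay)) // true
}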
|
|
|
@@ -454,34 +462,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
-	// If there are no txs to forge, no l1UserTxs in the open queue to
-	// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
-	// batch.
-	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
-		noTxs := false
-		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
-			if batchInfo.L1Batch {
-				// Query the number of unforged L1UserTxs
-				// (either in a open queue or in a frozen
-				// not-yet-forged queue).
-				count, err := p.historyDB.GetUnforgedL1UserTxsCount()
-				if err != nil {
-					return nil, tracerr.Wrap(err)
-				}
-				// If there are future L1UserTxs, we forge a
-				// batch to advance the queues to be able to
-				// forge the L1UserTxs in the future.
-				// Otherwise, skip.
-				if count == 0 {
-					noTxs = true
-				}
-			} else {
-				noTxs = true
-			}
-		}
-		if noTxs {
-			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
-				return nil, tracerr.Wrap(err)
-			}
-			return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
-		}
+	if err = p.postForgeBatchCheck(slotCommitted, now, l1UserTxsExtra, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
+		return nil, tracerr.Wrap(err)
 	}
 
 	if batchInfo.L1Batch {
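The block moved out here encodes a non-obvious decision: an empty batch is still forged when it is an L1 batch and unforged L1UserTxs exist, because forging advances the L1 queues. A hypothetical distillation of just that predicate (shouldSkipEmptyBatch is not a function in the codebase, and the count parameter stands in for the result of historyDB.GetUnforgedL1UserTxsCount):

package main

import "fmt"

// shouldSkipEmptyBatch reports whether a batch with no txs at all should be
// skipped rather than forged.
func shouldSkipEmptyBatch(isL1Batch bool, unforgedL1UserTxs int) bool {
	if !isL1Batch {
		// A pure L2 batch with nothing to forge accomplishes nothing.
		return true
	}
	// An empty L1 batch is still useful when L1UserTxs are queued: forging
	// it advances the queues so those txs can be forged later.
	return unforgedL1UserTxs == 0
}

func main() {
	fmt.Println(shouldSkipEmptyBatch(false, 0)) // true: empty pure-L2 batch
	fmt.Println(shouldSkipEmptyBatch(true, 3))  // false: forge to advance the queues
	fmt.Println(shouldSkipEmptyBatch(true, 0))  // true: nothing to forge now or queued
}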
|
|
@@ -539,6 +521,41 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	return batchInfo, nil
 }
 
+// postForgeBatchCheck checks that there are no txs to forge, no l1UserTxs in the open queue to freeze, and that we haven't reached the ForgeNoTxsDelay
+func (p *Pipeline) postForgeBatchCheck(slotCommitted bool, now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
+	poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) error {
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
+		noTxs := false
+		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
+			if batchInfo.L1Batch {
+				// Query the number of unforged L1UserTxs
+				// (either in an open queue or in a frozen
+				// not-yet-forged queue).
+				count, err := p.historyDB.GetUnforgedL1UserTxsCount()
+				if err != nil {
+					return err
+				}
+				// If there are future L1UserTxs, we forge a
+				// batch to advance the queues to be able to
+				// forge the L1UserTxs in the future.
+				// Otherwise, skip.
+				if count == 0 {
+					noTxs = true
+				}
+			} else {
+				noTxs = true
+			}
+		}
+		if noTxs {
+			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
+				return err
+			}
+			return errForgeNoTxsBeforeDelay
+		}
+	}
+	return nil
+}
+
 // waitServerProof gets the generated zkProof & sends it to the SmartContract
 func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
 	proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
|
|
|
|