Implement Pipeline.prepareForgeBatchArgs()

- Implement Pipeline.prepareForgeBatchArgs()
- Add a minimal stress test for the coordinator (that also runs the
  synchronizer)
- Update txprocessor.ProcessTxs() to return valid results for batches without
  transactions
  - Add the boilerplate for the corresponding test, leaving the zkInput
    values as a TODO
- Update the prover client to use the same point format as the proof server
  (projective)
- Update the TxSelector.GetCoordIdxs interface to also return the
  authorizations to create the accounts that go with the l1CoordinatorTxs
  (see the interface sketch below)
Author: Eduard S
Date:   2020-12-22 16:50:58 +01:00
Parent: c61c4f3376
Commit: 6a990376b4

10 changed files with 361 additions and 49 deletions
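For reference, a minimal sketch of the updated tx-selection API as it is called from Pipeline.forgeBatch() in the diff below. The return types come from the variable declarations at the two call sites; the parameter types, the interface framing, and the import paths are assumptions for illustration:

package coordinator // sketch only

import (
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/hermeznetwork/hermez-node/txselector"
)

// txSelectorAPI is a hypothetical framing of the updated selection
// methods; the diff below only shows the two call sites in
// Pipeline.forgeBatch(). The new [][]byte return value carries the
// account-creation authorizations that accompany the
// l1CoordinatorTxs.
type txSelectorAPI interface {
    // L1+L2 batch: coordinator fee idxs, account-creation auths,
    // extra L1 user txs, L1 coordinator txs, selected pool L2 txs.
    GetL1L2TxSelection(selectionConfig *txselector.SelectionConfig,
        batchNum common.BatchNum, l1UserTxs []common.L1Tx,
    ) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx,
        []common.PoolL2Tx, error)

    // L2-only batch: same results minus the extra L1 user txs.
    GetL2TxSelection(selectionConfig *txselector.SelectionConfig,
        batchNum common.BatchNum,
    ) ([]common.Idx, [][]byte, []common.L1Tx,
        []common.PoolL2Tx, error)
}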


@@ -26,15 +26,18 @@ const (
 // BatchInfo contains the Batch information
 type BatchInfo struct {
-    BatchNum       common.BatchNum
-    ServerProof    prover.Client
-    ZKInputs       *common.ZKInputs
-    Proof          *prover.Proof
-    PublicInputs   []*big.Int
-    L1UserTxsExtra []common.L1Tx
-    L1CoordTxs     []common.L1Tx
-    L2Txs          []common.PoolL2Tx
-    ForgeBatchArgs *eth.RollupForgeBatchArgs
+    BatchNum              common.BatchNum
+    ServerProof           prover.Client
+    ZKInputs              *common.ZKInputs
+    Proof                 *prover.Proof
+    PublicInputs          []*big.Int
+    L1Batch               bool
+    L1UserTxsExtra        []common.L1Tx
+    L1CoordTxs            []common.L1Tx
+    L1CoordinatorTxsAuths [][]byte
+    L2Txs                 []common.L2Tx
+    CoordIdxs             []common.Idx
+    ForgeBatchArgs        *eth.RollupForgeBatchArgs
     // FeesInfo
     TxStatus TxStatus
     EthTx    *types.Transaction


@@ -3,6 +3,7 @@ package coordinator
 import (
     "context"
     "fmt"
+    "math/big"
     "strings"
     "sync"
     "time"
@@ -772,7 +773,15 @@ func (p *Pipeline) Stop(ctx context.Context) {
     }
 }
 
-func l2TxsIDs(txs []common.PoolL2Tx) []common.TxID {
+func poolL2TxsIDs(txs []common.PoolL2Tx) []common.TxID {
+    txIDs := make([]common.TxID, len(txs))
+    for i, tx := range txs {
+        txIDs[i] = tx.TxID
+    }
+    return txIDs
+}
+
+func l2TxsIDs(txs []common.L2Tx) []common.TxID {
     txIDs := make([]common.TxID, len(txs))
     for i, tx := range txs {
         txIDs[i] = tx.TxID
@@ -810,9 +819,11 @@ func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, sel
     var poolL2Txs []common.PoolL2Tx
     // var feesInfo
     var l1UserTxsExtra, l1CoordTxs []common.L1Tx
+    var auths [][]byte
     var coordIdxs []common.Idx
     // 1. Decide if we forge L2Tx or L1+L2Tx
     if p.shouldL1L2Batch() {
+        batchInfo.L1Batch = true
         p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num
         // 2a: L1+L2 txs
         p.lastForgeL1TxsNum++
@@ -821,14 +832,14 @@ func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, sel
             return nil, tracerr.Wrap(err)
         }
         // TODO once feesInfo is added to method return, add the var
-        coordIdxs, l1UserTxsExtra, l1CoordTxs, poolL2Txs, err =
+        coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, err =
             p.txSelector.GetL1L2TxSelection(selectionConfig, batchNum, l1UserTxs)
         if err != nil {
             return nil, tracerr.Wrap(err)
         }
     } else {
         // 2b: only L2 txs
-        coordIdxs, l1CoordTxs, poolL2Txs, err =
+        coordIdxs, auths, l1CoordTxs, poolL2Txs, err =
             p.txSelector.GetL2TxSelection(selectionConfig, batchNum)
         if err != nil {
             return nil, tracerr.Wrap(err)
@@ -840,9 +851,10 @@ func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, sel
     // TODO feesInfo
     batchInfo.L1UserTxsExtra = l1UserTxsExtra
     batchInfo.L1CoordTxs = l1CoordTxs
-    batchInfo.L2Txs = poolL2Txs
+    batchInfo.L1CoordinatorTxsAuths = auths
+    batchInfo.CoordIdxs = coordIdxs
 
-    if err := p.l2DB.StartForging(l2TxsIDs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
+    if err := p.l2DB.StartForging(poolL2TxsIDs(poolL2Txs), batchInfo.BatchNum); err != nil {
         return nil, tracerr.Wrap(err)
     }
@@ -864,6 +876,11 @@ func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, sel
     if err != nil {
         return nil, tracerr.Wrap(err)
     }
+    l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
+    if err != nil {
+        return nil, tracerr.Wrap(err)
+    }
+    batchInfo.L2Txs = l2Txs
 
     // 5. Save metadata from BatchBuilder output for BatchNum
     batchInfo.ZKInputs = zkInputs
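The PoolL2Tx-to-L2Tx conversion is only visible in the diff as a call site; a minimal sketch of what common.PoolL2TxsToL2Txs might look like, assuming a per-tx conversion helper (the helper name and its failure mode are hypothetical):

package common // sketch only

// PoolL2TxsToL2Txs is assumed to convert the pool representation to
// the compact L2Tx form stored in BatchInfo.L2Txs; only the call site
// above is visible in the diff.
func PoolL2TxsToL2Txs(txs []PoolL2Tx) ([]L2Tx, error) {
    l2Txs := make([]L2Tx, 0, len(txs))
    for _, tx := range txs {
        // L2Tx() is a hypothetical per-tx conversion that may fail
        // (e.g. on an invalid amount encoding), which would explain
        // why the batch conversion returns an error.
        l2Tx, err := tx.L2Tx()
        if err != nil {
            return nil, err
        }
        l2Txs = append(l2Txs, *l2Tx)
    }
    return l2Txs, nil
}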
@@ -903,6 +920,25 @@ func (p *Pipeline) shouldL1L2Batch() bool {
 }
 
 func (p *Pipeline) prepareForgeBatchArgs(batchInfo *BatchInfo) *eth.RollupForgeBatchArgs {
-    // TODO
-    return &eth.RollupForgeBatchArgs{}
+    proof := batchInfo.Proof
+    zki := batchInfo.ZKInputs
+    return &eth.RollupForgeBatchArgs{
+        NewLastIdx:            int64(zki.Metadata.NewLastIdxRaw),
+        NewStRoot:             zki.Metadata.NewStateRootRaw.BigInt(),
+        NewExitRoot:           zki.Metadata.NewExitRootRaw.BigInt(),
+        L1UserTxs:             batchInfo.L1UserTxsExtra,
+        L1CoordinatorTxs:      batchInfo.L1CoordTxs,
+        L1CoordinatorTxsAuths: batchInfo.L1CoordinatorTxsAuths,
+        L2TxsData:             batchInfo.L2Txs,
+        FeeIdxCoordinator:     batchInfo.CoordIdxs,
+        // Circuit selector
+        VerifierIdx: 0, // TODO
+        L1Batch:     batchInfo.L1Batch,
+        ProofA:      [2]*big.Int{proof.PiA[0], proof.PiA[1]},
+        ProofB: [2][2]*big.Int{
+            {proof.PiB[0][0], proof.PiB[0][1]},
+            {proof.PiB[1][0], proof.PiB[1][1]},
+        },
+        ProofC: [2]*big.Int{proof.PiC[0], proof.PiC[1]},
+    }
 }
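For context on the commit bullet about the projective point format: prepareForgeBatchArgs reads only indices [0] and [1] of each proof point, which suggests the prover client now returns points carrying an extra projective coordinate that is dropped when building the on-chain arguments. A minimal sketch of the assumed prover.Proof shape (the field sizes follow the PiA/PiB/PiC indexing above; the actual definition lives in the prover package and is an assumption here):

package prover // hypothetical placement, for illustration only

import "math/big"

// Proof is the assumed Groth16 proof shape after switching the
// client to the proof server's projective point format. The forge
// args above read only indices [0] and [1] (x and y) of each point,
// dropping the extra projective coordinate (presumably normalized
// to 1 by the prover).
type Proof struct {
    PiA [3]*big.Int    // G1 point: x, y, z
    PiB [3][2]*big.Int // G2 point: three Fp2 coordinates of two limbs each
    PiC [3]*big.Int    // G1 point: x, y, z
}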


@@ -6,6 +6,7 @@ import (
     "io/ioutil"
     "math/big"
     "os"
+    "sync"
     "testing"
     "time"
@@ -157,11 +158,21 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
         ConfirmBlocks:          5,
         L1BatchTimeoutPerc:     0.5,
         EthClientAttempts:      5,
+        SyncRetryInterval:      400 * time.Microsecond,
         EthClientAttemptsDelay: 100 * time.Millisecond,
         TxManagerCheckInterval: 300 * time.Millisecond,
         DebugBatchPath:         debugBatchPath,
+        Purger: PurgerCfg{
+            PurgeBatchDelay:      10,
+            PurgeBlockDelay:      10,
+            InvalidateBatchDelay: 4,
+            InvalidateBlockDelay: 4,
+        },
     }
-    serverProofs := []prover.Client{&prover.MockClient{}, &prover.MockClient{}}
+    serverProofs := []prover.Client{
+        &prover.MockClient{Delay: 300 * time.Millisecond},
+        &prover.MockClient{Delay: 400 * time.Millisecond},
+    }
 
     scConsts := &synchronizer.SCConsts{
         Rollup: *ethClientSetup.RollupConstants,
@@ -628,6 +639,70 @@ PoolTransfer(0) User2-User3: 300 (126)
     assert.Equal(t, 0, len(batchInfo.L2Txs))
 }
 
+func TestCoordinatorStress(t *testing.T) {
+    if os.Getenv("TEST_COORD_STRESS") == "" {
+        return
+    }
+    log.Info("Begin Test Coord Stress")
+    ethClientSetup := test.NewClientSetupExample()
+    var timer timer
+    ethClient := test.NewClient(true, &timer, &bidder, ethClientSetup)
+    modules := newTestModules(t)
+    coord := newTestCoordinator(t, forger, ethClient, ethClientSetup, modules)
+    syn := newTestSynchronizer(t, ethClient, ethClientSetup, modules)
+
+    coord.Start()
+    ctx, cancel := context.WithCancel(context.Background())
+
+    var wg sync.WaitGroup
+
+    // Synchronizer loop
+    wg.Add(1)
+    go func() {
+        for {
+            blockData, _, err := syn.Sync2(ctx, nil)
+            if ctx.Err() != nil {
+                wg.Done()
+                return
+            }
+            require.NoError(t, err)
+            if blockData != nil {
+                stats := syn.Stats()
+                coord.SendMsg(MsgSyncBlock{
+                    Stats:   *stats,
+                    Batches: blockData.Rollup.Batches,
+                    Vars: synchronizer.SCVariablesPtr{
+                        Rollup:   blockData.Rollup.Vars,
+                        Auction:  blockData.Auction.Vars,
+                        WDelayer: blockData.WDelayer.Vars,
+                    },
+                })
+            } else {
+                time.Sleep(100 * time.Millisecond)
+            }
+        }
+    }()
+
+    // Blockchain mining loop
+    wg.Add(1)
+    go func() {
+        for {
+            select {
+            case <-ctx.Done():
+                wg.Done()
+                return
+            case <-time.After(100 * time.Millisecond):
+                ethClient.CtlMineBlock()
+            }
+        }
+    }()
+
+    time.Sleep(600 * time.Second)
+
+    cancel()
+    wg.Wait()
+    coord.Stop()
+}
+
 // TODO: Test Reorg
 // TODO: Test Pipeline
 // TODO: Test TxMonitor
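The stress test is gated on the TEST_COORD_STRESS environment variable (per the os.Getenv check above), so it is skipped in normal test runs. It can be launched with something like `TEST_COORD_STRESS=1 go test -run TestCoordinatorStress -timeout 15m` from the coordinator package directory; the fixed 600-second sleep means the default 10-minute go test timeout has to be raised (the exact package path and flags depend on the local setup).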