diff --git a/batchbuilder/batchbuilder.go b/batchbuilder/batchbuilder.go
index 4d039a6..0f864e5 100644
--- a/batchbuilder/batchbuilder.go
+++ b/batchbuilder/batchbuilder.go
@@ -27,7 +27,7 @@ type ConfigBatch struct {
 
 // NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
 // method
-func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, configCircuits []ConfigCircuit, batchNum uint64, nLevels uint64) (*BatchBuilder, error) {
+func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, configCircuits []ConfigCircuit, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
 	localStateDB, err := statedb.NewLocalStateDB(dbpath, synchronizerStateDB, true, int(nLevels))
 	if err != nil {
 		return nil, err
@@ -46,12 +46,16 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, config
 // `batchNum`. If `fromSynchronizer` is true, the BatchBuilder must take a
 // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
 // it can just roll back the internal copy.
-func (bb *BatchBuilder) Reset(batchNum uint64, fromSynchronizer bool) error {
+func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	return bb.localStateDB.Reset(batchNum, fromSynchronizer)
 }
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
 func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
 	zkInputs, _, err := bb.localStateDB.ProcessTxs(false, l1usertxs, l1coordinatortxs, l2txs)
+	if err != nil {
+		return nil, err
+	}
+	err = bb.localStateDB.MakeCheckpoint()
 	return zkInputs, err
 }
diff --git a/batchbuilder/batchbuilder_test.go b/batchbuilder/batchbuilder_test.go
index 4a56547..1c6c26c 100644
--- a/batchbuilder/batchbuilder_test.go
+++ b/batchbuilder/batchbuilder_test.go
@@ -1,7 +1,6 @@
 package batchbuilder
 
 import (
-	"fmt"
 	"io/ioutil"
 	"testing"
 
@@ -19,7 +18,6 @@ func TestBatchBuilder(t *testing.T) {
 	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
 	require.Nil(t, err)
 
-	bb, err := NewBatchBuilder(bbDir, synchDB, nil, 0, 32)
+	_, err = NewBatchBuilder(bbDir, synchDB, nil, 0, 32)
 	assert.Nil(t, err)
-	fmt.Println(bb)
 }
diff --git a/common/exittree.go b/common/exittree.go
index 201d3da..4f6dfcf 100644
--- a/common/exittree.go
+++ b/common/exittree.go
@@ -2,11 +2,13 @@ package common
 
 import (
 	"math/big"
+
+	"github.com/iden3/go-merkletree"
 )
 
 type ExitInfo struct {
 	AccountIdx  Idx
-	MerkleProof []byte
+	MerkleProof *merkletree.CircomVerifierProof
 	Balance     *big.Int
 	Nullifier   *big.Int
 }
diff --git a/coordinator/batch.go b/coordinator/batch.go
index 1cf2e8d..6eacbde 100644
--- a/coordinator/batch.go
+++ b/coordinator/batch.go
@@ -10,7 +10,7 @@ type Proof struct {
 
 // BatchInfo contans the Batch information
 type BatchInfo struct {
-	batchNum       uint64
+	batchNum       common.BatchNum
 	serverProof    *ServerProofInfo
 	zkInputs       *common.ZKInputs
 	proof          *Proof
@@ -22,7 +22,7 @@ type BatchInfo struct {
 
 // NewBatchInfo creates a new BatchInfo with the given batchNum &
 // ServerProofInfo
-func NewBatchInfo(batchNum uint64, serverProof *ServerProofInfo) BatchInfo {
+func NewBatchInfo(batchNum common.BatchNum, serverProof *ServerProofInfo) BatchInfo {
 	return BatchInfo{
 		batchNum:    batchNum,
 		serverProof: serverProof,
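Note: the recurring change in this patch is replacing raw uint64 batch numbers with the common.BatchNum type. As a rough sketch of what such a typed batch number looks like (the real definition lives in common/ and its underlying integer type may differ from what is assumed here):

package common

// BatchNum identifies a rollup batch. Using a named type instead of a bare
// uint64 lets the compiler reject accidental mixing of batch numbers with
// other integer counters, which is what this patch relies on.
type BatchNum int64

// Int64 is an illustrative helper, not taken from the codebase, showing the
// kind of explicit conversion callers would use when persisting the value.
func (bn BatchNum) Int64() int64 { return int64(bn) }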
diff --git a/coordinator/batch_test.go b/coordinator/batch_test.go
index da1c819..f8c64b9 100644
--- a/coordinator/batch_test.go
+++ b/coordinator/batch_test.go
@@ -3,6 +3,7 @@ package coordinator
 import (
 	"testing"
 
+	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -19,8 +20,8 @@ func TestBatchQueue(t *testing.T) {
 		batchNum: 1,
 	})
 
-	assert.Equal(t, uint64(0), bq.Pop().batchNum)
-	assert.Equal(t, uint64(2), bq.Pop().batchNum)
-	assert.Equal(t, uint64(1), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(0), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(2), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(1), bq.Pop().batchNum)
 	assert.Nil(t, bq.Pop())
 }
diff --git a/coordinator/coordinator.go b/coordinator/coordinator.go
index 9379b27..b8310f4 100644
--- a/coordinator/coordinator.go
+++ b/coordinator/coordinator.go
@@ -6,7 +6,9 @@ import (
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/batchbuilder"
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/eth"
+	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/txselector"
 	kvdb "github.com/iden3/go-merkletree/db"
 	"github.com/iden3/go-merkletree/db/memory"
@@ -15,17 +17,26 @@ import (
 
 // CoordinatorConfig contains the Coordinator configuration
 type CoordinatorConfig struct {
 	ForgerAddress ethCommon.Address
+	LoopInterval  time.Duration
 }
 
 // Coordinator implements the Coordinator type
 type Coordinator struct {
+	// m sync.Mutex
+	stopch       chan bool
+	stopforgerch chan bool
+
+	forging    bool
+	isForgeSeq bool // WIP just for testing while implementing
+
 	config CoordinatorConfig
 
-	batchNum        uint64
+	batchNum        common.BatchNum
 	batchQueue      *BatchQueue
 	serverProofPool ServerProofPool
 
 	// synchronizer *synchronizer.Synchronizer
+	hdb          *historydb.HistoryDB
 	txsel        *txselector.TxSelector
 	batchBuilder *batchbuilder.BatchBuilder
@@ -34,48 +45,121 @@ type Coordinator struct {
 }
 
 // NewCoordinator creates a new Coordinator
-func NewCoordinator() *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
-	var c *Coordinator
-	// c.ethClient = eth.NewClient() // TBD
-	c.ethTxStore = memory.NewMemoryStorage()
-	return c
+func NewCoordinator(conf CoordinatorConfig,
+	hdb *historydb.HistoryDB,
+	txsel *txselector.TxSelector,
+	bb *batchbuilder.BatchBuilder,
+	ethClient *eth.Client) *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
+	c := Coordinator{
+		config:       conf,
+		hdb:          hdb,
+		txsel:        txsel,
+		batchBuilder: bb,
+		ethClient:    ethClient,
+		ethTxStore:   memory.NewMemoryStorage(),
+	}
+	return &c
+}
+
+func (c *Coordinator) Stop() {
+	log.Info("Stopping Coordinator")
+	c.stopch <- true
 }
 
 // Start starts the Coordinator service
 func (c *Coordinator) Start() {
-	// TODO TBD note: the sequences & loops & errors & logging & goroutines
-	// & channels approach still needs to be defined, the current code is a
-	// wip draft
-
-	// TBD: goroutines strategy
-
-	// if in Forge Sequence:
-	if c.isForgeSequence() {
-		// c.batchNum = c.synchronizer.LastBatchNum()
-		_ = c.txsel.Reset(c.batchNum)
-		_ = c.batchBuilder.Reset(c.batchNum, true)
-		c.batchQueue = NewBatchQueue()
-		go func() {
-			for {
-				_ = c.forgeSequence()
-				time.Sleep(1 * time.Second)
+	c.stopch = make(chan bool) // initialize channel
+	go func() {
+		log.Info("Starting Coordinator")
+		for {
+			select {
+			case <-c.stopch:
+				close(c.stopforgerch)
+				log.Info("Coordinator stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if !c.isForgeSequence() {
+					if c.forging {
+						log.Info("forging stopped")
+						c.forging = false
+						close(c.stopforgerch)
+					}
+					log.Debug("not in forge time")
+					continue
+				}
+				if !c.forging {
+					log.Info("Start Forging")
+					// c.batchNum = c.hdb.GetLastBatchNum() // uncomment when HistoryDB is ready
+					err := c.txsel.Reset(c.batchNum)
+					if err != nil {
+						log.Error("forging err: ", err)
+					}
+					err = c.batchBuilder.Reset(c.batchNum, true)
+					if err != nil {
+						log.Error("forging err: ", err)
+					}
+					c.batchQueue = NewBatchQueue()
+
+					c.forgerLoop()
+					c.forging = true
+				}
+			}
+		}
+	}()
+}
+
+// forgerLoop triggers goroutines for:
+// - forgeSequence
+// - proveSequence
+// - forgeConfirmationSequence
+func (c *Coordinator) forgerLoop() {
+	c.stopforgerch = make(chan bool) // initialize channel
+
+	go func() {
+		log.Info("forgeSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("forgeSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.forgeSequence(); err != nil {
+					log.Error("forgeSequence err: ", err)
+				}
 			}
-		}()
-		go func() {
-			for {
-				_ = c.proveSequence()
-				time.Sleep(1 * time.Second)
+		}
+	}()
+	go func() {
+		log.Info("proveSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("proveSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.proveSequence(); err != nil && err != common.ErrBatchQueueEmpty {
+					log.Error("proveSequence err: ", err)
+				}
 			}
-		}()
-		go func() {
-			for {
-				_ = c.forgeConfirmationSequence()
-				time.Sleep(1 * time.Second)
+		}
+	}()
+	go func() {
+		log.Info("forgeConfirmationSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("forgeConfirmationSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.forgeConfirmationSequence(); err != nil {
+					log.Error("forgeConfirmationSequence err: ", err)
+				}
 			}
-		}()
-	}
+		}
+	}()
 }
 
+// forgeSequence
 func (c *Coordinator) forgeSequence() error {
 	// TODO once synchronizer has this method ready:
 	// If there's been a reorg, handle it
@@ -104,8 +188,8 @@ func (c *Coordinator) forgeSequence() error {
 	// 1. Decide if we forge L2Tx or L1+L2Tx
 	if c.shouldL1L2Batch() {
 		// 2a: L1+L2 txs
-		// l1UserTxs, toForgeL1TxsNumber := c.synchronizer.GetNextL1UserTxs() // TODO once synchronizer is ready, uncomment
-		var l1UserTxs []*common.L1Tx = nil // tmp, depends on synchronizer
+		// l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment
+		var l1UserTxs []*common.L1Tx = nil // tmp, depends on HistoryDB
 		l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
 		if err != nil {
 			return err
@@ -144,6 +228,7 @@ func (c *Coordinator) forgeSequence() error {
 
 	// 5. Save metadata from BatchBuilder output for BatchNum
 	batchInfo.SetZKInputs(zkInputs)
+	log.Debugf("Batch built, batchNum: %d ", c.batchNum)
 
 	// 6. Call an idle server proof with BatchBuilder output, save server proof info for batchNum
 	err = batchInfo.serverProof.CalculateProof(zkInputs)
@@ -160,6 +245,7 @@ func (c *Coordinator) proveSequence() error {
 	batchInfo := c.batchQueue.Pop()
 	if batchInfo == nil {
 		// no batches in queue, return
+		log.Debug("no batch to prove yet")
 		return common.ErrBatchQueueEmpty
 	}
 	serverProofInfo := batchInfo.serverProof
@@ -173,6 +259,8 @@ func (c *Coordinator) proveSequence() error {
 	if err != nil {
 		return err
 	}
+	log.Debugf("ethClient ForgeCall sent, batchNum: %d", c.batchNum)
+
 	// TODO once tx data type is defined, store ethTx (returned by ForgeCall)
 	// TBD if use ethTxStore as a disk k-v database, or use a Queue
 	// tx, err := c.ethTxStore.NewTx()
@@ -202,8 +290,7 @@ func (c *Coordinator) handleReorg() error {
 
 // isForgeSequence returns true if the node is the Forger in the current ethereum block
 func (c *Coordinator) isForgeSequence() bool {
-
-	return false
+	return c.isForgeSeq
 }
 
 func (c *Coordinator) purgeRemoveByTimeout() error {
diff --git a/coordinator/coordinator_test.go b/coordinator/coordinator_test.go
new file mode 100644
index 0000000..33c9191
--- /dev/null
+++ b/coordinator/coordinator_test.go
@@ -0,0 +1,76 @@
+package coordinator
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/hermeznetwork/hermez-node/batchbuilder"
+	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/db/l2db"
+	"github.com/hermeznetwork/hermez-node/db/statedb"
+	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/txselector"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBuilder) { // FUTURE once Synchronizer is ready, should return it also
+	nLevels := 32
+
+	synchDB, err := ioutil.TempDir("", "tmpSynchDB")
+	require.Nil(t, err)
+	sdb, err := statedb.NewStateDB(synchDB, true, nLevels)
+	assert.Nil(t, err)
+
+	pass := os.Getenv("POSTGRES_PASS")
+	l2DB, err := l2db.NewL2DB(5432, "localhost", "hermez", pass, "l2", 10, 512, 24*time.Hour)
+	require.Nil(t, err)
+
+	txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
+	require.Nil(t, err)
+	txsel, err := txselector.NewTxSelector(txselDir, sdb, l2DB, 10, 10, 10)
+	assert.Nil(t, err)
+
+	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
+	require.Nil(t, err)
+	bb, err := batchbuilder.NewBatchBuilder(bbDir, sdb, nil, 0, uint64(nLevels))
+	assert.Nil(t, err)
+
+	// l1Txs, coordinatorL1Txs, poolL2Txs := test.GenerateTestTxsFromSet(t, test.SetTest0)
+
+	return txsel, bb
+}
+
+func TestCoordinator(t *testing.T) {
+	txsel, bb := newTestModules(t)
+
+	conf := CoordinatorConfig{
+		LoopInterval: 100 * time.Millisecond,
+	}
+	hdb := &historydb.HistoryDB{}
+	c := NewCoordinator(conf, hdb, txsel, bb, nil)
+	c.Start()
+	time.Sleep(1 * time.Second)
+
+	// simulate forgeSequence time
+	log.Debug("simulate entering in forge time")
+	c.isForgeSeq = true
+	time.Sleep(1 * time.Second)
+
+	// simulate going out from forgeSequence
+	log.Debug("simulate going out from forge time")
+	c.isForgeSeq = false
+	time.Sleep(1 * time.Second)
+
+	// simulate entering forgeSequence time again
+	log.Debug("simulate entering in forge time again")
+	c.isForgeSeq = true
+	time.Sleep(1 * time.Second)
+
+	// simulate stopping forgerLoop by channel
+	log.Debug("simulate stopping forgerLoop by closing coordinator stopch")
+	c.Stop()
+	time.Sleep(1 * time.Second)
+}
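The Coordinator.Start and forgerLoop changes above all follow the same shape: a goroutine that ticks on time.After(LoopInterval) and exits when a stop channel is closed, so that closing stopforgerch stops the three forger goroutines at once. A minimal standalone sketch of that pattern, with illustrative names (runLoop, work) that are not part of the codebase and a struct{} channel instead of the chan bool used above:

package main

import (
	"fmt"
	"time"
)

// runLoop runs work every interval until stop is closed. Closing the channel
// (rather than sending on it) is what lets several goroutines sharing the
// same stop channel all return together, as forgerLoop does with stopforgerch.
func runLoop(stop <-chan struct{}, interval time.Duration, work func()) {
	go func() {
		for {
			select {
			case <-stop:
				return
			case <-time.After(interval):
				work()
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	runLoop(stop, 100*time.Millisecond, func() { fmt.Println("tick") })
	time.Sleep(350 * time.Millisecond)
	close(stop) // stops the loop
	time.Sleep(100 * time.Millisecond)
}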
diff --git a/db/statedb/statedb.go b/db/statedb/statedb.go
index f608383..7f4c231 100644
--- a/db/statedb/statedb.go
+++ b/db/statedb/statedb.go
@@ -130,7 +130,7 @@ func (s *StateDB) MakeCheckpoint() error {
 }
 
 // DeleteCheckpoint removes if exist the checkpoint of the given batchNum
-func (s *StateDB) DeleteCheckpoint(batchNum uint64) error {
+func (s *StateDB) DeleteCheckpoint(batchNum common.BatchNum) error {
 	checkpointPath := s.path + PATHBATCHNUM + strconv.Itoa(int(batchNum))
 
 	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
@@ -144,7 +144,7 @@ func (s *StateDB) DeleteCheckpoint(batchNum uint64) error {
 // does not delete the checkpoints between old current and the new current,
 // those checkpoints will remain in the storage, and eventually will be
 // deleted when MakeCheckpoint overwrites them.
-func (s *StateDB) Reset(batchNum uint64) error {
+func (s *StateDB) Reset(batchNum common.BatchNum) error {
 	if batchNum == 0 {
 		s.idx = 0
 		return nil
@@ -331,7 +331,7 @@ func NewLocalStateDB(path string, synchronizerDB *StateDB, withMT bool, nLevels
 
 // Reset performs a reset in the LocaStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
-func (l *LocalStateDB) Reset(batchNum uint64, fromSynchronizer bool) error {
+func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	if batchNum == 0 {
 		l.idx = 0
 		return nil
diff --git a/db/statedb/statedb_test.go b/db/statedb/statedb_test.go
index df35909..39ec264 100644
--- a/db/statedb/statedb_test.go
+++ b/db/statedb/statedb_test.go
@@ -193,13 +193,13 @@ func TestCheckpoints(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, common.BatchNum(4), cb)
 
-	err = sdb.DeleteCheckpoint(uint64(9))
+	err = sdb.DeleteCheckpoint(common.BatchNum(9))
 	assert.Nil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(10))
+	err = sdb.DeleteCheckpoint(common.BatchNum(10))
 	assert.Nil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(9)) // does not exist, should return err
+	err = sdb.DeleteCheckpoint(common.BatchNum(9)) // does not exist, should return err
 	assert.NotNil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(11)) // does not exist, should return err
+	err = sdb.DeleteCheckpoint(common.BatchNum(11)) // does not exist, should return err
 	assert.NotNil(t, err)
 
 	// Create a LocalStateDB from the initial StateDB
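For context on the StateDB API touched above: MakeCheckpoint freezes the current state under a new BatchNum, Reset moves the current state back to a previous checkpoint (leaving later checkpoints on disk, per the doc comment), and DeleteCheckpoint removes one explicitly. A minimal usage sketch, assuming the NewStateDB signature used in coordinator_test.go in this same diff; it is an illustration, not code from the repository:

package main

import (
	"io/ioutil"
	"log"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

func main() {
	dir, _ := ioutil.TempDir("", "sdb")
	// same constructor call as in coordinator_test.go: path, withMT, nLevels
	sdb, err := statedb.NewStateDB(dir, true, 32)
	if err != nil {
		log.Fatal(err)
	}

	// ... process txs for batch 1, then freeze the state ...
	if err := sdb.MakeCheckpoint(); err != nil {
		log.Fatal(err)
	}
	// ... process txs for batch 2, then freeze again ...
	if err := sdb.MakeCheckpoint(); err != nil {
		log.Fatal(err)
	}

	// roll the current state back to the checkpoint of batch 1;
	// checkpoint 2 is left in storage until overwritten or deleted
	if err := sdb.Reset(common.BatchNum(1)); err != nil {
		log.Fatal(err)
	}

	// remove the now-stale checkpoint explicitly (errors if it does not exist)
	if err := sdb.DeleteCheckpoint(common.BatchNum(2)); err != nil {
		log.Fatal(err)
	}
}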
diff --git a/db/statedb/txprocessors.go b/db/statedb/txprocessors.go
index e7f38e6..7db03af 100644
--- a/db/statedb/txprocessors.go
+++ b/db/statedb/txprocessors.go
@@ -13,20 +13,12 @@ import (
 // keyidx is used as key in the db to store the current Idx
 var keyidx = []byte("idx")
 
-// FUTURE This will be used from common once pending PR is merged
-type ExitInfo struct {
-	Idx       *common.Idx
-	Proof     *merkletree.CircomVerifierProof
-	Nullifier *big.Int
-	Balance   *big.Int
-}
-
 // ProcessTxs process the given L1Txs & L2Txs applying the needed updates to
 // the StateDB depending on the transaction Type. Returns the common.ZKInputs
 // to generate the SnarkProof later used by the BatchBuilder, and if
 // cmpExitTree is set to true, returns common.ExitTreeLeaf that is later used
 // by the Synchronizer to update the HistoryDB.
-func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx) (*common.ZKInputs, []*ExitInfo, error) {
+func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx) (*common.ZKInputs, []*common.ExitInfo, error) {
 	var err error
 	var exitTree *merkletree.MerkleTree
 	exits := make(map[common.Idx]common.Account)
@@ -71,8 +63,8 @@ func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*co
 	}
 
 	// once all txs processed (exitTree root frozen), for each leaf
-	// generate ExitInfo data
-	var exitInfos []*ExitInfo
+	// generate common.ExitInfo data
+	var exitInfos []*common.ExitInfo
 	for exitIdx, exitAccount := range exits {
 		// 0. generate MerkleProof
 		p, err := exitTree.GenerateCircomVerifierProof(exitIdx.BigInt(), nil)
@@ -92,12 +84,12 @@ func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*co
 		if err != nil {
 			return nil, nil, err
 		}
-		// 2. generate ExitInfo
-		ei := &ExitInfo{
-			Idx:       &exitIdx,
-			Proof:     p,
-			Nullifier: nullifier,
-			Balance:   exitAccount.Balance,
+		// 2. generate common.ExitInfo
+		ei := &common.ExitInfo{
+			AccountIdx:  exitIdx,
+			MerkleProof: p,
+			Nullifier:   nullifier,
+			Balance:     exitAccount.Balance,
 		}
 		exitInfos = append(exitInfos, ei)
 	}
diff --git a/eth/client.go b/eth/client.go
index 4328f74..5b2e5da 100644
--- a/eth/client.go
+++ b/eth/client.go
@@ -195,9 +195,18 @@ func (c *Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.He
 	return c.client.HeaderByNumber(ctx, number)
 }
 
-// BlockByNumber internally calls ethclient.Client BlockByNumber
-func (c *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
-	return c.client.BlockByNumber(ctx, number)
+// BlockByNumber internally calls ethclient.Client BlockByNumber and returns *common.Block
+func (c *Client) BlockByNumber(ctx context.Context, number *big.Int) (*common.Block, error) {
+	block, err := c.client.BlockByNumber(ctx, number)
+	if err != nil {
+		return nil, err
+	}
+	b := &common.Block{
+		EthBlockNum: block.Number().Uint64(),
+		Timestamp:   time.Unix(int64(block.Time()), 0),
+		Hash:        block.Hash(),
+	}
+	return b, nil
 }
 
 func (c *Client) ForgeCall(callData *common.CallDataForge) ([]byte, error) {
diff --git a/log/log.go b/log/log.go
index c86a709..53fe7e1 100644
--- a/log/log.go
+++ b/log/log.go
@@ -57,7 +57,8 @@ func Init(levelStr, errorsPath string) {
 	}
 	//nolint:errcheck
 	defer logger.Sync()
-	log = logger.Sugar()
+	withOptions := logger.WithOptions(zap.AddCallerSkip(1))
+	log = withOptions.Sugar()
 
 	if errorsPath != "" {
 		log.Infof("file where errors will be written: %s", errorsPath)
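The log/log.go change above wraps the zap logger with zap.AddCallerSkip(1). Because this package exposes package-level helpers (log.Info, log.Debugf, ...) that forward to the sugared logger, without the extra skipped frame every log line would report log/log.go as the caller instead of the real call site. A reduced sketch of the idea; the wrapper names here are illustrative, not the package's actual API:

package main

import "go.uber.org/zap"

var sugar *zap.SugaredLogger

func initLogger() {
	logger, _ := zap.NewDevelopment()
	// Skip one stack frame so the caller annotation points at the code that
	// called Info(), not at this wrapper.
	sugar = logger.WithOptions(zap.AddCallerSkip(1)).Sugar()
}

// Info is a package-level wrapper, one frame above the underlying zap call.
func Info(args ...interface{}) { sugar.Info(args...) }

func main() {
	initLogger()
	Info("hello") // the caller field shows main.go, not the wrapper file
}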
diff --git a/test/txs.go b/test/txs.go
index b5e10f5..49be7ec 100644
--- a/test/txs.go
+++ b/test/txs.go
@@ -4,6 +4,7 @@ import (
 	"crypto/ecdsa"
 	"math/big"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
@@ -11,6 +12,7 @@ import (
 	ethCrypto "github.com/ethereum/go-ethereum/crypto"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/iden3/go-iden3-crypto/babyjub"
+	"github.com/stretchr/testify/require"
 )
 
 type Account struct {
@@ -53,12 +55,12 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 	accounts := GenerateKeys(t, instructions.Accounts)
 	l1CreatedAccounts := make(map[string]*Account)
 
-	var batchL1txs []*common.L1Tx
-	var batchCoordinatorL1txs []*common.L1Tx
-	var batchL2txs []*common.PoolL2Tx
-	var l1txs [][]*common.L1Tx
-	var coordinatorL1txs [][]*common.L1Tx
-	var l2txs [][]*common.PoolL2Tx
+	var batchL1Txs []*common.L1Tx
+	var batchCoordinatorL1Txs []*common.L1Tx
+	var batchPoolL2Txs []*common.PoolL2Tx
+	var l1Txs [][]*common.L1Tx
+	var coordinatorL1Txs [][]*common.L1Tx
+	var poolL2Txs [][]*common.PoolL2Tx
 	idx := 1
 	for _, inst := range instructions.Instructions {
 		switch inst.Type {
@@ -71,7 +73,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				LoadAmount: big.NewInt(int64(inst.Amount)),
 				Type:       common.TxTypeCreateAccountDeposit,
 			}
-			batchL1txs = append(batchL1txs, &tx)
+			batchL1Txs = append(batchL1Txs, &tx)
 			if accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx == common.Idx(0) { // if account.Idx is not set yet, set it and increment idx
 				accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx = common.Idx(idx)
 
@@ -90,7 +92,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 			}
 			accounts[idxTokenIDToString(inst.To, inst.TokenID)].Idx = common.Idx(idx)
 			l1CreatedAccounts[idxTokenIDToString(inst.To, inst.TokenID)] = accounts[idxTokenIDToString(inst.To, inst.TokenID)]
-			batchCoordinatorL1txs = append(batchCoordinatorL1txs, &tx)
+			batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, &tx)
 
 			idx++
 		}
@@ -120,7 +122,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 			tx.Signature = sig
 
 			accounts[idxTokenIDToString(inst.From, inst.TokenID)].Nonce++
-			batchL2txs = append(batchL2txs, &tx)
+			batchPoolL2Txs = append(batchPoolL2Txs, &tx)
 
 		case common.TxTypeExit, common.TxTypeForceExit:
 			tx := common.L1Tx{
@@ -130,22 +132,30 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				Amount:  big.NewInt(int64(inst.Amount)),
 				Type:    common.TxTypeExit,
 			}
-			batchL1txs = append(batchL1txs, &tx)
+			batchL1Txs = append(batchL1Txs, &tx)
 		case TypeNewBatch:
-			l1txs = append(l1txs, batchL1txs)
-			coordinatorL1txs = append(coordinatorL1txs, batchCoordinatorL1txs)
-			l2txs = append(l2txs, batchL2txs)
-			batchL1txs = []*common.L1Tx{}
-			batchCoordinatorL1txs = []*common.L1Tx{}
-			batchL2txs = []*common.PoolL2Tx{}
+			l1Txs = append(l1Txs, batchL1Txs)
+			coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
+			poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
+			batchL1Txs = []*common.L1Tx{}
+			batchCoordinatorL1Txs = []*common.L1Tx{}
+			batchPoolL2Txs = []*common.PoolL2Tx{}
 		default:
 			continue
 		}
 	}
 
-	l1txs = append(l1txs, batchL1txs)
-	coordinatorL1txs = append(coordinatorL1txs, batchCoordinatorL1txs)
-	l2txs = append(l2txs, batchL2txs)
+	l1Txs = append(l1Txs, batchL1Txs)
+	coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
+	poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
 
-	return l1txs, coordinatorL1txs, l2txs
+	return l1Txs, coordinatorL1Txs, poolL2Txs
+}
+
+func GenerateTestTxsFromSet(t *testing.T, set string) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx) {
+	parser := NewParser(strings.NewReader(set))
+	instructions, err := parser.Parse()
+	require.Nil(t, err)
+
+	return GenerateTestTxs(t, instructions)
 }
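The new GenerateTestTxsFromSet helper simply chains the parser and GenerateTestTxs, so a test can go from a textual instruction set to per-batch groups of txs in one call, as the commented-out line in coordinator_test.go hints. A possible usage sketch inside a test in this package (SetTest0 is assumed to be one of the predefined instruction sets; the test itself is illustrative):

func TestGenerateFromSet(t *testing.T) {
	// The three slices come back grouped per batch: one group per
	// TypeNewBatch instruction plus the trailing group.
	l1Txs, coordinatorL1Txs, poolL2Txs := GenerateTestTxsFromSet(t, SetTest0)
	require.Equal(t, len(l1Txs), len(coordinatorL1Txs))
	require.Equal(t, len(l1Txs), len(poolL2Txs))
}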
diff --git a/txselector/txselector.go b/txselector/txselector.go
index 360e595..c5f6f63 100644
--- a/txselector/txselector.go
+++ b/txselector/txselector.go
@@ -52,7 +52,7 @@ func NewTxSelector(dbpath string, synchronizerStateDB *statedb.StateDB, l2 *l2db
 
 // Reset tells the TxSelector to get it's internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum uint64) error {
+func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
 	err := txsel.localAccountsDB.Reset(batchNum, true)
 	if err != nil {
 		return err
@@ -61,7 +61,7 @@ func (txsel *TxSelector) Reset(batchNum uint64) error {
 }
 
 // GetL2TxSelection returns a selection of the L2Txs for the next batch, from the L2DB pool
-func (txsel *TxSelector) GetL2TxSelection(batchNum uint64) ([]*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.PoolL2Tx, error) {
 	// get pending l2-tx from tx-pool
 	l2TxsRaw, err := txsel.l2db.GetPendingTxs() // once l2db ready, maybe use parameter 'batchNum'
 	if err != nil {
@@ -90,7 +90,7 @@ func (txsel *TxSelector) GetL2TxSelection(batchNum uint64) ([]*common.PoolL2Tx,
 }
 
 // GetL1L2TxSelection returns the selection of L1 + L2 txs
-func (txsel *TxSelector) GetL1L2TxSelection(batchNum uint64, l1txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
 	// apply l1-user-tx to localAccountDB
 	// create new leaves
 	// update balances
@@ -153,6 +153,9 @@ func (txsel *TxSelector) checkIfAccountExistOrPending(idx common.Idx) bool {
 func (txsel *TxSelector) getL2Profitable(txs txs, max uint64) txs {
 	sort.Sort(txs)
+	if len(txs) < int(max) {
+		return txs
+	}
 	return txs[:max]
 }
 
 func (txsel *TxSelector) createL1OperatorTxForL2Tx(accounts []*common.Account) []*common.L1Tx {
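The guard added to getL2Profitable above avoids a slice-bounds panic: when there are fewer pending txs than max, the unguarded txs[:max] panics with "slice bounds out of range". A minimal reproduction of the pattern and its fix, using plain ints instead of the package's txs type (illustrative, not repository code):

package main

import (
	"fmt"
	"sort"
)

// takeTop returns at most max elements after sorting, mirroring the guarded
// version of getL2Profitable.
func takeTop(xs []int, max uint64) []int {
	sort.Ints(xs)
	if len(xs) < int(max) {
		return xs // without this guard, xs[:max] below would panic
	}
	return xs[:max]
}

func main() {
	fmt.Println(takeTop([]int{3, 1, 2}, 5)) // [1 2 3], no panic
	fmt.Println(takeTop([]int{3, 1, 2}, 2)) // [1 2]
}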