diff --git a/batchbuilder/batchbuilder.go b/batchbuilder/batchbuilder.go
index 7d28f5d..b92a0dd 100644
--- a/batchbuilder/batchbuilder.go
+++ b/batchbuilder/batchbuilder.go
@@ -52,10 +52,10 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
 func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
-	zkInputs, _, _, err := bb.localStateDB.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
+	ptOut, err := bb.localStateDB.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
 	if err != nil {
 		return nil, err
 	}
 	err = bb.localStateDB.MakeCheckpoint()
-	return zkInputs, err
+	return ptOut.ZKInputs, err
 }
diff --git a/db/statedb/txprocessors.go b/db/statedb/txprocessors.go
index 26a3b3c..cc2f182 100644
--- a/db/statedb/txprocessors.go
+++ b/db/statedb/txprocessors.go
@@ -21,7 +21,7 @@ var (
 
 func (s *StateDB) resetZKInputs() {
 	s.zki = nil
-	s.i = 0
+	s.i = 0 // initialize current transaction index in the ZKInputs generation
 }
 
 type processedExit struct {
@@ -31,6 +31,14 @@ type processedExit struct {
 	acc     common.Account
 }
 
+// ProcessTxOutput contains the output of the ProcessTxs method
+type ProcessTxOutput struct {
+	ZKInputs           *common.ZKInputs
+	ExitInfos          []common.ExitInfo
+	CreatedAccounts    []common.Account
+	CoordinatorIdxsMap map[common.TokenID]common.Idx
+}
+
 // ProcessTxs process the given L1Txs & L2Txs applying the needed updates to
 // the StateDB depending on the transaction Type. If StateDB
 // type==TypeBatchBuilder, returns the common.ZKInputs to generate the
@@ -40,27 +48,32 @@ type processedExit struct {
 // the HistoryDB, and adds Nonce & TokenID to the L2Txs.
 // And if TypeSynchronizer returns an array of common.Account with all the
 // created accounts.
-func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, []common.Account, error) {
+func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*ProcessTxOutput, error) {
 	var err error
 	var exitTree *merkletree.MerkleTree
 	var createdAccounts []common.Account
 
 	if s.zki != nil {
-		return nil, nil, nil, errors.New("Expected StateDB.zki==nil, something went wrong and it's not empty")
+		return nil, errors.New("Expected StateDB.zki==nil, something went wrong and it's not empty")
 	}
 	defer s.resetZKInputs()
 
 	nTx := len(l1usertxs) + len(l1coordinatortxs) + len(l2txs)
 	if nTx == 0 {
 		// TODO return ZKInputs of batch without txs
-		return nil, nil, nil, nil
+		return &ProcessTxOutput{
+			ZKInputs:           nil,
+			ExitInfos:          nil,
+			CreatedAccounts:    nil,
+			CoordinatorIdxsMap: nil,
+		}, nil
 	}
 	exits := make([]processedExit, nTx)
 
 	// get TokenIDs of coordIdxs
 	coordIdxsMap, err := s.getTokenIDsFromIdxs(coordIdxs)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, err
 	}
 
 	if s.typ == TypeBatchBuilder {
@@ -74,7 +87,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
 		tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		defer func() {
 			if err := os.RemoveAll(tmpDir); err != nil {
@@ -83,11 +96,11 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 		}()
 		sto, err := pebble.NewPebbleStorage(tmpDir, false)
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		exitTree, err = merkletree.NewMerkleTree(sto, s.mt.MaxLevels())
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 	}
 
@@ -95,7 +108,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	for i := 0; i < len(l1usertxs); i++ {
 		exitIdx, exitAccount, newExit, createdAccount, err := s.processL1Tx(exitTree, &l1usertxs[i])
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
 			if exitIdx != nil && exitTree != nil {
@@ -115,7 +128,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	for i := 0; i < len(l1coordinatortxs); i++ {
 		exitIdx, _, _, createdAccount, err := s.processL1Tx(exitTree, &l1coordinatortxs[i])
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		if exitIdx != nil {
 			log.Error("Unexpected Exit in L1CoordinatorTx")
@@ -127,7 +140,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	for i := 0; i < len(l2txs); i++ {
 		exitIdx, exitAccount, newExit, err := s.processL2Tx(coordIdxsMap, exitTree, &l2txs[i])
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
 			if exitIdx != nil && exitTree != nil {
@@ -143,12 +156,13 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	}
 
 	if s.typ == TypeTxSelector {
-		return nil, nil, nil, nil
+		return nil, nil
 	}
 
 	// once all txs processed (exitTree root frozen), for each Exit,
 	// generate common.ExitInfo data
 	var exitInfos []common.ExitInfo
+	// exitInfos := []common.ExitInfo{}
 	for i := 0; i < nTx; i++ {
 		if !exits[i].exit {
 			continue
 		}
@@ -159,7 +173,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 		// 0. generate MerkleProof
 		p, err := exitTree.GenerateCircomVerifierProof(exitIdx.BigInt(), nil)
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, err
 		}
 		// 1. generate common.ExitInfo
 		ei := common.ExitInfo{
@@ -190,9 +204,14 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 		}
 	}
 	if s.typ == TypeSynchronizer {
-		// return exitInfos and createdAccounts, so Synchronizer will be able
-		// to store it into HistoryDB for the concrete BatchNum
-		return nil, exitInfos, createdAccounts, nil
+		// return exitInfos and createdAccounts, so Synchronizer will
+		// be able to store it into HistoryDB for the concrete BatchNum
+		return &ProcessTxOutput{
+			ZKInputs:           nil,
+			ExitInfos:          exitInfos,
+			CreatedAccounts:    createdAccounts,
+			CoordinatorIdxsMap: coordIdxsMap,
+		}, nil
 	}
 
 	// compute last ZKInputs parameters
@@ -200,7 +219,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	// zki.FeeIdxs = ? // TODO, this will be get from the config file
 	tokenIDs, err := s.getTokenIDsBigInt(l1usertxs, l1coordinatortxs, l2txs)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, err
 	}
 	s.zki.FeePlanTokens = tokenIDs
 
@@ -210,7 +229,12 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
 	// compute fees & update ZKInputs
 
 	// return ZKInputs as the BatchBuilder will return it to forge the Batch
-	return s.zki, nil, nil, nil
+	return &ProcessTxOutput{
+		ZKInputs:           s.zki,
+		ExitInfos:          nil,
+		CreatedAccounts:    nil,
+		CoordinatorIdxsMap: coordIdxsMap,
+	}, nil
 }
 
 // getTokenIDsBigInt returns the list of TokenIDs in *big.Int format
diff --git a/db/statedb/txprocessors_test.go b/db/statedb/txprocessors_test.go
index c87390f..10498fc 100644
--- a/db/statedb/txprocessors_test.go
+++ b/db/statedb/txprocessors_test.go
@@ -45,50 +45,50 @@ func TestProcessTxsSynchronizer(t *testing.T) {
 	idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx
 
 	log.Debug("1st batch, 1st block, only L1CoordinatorTxs")
-	_, _, createdAccounts, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
+	ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
 	require.Nil(t, err)
-	assert.Equal(t, 4, len(createdAccounts))
+	assert.Equal(t, 4, len(ptOut.CreatedAccounts))
 
 	log.Debug("2nd batch, 1st block")
 	l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
-	_, exitInfos, createdAccounts, err := sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
+	ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
 	require.Nil(t, err)
-	assert.Equal(t, 0, len(exitInfos))
-	assert.Equal(t, 31, len(createdAccounts))
+	assert.Equal(t, 0, len(ptOut.ExitInfos))
+	assert.Equal(t, 31, len(ptOut.CreatedAccounts))
 	acc, err := sdb.GetAccount(idxA1)
 	require.Nil(t, err)
 	assert.Equal(t, "50", acc.Balance.String())
 
 	log.Debug("3rd batch, 1st block")
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs)
-	_, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
+	ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
 	require.Nil(t, err)
 	// TODO once TTGL is updated, add a check that a input poolL2Tx with
 	// Nonce & TokenID =0, after ProcessTxs call has the expected value
-	assert.Equal(t, 0, len(exitInfos))
-	assert.Equal(t, 0, len(createdAccounts))
+	assert.Equal(t, 0, len(ptOut.ExitInfos))
+	assert.Equal(t, 0, len(ptOut.CreatedAccounts))
 	acc, err = sdb.GetAccount(idxA1)
 	require.Nil(t, err)
 	assert.Equal(t, "28", acc.Balance.String())
 
 	log.Debug("1st batch, 2nd block")
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[0].L2Txs)
-	_, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
+	ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
 	require.Nil(t, err)
-	assert.Equal(t, 4, len(exitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
-	assert.Equal(t, 1, len(createdAccounts))
+	assert.Equal(t, 4, len(ptOut.ExitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
+	assert.Equal(t, 1, len(ptOut.CreatedAccounts))
 	acc, err = sdb.GetAccount(idxA1)
 	require.Nil(t, err)
 	assert.Equal(t, "53", acc.Balance.String())
 
 	log.Debug("2nd batch, 2nd block")
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[1].L2Txs)
-	_, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
+	ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
 	require.Nil(t, err)
-	assert.Equal(t, 2, len(exitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
-	assert.Equal(t, 1, len(createdAccounts))
+	assert.Equal(t, 2, len(ptOut.ExitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
+	assert.Equal(t, 1, len(ptOut.CreatedAccounts))
 	acc, err = sdb.GetAccount(idxA1)
 	assert.Nil(t, err)
 	assert.Equal(t, "78", acc.Balance.String())
diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go
index 61e8d43..3eb6e30 100644
--- a/synchronizer/synchronizer.go
+++ b/synchronizer/synchronizer.go
@@ -405,7 +405,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
 		// TODO: Get CollectedFees from ProcessTxs()
 		// TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
 		// ProcessTxs updates poolL2Txs adding: Nonce, TokenID
-		_, exitInfo, _, err := s.stateDB.ProcessTxs([]common.Idx{}, l1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
+		ptOut, err := s.stateDB.ProcessTxs([]common.Idx{}, l1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
 		if err != nil {
 			return nil, err
 		}
@@ -429,7 +429,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
 			position++
 		}
 
-		batchData.ExitTree = exitInfo
+		batchData.ExitTree = ptOut.ExitInfos
 
 		slotNum := int64(0)
 		if ethBlock.EthBlockNum >= s.auctionConstants.GenesisBlockNum {
diff --git a/txselector/txselector.go b/txselector/txselector.go
index d11f683..f4553fc 100644
--- a/txselector/txselector.go
+++ b/txselector/txselector.go
@@ -89,7 +89,7 @@ func (txsel *TxSelector) GetL2TxSelection(coordIdxs []common.Idx, batchNum commo
 	txs := txsel.getL2Profitable(validTxs, txsel.MaxTxs)
 
 	// process the txs in the local AccountsDB
-	_, _, _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, nil, nil, txs)
+	_, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, nil, nil, txs)
 	if err != nil {
 		return nil, err
 	}
@@ -238,7 +238,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(coordIdxs []common.Idx, batchNum com
 	l2Txs := txsel.getL2Profitable(validTxs, maxL2Txs)
 
 	// process the txs in the local AccountsDB
-	_, _, _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
+	_, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
 	if err != nil {
 		return nil, nil, nil, err
 	}
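
Not part of the diff above: a minimal sketch of how a caller might consume the new single ProcessTxOutput return value instead of the old four positional returns, assuming the hermez-node package layout. The forgeBatchInputs helper and its wiring are illustrative assumptions, not code from this change.

// Illustrative sketch only (not part of the commit). It exercises the new
// ProcessTxs signature: one *ProcessTxOutput plus an error.
package example

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

// forgeBatchInputs (hypothetical helper) processes one batch worth of
// transactions and returns the ZKInputs when sdb is a TypeBatchBuilder
// StateDB. A TypeSynchronizer caller would instead read ptOut.ExitInfos
// and ptOut.CreatedAccounts.
func forgeBatchInputs(sdb *statedb.StateDB, coordIdxs []common.Idx,
	l1UserTxs, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) (*common.ZKInputs, error) {
	ptOut, err := sdb.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
	if err != nil {
		return nil, err
	}
	if ptOut == nil {
		// A TypeTxSelector StateDB returns (nil, nil): there is nothing to forge.
		return nil, nil
	}
	// CoordinatorIdxsMap maps each fee TokenID to the coordinator Idx used to
	// collect that token's fees; logged here only for illustration.
	fmt.Printf("fee tokens handled by the coordinator: %d\n", len(ptOut.CoordinatorIdxsMap))
	return ptOut.ZKInputs, nil
}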