mirror of https://github.com/arnaucube/hermez-node.git
Group ProcessTxs output
@@ -52,10 +52,10 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
 func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
-    zkInputs, _, _, err := bb.localStateDB.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
+    ptOut, err := bb.localStateDB.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
     if err != nil {
         return nil, err
     }
     err = bb.localStateDB.MakeCheckpoint()
-    return zkInputs, err
+    return ptOut.ZKInputs, err
 }
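The BuildBatch change above is the caller side of the refactor named in the commit title: ProcessTxs now returns one output struct instead of four positional values, and the call site reads only the field it needs (ptOut.ZKInputs). A minimal, self-contained sketch of the pattern, assuming nothing beyond the diff; the names Result and process are illustrative, not hermez-node API.

package main

import "fmt"

// Result groups what used to be several positional return values.
type Result struct {
	Inputs   []int
	Exits    []string
	Accounts []string
}

// process is a stand-in for a function that previously returned
// ([]int, []string, []string, error) and now returns (*Result, error).
func process(n int) (*Result, error) {
	return &Result{
		Inputs:   make([]int, n),
		Exits:    nil,
		Accounts: []string{"acc0"},
	}, nil
}

func main() {
	// The caller no longer discards unused values with blank identifiers;
	// it reads the field it cares about, as BuildBatch now does with
	// ptOut.ZKInputs.
	res, err := process(3)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(res.Inputs), len(res.Accounts))
}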
@@ -21,7 +21,7 @@ var (
 
 func (s *StateDB) resetZKInputs() {
     s.zki = nil
-    s.i = 0
+    s.i = 0 // initialize current transaction index in the ZKInputs generation
 }
 
 type processedExit struct {
@@ -31,6 +31,14 @@ type processedExit struct {
     acc common.Account
 }
 
+// ProcessTxOutput contains the output of the ProcessTxs method
+type ProcessTxOutput struct {
+    ZKInputs           *common.ZKInputs
+    ExitInfos          []common.ExitInfo
+    CreatedAccounts    []common.Account
+    CoordinatorIdxsMap map[common.TokenID]common.Idx
+}
+
 // ProcessTxs process the given L1Txs & L2Txs applying the needed updates to
 // the StateDB depending on the transaction Type. If StateDB
 // type==TypeBatchBuilder, returns the common.ZKInputs to generate the
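The fields above are everything the diff defines for ProcessTxOutput. A hedged sketch of how a consumer might read them, with placeholder types standing in for the common package; the consume helper is hypothetical and only illustrates which fields each kind of caller (synchronizer-style vs batch-builder-style) is expected to use.

package main

import "fmt"

// Placeholder types standing in for common.ZKInputs, common.ExitInfo,
// common.Account, common.TokenID and common.Idx from the real codebase.
type (
	ZKInputs struct{}
	ExitInfo struct{}
	Account  struct{}
	TokenID  uint32
	Idx      uint64
)

// ProcessTxOutput mirrors the struct introduced in the diff.
type ProcessTxOutput struct {
	ZKInputs           *ZKInputs
	ExitInfos          []ExitInfo
	CreatedAccounts    []Account
	CoordinatorIdxsMap map[TokenID]Idx
}

// consume shows which fields each kind of caller reads: a synchronizer-style
// caller stores ExitInfos/CreatedAccounts, while a batch-builder-style caller
// only forwards ZKInputs.
func consume(ptOut *ProcessTxOutput, isSynchronizer bool) {
	if isSynchronizer {
		fmt.Println("exits:", len(ptOut.ExitInfos), "accounts:", len(ptOut.CreatedAccounts))
		return
	}
	fmt.Println("have ZKInputs:", ptOut.ZKInputs != nil)
}

func main() {
	consume(&ProcessTxOutput{ZKInputs: &ZKInputs{}}, false)
	consume(&ProcessTxOutput{ExitInfos: []ExitInfo{{}}}, true)
}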
@@ -40,27 +48,32 @@ type processedExit struct {
 // the HistoryDB, and adds Nonce & TokenID to the L2Txs.
 // And if TypeSynchronizer returns an array of common.Account with all the
 // created accounts.
-func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, []common.Account, error) {
+func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*ProcessTxOutput, error) {
     var err error
     var exitTree *merkletree.MerkleTree
     var createdAccounts []common.Account
 
     if s.zki != nil {
-        return nil, nil, nil, errors.New("Expected StateDB.zki==nil, something went wrong and it's not empty")
+        return nil, errors.New("Expected StateDB.zki==nil, something went wrong and it's not empty")
     }
     defer s.resetZKInputs()
 
     nTx := len(l1usertxs) + len(l1coordinatortxs) + len(l2txs)
     if nTx == 0 {
         // TODO return ZKInputs of batch without txs
-        return nil, nil, nil, nil
+        return &ProcessTxOutput{
+            ZKInputs:           nil,
+            ExitInfos:          nil,
+            CreatedAccounts:    nil,
+            CoordinatorIdxsMap: nil,
+        }, nil
     }
     exits := make([]processedExit, nTx)
 
     // get TokenIDs of coordIdxs
     coordIdxsMap, err := s.getTokenIDsFromIdxs(coordIdxs)
     if err != nil {
-        return nil, nil, nil, err
+        return nil, err
     }
 
     if s.typ == TypeBatchBuilder {
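The guard on s.zki together with defer s.resetZKInputs() keeps the per-batch ZKInputs state from leaking between calls. A minimal sketch of that guard-plus-deferred-reset pattern, not repository code; processor and run are illustrative names only.

package main

import (
	"errors"
	"fmt"
)

// processor is a minimal stand-in for the StateDB zki handling shown above:
// a guard rejects a dirty state, and a deferred reset guarantees the
// intermediate ZKInputs-like state is cleared on every exit path.
type processor struct {
	zki *struct{} // stand-in for the in-progress ZKInputs
	i   int       // stand-in for the current transaction index
}

func (p *processor) reset() {
	p.zki = nil
	p.i = 0
}

func (p *processor) run(nTx int) error {
	if p.zki != nil {
		return errors.New("expected zki == nil, previous run left state behind")
	}
	defer p.reset() // mirrors defer s.resetZKInputs()

	p.zki = &struct{}{}
	for ; p.i < nTx; p.i++ {
		// process one transaction...
	}
	return nil
}

func main() {
	p := &processor{}
	fmt.Println(p.run(3)) // <nil>
	fmt.Println(p.run(2)) // <nil> again, because reset ran via defer
}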
@@ -74,7 +87,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
         tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         defer func() {
             if err := os.RemoveAll(tmpDir); err != nil {
@@ -83,11 +96,11 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
         }()
         sto, err := pebble.NewPebbleStorage(tmpDir, false)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         exitTree, err = merkletree.NewMerkleTree(sto, s.mt.MaxLevels())
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
     }
 
@@ -95,7 +108,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     for i := 0; i < len(l1usertxs); i++ {
         exitIdx, exitAccount, newExit, createdAccount, err := s.processL1Tx(exitTree, &l1usertxs[i])
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
             if exitIdx != nil && exitTree != nil {
@@ -115,7 +128,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     for i := 0; i < len(l1coordinatortxs); i++ {
         exitIdx, _, _, createdAccount, err := s.processL1Tx(exitTree, &l1coordinatortxs[i])
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         if exitIdx != nil {
             log.Error("Unexpected Exit in L1CoordinatorTx")
@@ -127,7 +140,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     for i := 0; i < len(l2txs); i++ {
         exitIdx, exitAccount, newExit, err := s.processL2Tx(coordIdxsMap, exitTree, &l2txs[i])
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
             if exitIdx != nil && exitTree != nil {
@@ -143,12 +156,13 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     }
 
     if s.typ == TypeTxSelector {
-        return nil, nil, nil, nil
+        return nil, nil
    }
 
     // once all txs processed (exitTree root frozen), for each Exit,
     // generate common.ExitInfo data
     var exitInfos []common.ExitInfo
+    // exitInfos := []common.ExitInfo{}
     for i := 0; i < nTx; i++ {
         if !exits[i].exit {
             continue
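Two branches above produce "empty" results in different shapes: the nTx == 0 branch returns a non-nil *ProcessTxOutput whose fields are all nil, while the TypeTxSelector branch returns a nil pointer. A small sketch, assuming only what the diff shows, of how a caller can distinguish them; the check helper and the trimmed struct are illustrative only.

package main

import "fmt"

// Minimal stand-in for the ProcessTxOutput struct from the diff; only the
// fields needed for this illustration are mirrored here.
type ProcessTxOutput struct {
	ZKInputs  *struct{}
	ExitInfos []struct{}
}

// check illustrates the two "empty" shapes a caller may see after this
// change: a nil *ProcessTxOutput (the TypeTxSelector branch) and a non-nil
// output with nil fields (the nTx == 0 branch).
func check(ptOut *ProcessTxOutput) string {
	if ptOut == nil {
		return "tx-selector style call: only the error is meaningful"
	}
	if ptOut.ZKInputs == nil && len(ptOut.ExitInfos) == 0 {
		return "empty batch: non-nil output, nil fields"
	}
	return "batch with data"
}

func main() {
	fmt.Println(check(nil))
	fmt.Println(check(&ProcessTxOutput{}))
}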
@@ -159,7 +173,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
         // 0. generate MerkleProof
         p, err := exitTree.GenerateCircomVerifierProof(exitIdx.BigInt(), nil)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, err
         }
         // 1. generate common.ExitInfo
         ei := common.ExitInfo{
@@ -190,9 +204,14 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
         }
     }
     if s.typ == TypeSynchronizer {
-        // return exitInfos and createdAccounts, so Synchronizer will be able
-        // to store it into HistoryDB for the concrete BatchNum
-        return nil, exitInfos, createdAccounts, nil
+        // return exitInfos and createdAccounts, so Synchronizer will
+        // be able to store it into HistoryDB for the concrete BatchNum
+        return &ProcessTxOutput{
+            ZKInputs:           nil,
+            ExitInfos:          exitInfos,
+            CreatedAccounts:    createdAccounts,
+            CoordinatorIdxsMap: coordIdxsMap,
+        }, nil
     }
 
     // compute last ZKInputs parameters
@@ -200,7 +219,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     // zki.FeeIdxs = ? // TODO, this will be get from the config file
     tokenIDs, err := s.getTokenIDsBigInt(l1usertxs, l1coordinatortxs, l2txs)
     if err != nil {
-        return nil, nil, nil, err
+        return nil, err
     }
     s.zki.FeePlanTokens = tokenIDs
 
@@ -210,7 +229,12 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
     // compute fees & update ZKInputs
 
     // return ZKInputs as the BatchBuilder will return it to forge the Batch
-    return s.zki, nil, nil, nil
+    return &ProcessTxOutput{
+        ZKInputs:           s.zki,
+        ExitInfos:          nil,
+        CreatedAccounts:    nil,
+        CoordinatorIdxsMap: coordIdxsMap,
+    }, nil
 }
 
 // getTokenIDsBigInt returns the list of TokenIDs in *big.Int format
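The comment on the TypeSynchronizer return path says the grouped output lets the Synchronizer store exits and created accounts into the HistoryDB for the concrete BatchNum. A hedged sketch of that consumption pattern; historyStore and its Save method are hypothetical stand-ins, not the hermez-node HistoryDB API.

package main

import "fmt"

// Placeholder types mirroring just enough of the diff for illustration.
type (
	ExitInfo struct{ Balance string }
	Account  struct{ Idx uint64 }
	BatchNum uint32
)

type ProcessTxOutput struct {
	ExitInfos       []ExitInfo
	CreatedAccounts []Account
}

// historyStore is a hypothetical stand-in for a HistoryDB-like sink.
type historyStore struct {
	exitsByBatch    map[BatchNum][]ExitInfo
	accountsByBatch map[BatchNum][]Account
}

// Save keeps the per-batch outputs, which is the kind of use the
// TypeSynchronizer return path is written for.
func (h *historyStore) Save(batchNum BatchNum, ptOut *ProcessTxOutput) {
	h.exitsByBatch[batchNum] = ptOut.ExitInfos
	h.accountsByBatch[batchNum] = ptOut.CreatedAccounts
}

func main() {
	h := &historyStore{
		exitsByBatch:    map[BatchNum][]ExitInfo{},
		accountsByBatch: map[BatchNum][]Account{},
	}
	h.Save(1, &ProcessTxOutput{CreatedAccounts: []Account{{Idx: 256}}})
	fmt.Println(len(h.accountsByBatch[1]))
}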
@@ -45,50 +45,50 @@ func TestProcessTxsSynchronizer(t *testing.T) {
     idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx
 
     log.Debug("1st batch, 1st block, only L1CoordinatorTxs")
-    _, _, createdAccounts, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
+    ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
     require.Nil(t, err)
-    assert.Equal(t, 4, len(createdAccounts))
+    assert.Equal(t, 4, len(ptOut.CreatedAccounts))
 
     log.Debug("2nd batch, 1st block")
     l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
-    _, exitInfos, createdAccounts, err := sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    assert.Equal(t, 0, len(exitInfos))
-    assert.Equal(t, 31, len(createdAccounts))
+    assert.Equal(t, 0, len(ptOut.ExitInfos))
+    assert.Equal(t, 31, len(ptOut.CreatedAccounts))
     acc, err := sdb.GetAccount(idxA1)
     require.Nil(t, err)
     assert.Equal(t, "50", acc.Balance.String())
 
     log.Debug("3rd batch, 1st block")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs)
-    _, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
     // TODO once TTGL is updated, add a check that a input poolL2Tx with
     // Nonce & TokenID =0, after ProcessTxs call has the expected value
 
-    assert.Equal(t, 0, len(exitInfos))
-    assert.Equal(t, 0, len(createdAccounts))
+    assert.Equal(t, 0, len(ptOut.ExitInfos))
+    assert.Equal(t, 0, len(ptOut.CreatedAccounts))
     acc, err = sdb.GetAccount(idxA1)
     require.Nil(t, err)
     assert.Equal(t, "28", acc.Balance.String())
 
     log.Debug("1st batch, 2nd block")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[0].L2Txs)
-    _, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    assert.Equal(t, 4, len(exitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
-    assert.Equal(t, 1, len(createdAccounts))
+    assert.Equal(t, 4, len(ptOut.ExitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
+    assert.Equal(t, 1, len(ptOut.CreatedAccounts))
     acc, err = sdb.GetAccount(idxA1)
     require.Nil(t, err)
     assert.Equal(t, "53", acc.Balance.String())
 
     log.Debug("2nd batch, 2nd block")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[1].L2Txs)
-    _, exitInfos, createdAccounts, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
 
-    assert.Equal(t, 2, len(exitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
-    assert.Equal(t, 1, len(createdAccounts))
+    assert.Equal(t, 2, len(ptOut.ExitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
+    assert.Equal(t, 1, len(ptOut.CreatedAccounts))
     acc, err = sdb.GetAccount(idxA1)
     assert.Nil(t, err)
     assert.Equal(t, "78", acc.Balance.String())
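The test above now keeps a single ptOut variable, reassigns it per batch, and asserts on its fields. A hedged sketch of how those repeated assertion pairs could be factored into a helper; ptOutLike and assertBatchCounts are hypothetical, and only the testify calls mirror what the test already uses.

package statedb_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// ptOutLike mirrors only the two ProcessTxOutput fields the test above
// asserts on; it is a stand-in for illustration, not the real type.
type ptOutLike struct {
	ExitInfos       []struct{}
	CreatedAccounts []struct{}
}

// assertBatchCounts is a hypothetical helper showing how the repeated pairs
// of assertions in TestProcessTxsSynchronizer could be factored once the
// outputs live in a single struct.
func assertBatchCounts(t *testing.T, ptOut *ptOutLike, wantExits, wantAccounts int) {
	t.Helper()
	assert.Equal(t, wantExits, len(ptOut.ExitInfos))
	assert.Equal(t, wantAccounts, len(ptOut.CreatedAccounts))
}

func TestAssertBatchCounts(t *testing.T) {
	assertBatchCounts(t, &ptOutLike{CreatedAccounts: make([]struct{}, 4)}, 0, 4)
}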
@@ -405,7 +405,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
     // TODO: Get CollectedFees from ProcessTxs()
     // TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
     // ProcessTxs updates poolL2Txs adding: Nonce, TokenID
-    _, exitInfo, _, err := s.stateDB.ProcessTxs([]common.Idx{}, l1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
+    ptOut, err := s.stateDB.ProcessTxs([]common.Idx{}, l1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
     if err != nil {
         return nil, err
     }
@@ -429,7 +429,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
         position++
     }
 
-    batchData.ExitTree = exitInfo
+    batchData.ExitTree = ptOut.ExitInfos
 
     slotNum := int64(0)
     if ethBlock.EthBlockNum >= s.auctionConstants.GenesisBlockNum {
@@ -89,7 +89,7 @@ func (txsel *TxSelector) GetL2TxSelection(coordIdxs []common.Idx, batchNum commo
     txs := txsel.getL2Profitable(validTxs, txsel.MaxTxs)
 
     // process the txs in the local AccountsDB
-    _, _, _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, nil, nil, txs)
+    _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, nil, nil, txs)
     if err != nil {
         return nil, err
     }
@@ -238,7 +238,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(coordIdxs []common.Idx, batchNum com
     l2Txs := txsel.getL2Profitable(validTxs, maxL2Txs)
 
     // process the txs in the local AccountsDB
-    _, _, _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
+    _, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
    if err != nil {
         return nil, nil, nil, err
     }
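The synchronizer TODOs (CollectedFees, FeeIdxCoordinator) and the txselector call sites above illustrate one payoff of grouping the output: new results can later be added as ProcessTxOutput fields without changing the (output, error) signature, so callers that discard the output with the blank identifier keep compiling. A sketch under that assumption; the CollectedFees field here is hypothetical, prompted only by the TODO comment.

package main

import "fmt"

// ProcessTxOutput as in the diff, plus a hypothetical CollectedFees field of
// the kind the synchronizer TODO hints at; adding it does not change the
// (output, error) signature, so existing callers keep compiling.
type ProcessTxOutput struct {
	ExitInfos     []struct{}
	CollectedFees map[uint32]uint64 // hypothetical, keyed by a TokenID-like value
}

func processTxs() (*ProcessTxOutput, error) {
	return &ProcessTxOutput{CollectedFees: map[uint32]uint64{1: 10}}, nil
}

func main() {
	// A caller that only cares about the error is untouched by the new
	// field, like the txselector call sites in the diff.
	if _, err := processTxs(); err != nil {
		panic(err)
	}
	// A newer caller can start reading the field without a signature change.
	ptOut, _ := processTxs()
	fmt.Println(ptOut.CollectedFees[1])
}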