diff --git a/common/l1tx.go b/common/l1tx.go index 1a2ec43..69a572d 100644 --- a/common/l1tx.go +++ b/common/l1tx.go @@ -22,7 +22,7 @@ type L1Tx struct { // - L1UserTx: 0 // - L1CoordinatorTx: 1 TxID TxID `meddler:"id"` - // ToForgeL1TxsNum indicates in which the tx was forged / will be forged + // ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` Position int `meddler:"position"` // UserOrigin is set to true if the tx was originated by a user, false if it was diff --git a/coordinator/pipeline.go b/coordinator/pipeline.go index 939815e..5c75577 100644 --- a/coordinator/pipeline.go +++ b/coordinator/pipeline.go @@ -523,15 +523,30 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, if err != nil { return nil, nil, tracerr.Wrap(err) } + // l1UserFutureTxs are the l1UserTxs that are not being forged + // in the next batch, but that are also in the queue for the + // future batches + l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1) + if err != nil { + return nil, nil, tracerr.Wrap(err) + } + coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err = - p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs) + p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs) if err != nil { return nil, nil, tracerr.Wrap(err) } } else { + // get l1UserFutureTxs, which are all the pending L1UserTxs in + // all the queues + l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd + if err != nil { + return nil, nil, tracerr.Wrap(err) + } + // 2b: only L2 txs coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = - p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig) + p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs) if err != nil { return nil, nil, tracerr.Wrap(err) } diff --git 
a/db/historydb/historydb.go b/db/historydb/historydb.go index 46581ba..804a21d 100644 --- a/db/historydb/historydb.go +++ b/db/historydb/historydb.go @@ -751,6 +751,24 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) } +// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch +// with toForgeL1TxsNum (in one of the future batches, not in the next one). +func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) { + var txs []*common.L1Tx + err := meddler.QueryAll( + hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null + `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, + tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, + tx.amount, NULL AS effective_amount, + tx.deposit_amount, NULL AS effective_deposit_amount, + tx.eth_block_num, tx.type, tx.batch_num + FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1 + ORDER BY position;`, + toForgeL1TxsNum, + ) + return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) +} + // GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in // open or frozen queues that are not yet forged) func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) { diff --git a/db/historydb/historydb_test.go b/db/historydb/historydb_test.go index 744da1f..92a8730 100644 --- a/db/historydb/historydb_test.go +++ b/db/historydb/historydb_test.go @@ -699,35 +699,56 @@ func TestGetUnforgedL1UserTxs(t *testing.T) { CreateAccountDeposit(1) B: 5 CreateAccountDeposit(1) C: 5 CreateAccountDeposit(1) D: 5 + > block + > batchL1 > block + + CreateAccountDeposit(1) E: 5 + CreateAccountDeposit(1) F: 5 + > block + ` tc := til.NewContext(uint16(0), 128) blocks, err := tc.GenerateBlocks(set) require.NoError(t, err) // Sanity check - require.Equal(t, 1, len(blocks)) + require.Equal(t, 3, len(blocks)) require.Equal(t, 5, 
len(blocks[0].Rollup.L1UserTxs)) - toForgeL1TxsNum := int64(1) - for i := range blocks { err = historyDB.AddBlockSCData(&blocks[i]) require.NoError(t, err) } - l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum) + l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0) + require.NoError(t, err) + assert.Equal(t, 7, len(l1UserTxs)) + + l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1) require.NoError(t, err) assert.Equal(t, 5, len(l1UserTxs)) assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs) + l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1) + require.NoError(t, err) + assert.Equal(t, 2, len(l1UserTxs)) + count, err := historyDB.GetUnforgedL1UserTxsCount() require.NoError(t, err) - assert.Equal(t, 5, count) + assert.Equal(t, 7, count) - // No l1UserTxs for this toForgeL1TxsNum l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2) require.NoError(t, err) + assert.Equal(t, 2, len(l1UserTxs)) + + l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2) + require.NoError(t, err) + assert.Equal(t, 0, len(l1UserTxs)) + + // No l1UserTxs for this toForgeL1TxsNum + l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3) + require.NoError(t, err) assert.Equal(t, 0, len(l1UserTxs)) } diff --git a/test/zkproof/flows_test.go b/test/zkproof/flows_test.go index 407a15a..c0cf176 100644 --- a/test/zkproof/flows_test.go +++ b/test/zkproof/flows_test.go @@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { } // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs := 
til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, 
err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) { // TxSelector select the transactions for the next Batch l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) { require.NoError(t, err) addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(txprocConfig, nil) + txsel.GetL1L2TxSelection(txprocConfig, nil, nil) require.NoError(t, err) assert.Equal(t, 1, len(coordIdxs)) assert.Equal(t, 0, len(oL1UserTxs)) diff --git a/txselector/txselector.go b/txselector/txselector.go index 1453367..6bc252f 100644 --- a/txselector/txselector.go +++ b/txselector/txselector.go @@ -85,7 +85,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) { // check if CoordinatorAccount for TokenID is already pending to create - if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID, + if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID, txsel.coordAccount.Addr, txsel.coordAccount.BJJ) { return nil, positionL1, nil } @@ -122,11 +122,12 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, // but there is a transactions to them and the authorization of account // creation exists. 
The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. -func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx, +func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { metric.GetL2TxSelection.Inc() coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, - discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{}) + discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, + []common.L1Tx{}, l1UserFutureTxs) return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err) } @@ -140,11 +141,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([ // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, - l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, + l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { metric.GetL1L2TxSelection.Inc() coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, - discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs) + discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs) return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err) } @@ -158,7 +159,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. 
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, - l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, + l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { // WIP.0: the TxSelector is not optimized and will need a redesign. The // current version is implemented in order to have a functional @@ -235,7 +236,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, var validTxs, discardedL2Txs []common.PoolL2Tx l2TxsForgable = sortL2Txs(l2TxsForgable) accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err = - txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), + txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs, l2TxsForgable, validTxs, discardedL2Txs) if err != nil { return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) @@ -249,8 +250,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, var l1CoordinatorTxs2 []common.L1Tx accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err = txsel.processL2Txs(tp, selectionConfig, - len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable, - validTxs, discardedL2Txs) + len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs, + l2TxsNonForgable, validTxs, discardedL2Txs) if err != nil { return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) } @@ -331,8 +332,9 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, } func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, - selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ( - [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { + selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx, + l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx, + []common.PoolL2Tx, []common.PoolL2Tx, error) { var l1CoordinatorTxs 
[]common.L1Tx positionL1 := nL1Txs var accAuths [][]byte @@ -434,7 +436,8 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case validL2Tx, l1CoordinatorTx, accAuth, err := txsel.processTxToEthAddrBJJ(validTxs, selectionConfig, - nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i]) + nL1Txs, l1UserFutureTxs, l1CoordinatorTxs, + positionL1, l2Txs[i]) if err != nil { log.Debugw("txsel.processTxToEthAddrBJJ", "err", err) // Discard L2Tx, and update Info parameter of @@ -574,18 +577,35 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, // l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs // array. func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx, - selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx, - positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx, - *common.AccountCreationAuth, error) { + selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs, + l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) ( + *common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) { // if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a // previous L2Tx in the current process already created a // L1CoordinatorTx of this type, in the DB there still seem that needs // to create a new L1CoordinatorTx, but as is already created, the tx // is valid - if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { + if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { return &l2Tx, nil, nil, nil } + // check if L2Tx receiver account will be created by a L1UserFutureTxs + // (in the next batch, the current frozen queue). 
In that case, the L2Tx + // will be discarded at the current batch, even if there is an + // AccountCreationAuth for the account, as there is a L1UserTx in the + // frozen queue that will create the receiver Account. The L2Tx is + // discarded to avoid the Coordinator creating a new L1CoordinatorTx to + // create the receiver account, which will be also created in the next + // batch from the L1UserFutureTx, ending with the user having 2 + // different accounts for the same TokenID. The double account creation + // is supported by the Hermez zkRollup specification, but it was decided + // to mitigate it at the TxSelector level for the explained cases. + if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { + return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" + + " receiver account does not exist yet, and there is a L1UserTx that" + + " will create that account in a future batch.") + } + var l1CoordinatorTx *common.L1Tx var accAuth *common.AccountCreationAuth if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr { @@ -688,7 +708,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx, return &l2Tx, l1CoordinatorTx, accAuth, nil } -func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, +func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { for i := 0; i < len(l1CoordinatorTxs); i++ { if l1CoordinatorTxs[i].FromEthAddr == addr && @@ -700,6 +720,23 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common. 
return false } +func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID, + addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { + for i := 0; i < len(l1UserFutureTxs); i++ { + if l1UserFutureTxs[i].FromEthAddr == addr && + l1UserFutureTxs[i].TokenID == tokenID && + l1UserFutureTxs[i].FromBJJ == bjj { + return true + } + if l1UserFutureTxs[i].FromEthAddr == addr && + l1UserFutureTxs[i].TokenID == tokenID && + common.EmptyBJJComp == bjj { + return true + } + } + return false +} + // sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx { // Sort by absolute fee with SliceStable, so that txs with same diff --git a/txselector/txselector_test.go b/txselector/txselector_test.go index 78e7834..3831b9a 100644 --- a/txselector/txselector_test.go +++ b/txselector/txselector_test.go @@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:1") l1UserTxs := []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:2") l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:3") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) 
assert.Equal(t, 2, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:4") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 1, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:5") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:6") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 1, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{261, 263}, coordIdxs) assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0]) @@ -328,7 +328,7 @@ func 
TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{261, 263}, coordIdxs) assert.Equal(t, 0, len(accAuths)) @@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{263}, coordIdxs) assert.Equal(t, 0, len(accAuths)) @@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { } // batch1 l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // 1st TransferToEthAddr expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" @@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, 
discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { // initial PoolExit, which now is valid as B has enough Balance l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) { // batch1 to freeze L1UserTxs that will create some accounts with // positive balance l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // Transfer is ToBJJ to a BJJ-only account that doesn't exist @@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 4, len(oL1UserTxs)) // We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx @@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) // Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs @@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, 
oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) // We expect the coordinator to add an L1CoordTx to create an account @@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // 8 transfers from the same account @@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) { // transfers from account A l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) batchPoolL2 := ` @@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // select L1 & L2 txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // batch 3 l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) 
require.Equal(t, 0, len(oL1UserTxs)) @@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // batch 4 l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) - // 8 transfers from the same account + // 3 transfers from the same account batchPoolL2 := ` Type: PoolL2 PoolTransfer(0) A-B: 10 (126) @@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) { // add the PoolL2Txs to the l2DB addL2Txs(t, txsel, poolL2Txs) - // batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A + // batch 2 to create some accounts with positive balance, and do 3 L2Tx transfers from account A l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // batch 2 to crate the accounts (from L1UserTxs) @@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { // select L1 & L2 txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, 
err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { addL2Txs(t, txsel, poolL2Txs) l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { // batch 4. In this Batch, account B has enough balance to send the txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -1038,3 +1038,112 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { require.Equal(t, 3, len(discardedL2Txs)) require.Equal(t, 0, len(accAuths)) } + +func TestL1UserFutureTxs(t *testing.T) { + set := ` + Type: Blockchain + + CreateAccountDeposit(0) Coord: 0 + CreateAccountDeposit(0) A: 100 + + > batchL1 // freeze L1User{2} + CreateAccountDeposit(0) B: 18 + > batchL1 // forge L1User{2}, freeze L1User{1} + > batchL1 // forge L1User{1} + > block + ` + + chainID := uint16(0) + tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) + blocks, err := tc.GenerateBlocks(set) + assert.NoError(t, err) + + hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6") + txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"]) + + // restart nonces of TilContext, as will be set by generating directly + // the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs + tc.RestartNonces() + + tpc := txprocessor.Config{ + NLevels: 16, + MaxFeeTx: 10, + MaxTx: 10, + MaxL1Tx: 10, + 
ChainID: chainID, + } + // batch1 to freeze L1UserTxs + l1UserTxs := []common.L1Tx{} + l1UserFutureTxs := []common.L1Tx{} + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + + batchPoolL2 := ` + Type: PoolL2 + PoolTransferToEthAddr(0) A-B: 10 (126) + ` + poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2) + require.NoError(t, err) + require.Equal(t, 1, len(poolL2Txs)) + + // add AccountCreationAuth for B + _ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B") + // add the PoolL2Txs to the l2DB + addL2Txs(t, txsel, poolL2Txs) + // batch 2 to create some accounts with positive balance, and do 1 L2Tx transfer from account A + l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) + l1UserFutureTxs = + til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) + require.Equal(t, 2, len(l1UserTxs)) + require.Equal(t, 1, len(l1UserFutureTxs)) + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := + txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + assert.Equal(t, 2, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + // no L2Tx selected due to the L1UserFutureTx, the L2Tx will be processed + // at the next batch once the L1UserTx of CreateAccount B is processed, + // even though there is an AccountCreationAuth for Account B. 
+ assert.Equal(t, 0, len(oL2Txs)) + assert.Equal(t, 1, len(discardedL2Txs)) + assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+ + " discarded at the current batch, as the receiver account does"+ + " not exist yet, and there is a L1UserTx that will create that"+ + " account in a future batch.", + discardedL2Txs[0].Info) + + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) + + l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) + l1UserFutureTxs = []common.L1Tx{} + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = + txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + assert.Equal(t, 1, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + // L2Tx selected as now the L1UserTx of CreateAccount B is processed + assert.Equal(t, 1, len(oL2Txs)) + assert.Equal(t, 0, len(discardedL2Txs)) + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) + + // generate a new L2Tx A-B and check that is processed + poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2) + require.NoError(t, err) + require.Equal(t, 1, len(poolL2Txs)) + // add the PoolL2Txs to the l2DB + addL2Txs(t, txsel, poolL2Txs) + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = + txsel.GetL1L2TxSelection(tpc, nil, nil) + require.NoError(t, err) + assert.Equal(t, 0, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + assert.Equal(t, 1, len(oL2Txs)) + assert.Equal(t, 0, len(discardedL2Txs)) + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) +}