mirror of https://github.com/arnaucube/hermez-node.git
Fix TxSel: discard tx when ProcessL2Tx gives err
Refactor getL1L2TxSelection, which fixes some problems for certain combinations of txs.
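The core of the change is visible in the ProcessL2Tx handling in the hunks below: when txprocessor.ProcessL2Tx returns an error for an L2 tx, the tx is now annotated through its Info field and appended to discardedL2Txs, and the selection loop continues, instead of the whole selection failing. A minimal standalone sketch of that discard pattern, using simplified stand-in types rather than the actual hermez-node types:

package main

import (
	"errors"
	"fmt"
)

// PoolL2Tx is a simplified stand-in for common.PoolL2Tx (illustration only).
type PoolL2Tx struct {
	Nonce uint64
	Info  string
}

// processL2Tx stands in for txprocessor.ProcessL2Tx; here it simply rejects
// every tx with an odd nonce so the discard path is exercised.
func processL2Tx(tx *PoolL2Tx) error {
	if tx.Nonce%2 == 1 {
		return errors.New("invalid transaction")
	}
	return nil
}

func main() {
	l2Txs := []PoolL2Tx{{Nonce: 0}, {Nonce: 1}, {Nonce: 2}}
	var validTxs, discardedL2Txs []PoolL2Tx

	for i := 0; i < len(l2Txs); i++ {
		// The fix: an error from ProcessL2Tx no longer aborts the whole
		// selection; the tx is annotated and moved to the discarded set.
		if err := processL2Tx(&l2Txs[i]); err != nil {
			l2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
			discardedL2Txs = append(discardedL2Txs, l2Txs[i])
			continue
		}
		validTxs = append(validTxs, l2Txs[i])
	}

	fmt.Println("selected:", len(validTxs), "discarded:", len(discardedL2Txs))
}

The Info strings set on discarded txs are what the new TestProcessL2Selection test at the end of the diff asserts on.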
@@ -148,17 +148,37 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
discardedL2Txs, tracerr.Wrap(err)
}

// getL1L2TxSelection returns the selection of L1 + L2 txs.
// It returns: the CoordinatorIdxs used to receive the fees of the selected
// L2Txs. An array of bytearrays with the signatures of the
// AccountCreationAuthorization of the accounts of the users created by the
// Coordinator with L1CoordinatorTxs of those accounts that does not exist yet
// but there is a transactions to them and the authorization of account
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The
// current version is implemented in order to have a functional
// implementation that can be used asap.
//
// WIP.1: this method uses a 'cherry-pick' of internal calls of the
// StateDB, a refactor of the StateDB to reorganize it internally is
// planned once the main functionallities are covered, with that
// refactor the TxSelector will be updated also.
// implementation that can be used ASAP.

// Steps of this method:
// - getPendingTxs
// - ProcessL1Txs
// - getProfitable (sort by fee & nonce)
// - loop over l2Txs
// - Fill tx.TokenID tx.Nonce
// - Check enough Balance on sender
// - Check Nonce
// - Create CoordAccount L1CoordTx for TokenID if needed
// - & ProcessL1Tx of L1CoordTx
// - Check validity of receiver Account for ToEthAddr / ToBJJ
// - Create UserAccount L1CoordTx if needed (and possible)
// - If everything is fine, store l2Tx to validTxs & update NoncesMap
// - Prepare coordIdxsMap & AccumulatedFees
// - Distribute AccumulatedFees to CoordIdxs
// - MakeCheckpoint

// get pending l2-tx from tx-pool
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
@@ -185,25 +205,62 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// Sort l2TxsRaw (cropping at MaxTx at this point).
// discardedL2Txs contains an array of the L2Txs that have not been
// selected in this Batch.
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx)
l2Txs, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx-uint32(len(l1UserTxs)))
for i := range discardedL2Txs {
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee (does not fit inside the profitable set)"
discardedL2Txs[i].Info =
"Tx not selected due to low absolute fee (does not fit inside the profitable set)"
}

noncesMap := make(map[common.Idx]common.Nonce)
var l2Txs []common.PoolL2Tx
// iterate over l2Txs
// - if tx.TokenID does not exist at CoordsIdxDB
// - create new L1CoordinatorTx creating a CoordAccount, for
// Coordinator to receive the fee of the new TokenID
for i := 0; i < len(l2Txs0); i++ {
accSender, err := tp.StateDB().GetAccount(l2Txs0[i].FromIdx)
var validTxs []common.PoolL2Tx
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
// Iterate over l2Txs
// - check Nonces
// - check enough Balance for the Amount+Fee
// - if needed, create new L1CoordinatorTxs for unexisting ToIdx
// - keep used accAuths
// - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ {
// Check if there is space for more L2Txs in the selection
maxL2Txs := int(selectionConfig.MaxTx) -
len(l1UserTxs) - len(l1CoordinatorTxs)
if len(validTxs) >= maxL2Txs {
// no more available slots for L2Txs
l2Txs[i].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}

// get Nonce & TokenID from the Account by l2Tx.FromIdx
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
l2Txs0[i].TokenID = accSender.TokenID
// populate the noncesMap used at the next iteration
noncesMap[l2Txs0[i].FromIdx] = accSender.Nonce
l2Txs[i].TokenID = accSender.TokenID

// Check enough Balance on sender
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}

// Check if Nonce is correct
if l2Txs[i].Nonce != accSender.Nonce {
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, accSender.Nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}

// if TokenID does not exist yet, create new L1CoordinatorTx to
// create the CoordinatorAccount for that TokenID, to receive
@@ -218,52 +275,27 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
if newL1CoordTx != nil {
// if there is no space for the L1CoordinatorTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// increase positionL1
positionL1++
l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
accAuths = append(accAuths, txsel.coordAccount.AccountCreationAuth)
}
l2Txs = append(l2Txs, l2Txs0[i])
}

var validTxs []common.PoolL2Tx
// iterate over l2TxsRaw
// - check Nonces
// - check enough Balance for the Amount+Fee
// - if needed, create new L1CoordinatorTxs for unexisting ToIdx
// - keep used accAuths
// - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ {
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// check if Nonce is correct
nonce := noncesMap[l2Txs[i].FromIdx]
if l2Txs[i].Nonce != nonce {
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}

// If tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB,
@@ -287,7 +319,19 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
if l1CoordinatorTx != nil {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}

if l1CoordinatorTx != nil && validL2Tx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
@@ -303,9 +347,16 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}

// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx)
if validL2Tx == nil {
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
} else if l2Txs[i].ToIdx >= common.IdxUserThreshold {
receiverAcc, err := txsel.localAccountsDB.GetAccount(l2Txs[i].ToIdx)
@@ -352,23 +403,41 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
continue
}
}

// Account found in the DB, include the l2Tx in the selection
validTxs = append(validTxs, l2Txs[i])
} else if l2Txs[i].ToIdx == common.Idx(1) {
// valid txs (of Exit type)
validTxs = append(validTxs, l2Txs[i])
}
noncesMap[l2Txs[i].FromIdx]++
}

// Process L1CoordinatorTxs
for i := 0; i < len(l1CoordinatorTxs); i++ {
_, _, _, _, err := tp.ProcessL1Tx(nil, &l1CoordinatorTxs[i])
// get CoordIdxsMap for the TokenID of the current l2Txs[i]
// get TokenID from tx.Sender account
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil,
tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+
"due: %s", tokenID, err))
}
}
// prepare temp coordIdxsMap & AccumulatedFees for the call to
// ProcessL2Tx
coordIdxsMap := map[common.TokenID]common.Idx{tokenID: coordIdx}
// tp.AccumulatedFees = make(map[common.Idx]*big.Int)
if _, ok := tp.AccumulatedFees[coordIdx]; !ok {
tp.AccumulatedFees[coordIdx] = big.NewInt(0)
}

_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &l2Txs[i])
if err != nil {
log.Debugw("txselector.getL1L2TxSelection at ProcessL2Tx", "err", err)
// Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s",
err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}

validTxs = append(validTxs, l2Txs[i])
} // after this loop, no checks to discard txs should be done

// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
@@ -391,9 +460,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
}

var coordIdxs []common.Idx
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
for _, idx := range coordIdxsMap {
tp.AccumulatedFees[idx] = big.NewInt(0)
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
@@ -401,29 +468,6 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return coordIdxs[i] < coordIdxs[j]
})

// get most profitable L2-tx
maxL2Txs := int(selectionConfig.MaxTx) -
len(l1UserTxs) - len(l1CoordinatorTxs)

selectedL2Txs := validTxs
if len(validTxs) > maxL2Txs {
selectedL2Txs = selectedL2Txs[:maxL2Txs]
}
var finalL2Txs []common.PoolL2Tx
for i := 0; i < len(selectedL2Txs); i++ {
_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &selectedL2Txs[i])
if err != nil {
// the error can be due not valid tx data, or due other
// cases (such as StateDB error). At this initial
// version of the TxSelector, we discard the L2Tx and
// log the error, assuming that this will be iterated in
// a near future.
return nil, nil, nil, nil, nil, nil,
tracerr.Wrap(fmt.Errorf("TxSelector: txprocessor.ProcessL2Tx: %w", err))
}
finalL2Txs = append(finalL2Txs, selectedL2Txs[i])
}

// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
@@ -452,10 +496,11 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,

metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))

return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
// return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}

// processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
@@ -567,7 +612,10 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
Type: common.TxTypeCreateAccountDeposit,
}
}
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1UserTxs {
// L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
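This is the last hunk touching the selector itself; the remaining hunks update the tests. Both getL1L2TxSelection and processTxToEthAddrBJJ apply the same capacity rule before adding a dependent L1CoordinatorTx: the new L1CoordinatorTx must fit under MaxL1Tx minus the L1UserTxs, and the L1CoordinatorTx plus its L2Tx must also fit under MaxTx. A standalone sketch of that rule (the Config fields mirror txprocessor.Config; the helper functions are illustrative and not part of the repository):

package main

import "fmt"

// Config mirrors the txprocessor.Config fields used by the selector (illustration only).
type Config struct {
	MaxTx   uint32
	MaxL1Tx uint32
}

// roomForL2 reports whether one more plain L2 tx fits in the batch,
// given the txs already selected.
func roomForL2(cfg Config, nL1UserTxs, nL1CoordTxs, nValidL2 int) bool {
	maxL2Txs := int(cfg.MaxTx) - nL1UserTxs - nL1CoordTxs
	return nValidL2 < maxL2Txs
}

// roomForL1CoordPlusL2 reports whether a dependent L1CoordinatorTx plus its
// L2 tx still fit under both the MaxL1Tx bound and the MaxTx bound checked
// in addition to MaxL1Tx in the hunks above.
func roomForL1CoordPlusL2(cfg Config, nL1UserTxs, nL1CoordTxs int) bool {
	if nL1CoordTxs >= int(cfg.MaxL1Tx)-nL1UserTxs {
		return false
	}
	if nL1CoordTxs+1 >= int(cfg.MaxTx)-nL1UserTxs {
		return false
	}
	return true
}

func main() {
	cfg := Config{MaxTx: 10, MaxL1Tx: 10}
	fmt.Println(roomForL2(cfg, 3, 0, 6))         // true: 6 selected L2 txs < 10-3-0
	fmt.Println(roomForL1CoordPlusL2(cfg, 3, 6)) // false: 6+1 >= 10-3
}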
@@ -276,22 +276,23 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[0].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[1])
assert.Equal(t, accAuthSig0, accAuths[2])
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[2])
assert.Equal(t, accAuthSig0, accAuths[1])
assert.Equal(t, accAuthSig1, accAuths[3])
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 4, len(oL1CoordTxs))
assert.Equal(t, 2, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
assert.Equal(t, common.BatchNum(7), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalanceByIdx(t, txsel, 261, "20") // CoordIdx for TokenID=1
checkBalanceByIdx(t, txsel, 262, "10") // CoordIdx for TokenID=0
checkBalance(t, tc, txsel, "Coord", 1, "20") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "Coord", 0, "10") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "A", 0, "600")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "290")
@@ -324,19 +325,20 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[2].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 4, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
assert.Equal(t, common.BatchNum(8), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalanceByIdx(t, txsel, 261, "30")
checkBalanceByIdx(t, txsel, 262, "35")
checkBalance(t, tc, txsel, "Coord", 1, "30") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "Coord", 0, "35") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "A", 0, "430")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "390")
@@ -370,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{262}, coordIdxs)
assert.Equal(t, []common.Idx{263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
assert.Equal(t, 4, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -379,7 +381,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.Equal(t, common.BatchNum(9), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalanceByIdx(t, txsel, 261, "30")
checkBalanceByIdx(t, txsel, 262, "75")
checkBalanceByIdx(t, txsel, 263, "75")
checkBalance(t, tc, txsel, "A", 0, "730")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "380")
@@ -580,7 +582,6 @@ func TestTransferToBjj(t *testing.T) {
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
@@ -712,7 +713,8 @@ func TestTransferManyFromSameAccount(t *testing.T) {

// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx
// transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
@@ -720,7 +722,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 7, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, 4, len(discardedL2Txs))

err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
@@ -803,8 +805,8 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 2, len(oL2Txs))
require.Equal(t, 8, len(discardedL2Txs))
require.Equal(t, 0, len(oL2Txs))
require.Equal(t, 10, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))

err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
@@ -819,7 +821,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {

require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 3, len(oL1CoordTxs))
require.Equal(t, 6, len(oL2Txs))
require.Equal(t, 8, len(oL2Txs))
require.Equal(t, 2, len(discardedL2Txs))
require.Equal(t, 3, len(accAuths))
@@ -843,3 +845,79 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}

func TestProcessL2Selection(t *testing.T) {
set := `
Type: Blockchain

CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 18
CreateAccountDeposit(0) B: 0

> batchL1 // freeze L1User{3}
> batchL1 // forge L1User{3}
> block
`

chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)

hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])

// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()

tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// 8 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126)
PoolTransfer(0) A-B: 10 (126) // not enough funds
PoolTransfer(0) A-B: 5 (126) // enough funds
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 3, len(poolL2Txs))

// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// only 1st L2Tx should be accepted, as:
// - 2nd will not be selected as has not enough funds
// - 3rd will not be selected as has Nonce=2, and the account Nonce==1
// (due the 2nd txs not being accepted)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 2, len(discardedL2Txs))
assert.Equal(t, common.Nonce(0), oL2Txs[0].Nonce)
assert.Equal(t, common.Nonce(1), discardedL2Txs[0].Nonce)
assert.Equal(t, common.Nonce(2), discardedL2Txs[1].Nonce)
assert.Equal(t, "Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: 7, Amount+Fee: 11", discardedL2Txs[0].Info)
assert.Equal(t, "Tx not selected due to not current Nonce. Tx.Nonce: 2, "+
"Account.Nonce: 1", discardedL2Txs[1].Info)

err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
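Finally, the per-token fee bookkeeping that the refactored selection loop performs: each selected L2 tx's TokenID maps to one coordinator index (resolved through getCoordIdx), fees accumulate per index in tp.AccumulatedFees, and the sorted coordIdxs slice is returned and later credited to those accounts. A standalone sketch with simplified stand-in types (illustrative only, not repository code):

package main

import (
	"fmt"
	"math/big"
	"sort"
)

// Simplified stand-ins for common.TokenID and common.Idx (illustration only).
type (
	TokenID uint32
	Idx     uint32
)

func main() {
	// One coordinator account index per TokenID, as getCoordIdx resolves them.
	coordIdxsMap := map[TokenID]Idx{0: 262, 1: 261}

	// Fees accumulated per coordinator index while processing the selected L2 txs.
	accumulatedFees := map[Idx]*big.Int{}
	credit := func(token TokenID, fee int64) {
		idx := coordIdxsMap[token]
		if _, ok := accumulatedFees[idx]; !ok {
			accumulatedFees[idx] = big.NewInt(0)
		}
		accumulatedFees[idx].Add(accumulatedFees[idx], big.NewInt(fee))
	}
	credit(0, 10)
	credit(1, 20)
	credit(1, 10)

	// The sorted CoordIdxs slice is what the selector returns to the caller.
	var coordIdxs []Idx
	for _, idx := range coordIdxsMap {
		coordIdxs = append(coordIdxs, idx)
	}
	sort.Slice(coordIdxs, func(i, j int) bool { return coordIdxs[i] < coordIdxs[j] })

	for _, idx := range coordIdxs {
		// Each accumulated fee is later credited to the coordinator account at idx.
		fmt.Printf("coordIdx %d accumulated fee %s\n", idx, accumulatedFees[idx].String())
	}
}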