Merge pull request #448 from hermeznetwork/feature/txselector0

Feature/txselector0
@@ -124,6 +124,15 @@ func L2TxsToPoolL2Txs(txs []L2Tx) []PoolL2Tx {
 	return r
 }
 
+// TxIDsFromL2Txs returns an array of TxID from the []L2Tx
+func TxIDsFromL2Txs(txs []L2Tx) []TxID {
+	txIDs := make([]TxID, len(txs))
+	for i, tx := range txs {
+		txIDs[i] = tx.TxID
+	}
+	return txIDs
+}
+
 // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
 func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd

@@ -345,6 +345,15 @@ func PoolL2TxsToL2Txs(txs []PoolL2Tx) ([]L2Tx, error) {
 	return l2Txs, nil
 }
 
+// TxIDsFromPoolL2Txs returns an array of TxID from the []PoolL2Tx
+func TxIDsFromPoolL2Txs(txs []PoolL2Tx) []TxID {
+	txIDs := make([]TxID, len(txs))
+	for i, tx := range txs {
+		txIDs[i] = tx.TxID
+	}
+	return txIDs
+}
+
 // PoolL2TxState is a string that represents the status of a L2 transaction
 type PoolL2TxState string
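Note: a minimal, runnable sketch (not part of the diff; TxID and L2Tx are simplified stand-ins for the common package types) of the de-duplication these helpers enable — call sites that previously kept package-local copies of the TxID extraction can share the single common implementation:

package main

import "fmt"

// Stand-ins for common.TxID and common.L2Tx (the real TxID is longer).
type TxID [12]byte
type L2Tx struct{ TxID TxID }

// Mirrors common.TxIDsFromL2Txs added above: one shared implementation
// instead of duplicated private helpers at every call site.
func TxIDsFromL2Txs(txs []L2Tx) []TxID {
	txIDs := make([]TxID, len(txs))
	for i, tx := range txs {
		txIDs[i] = tx.TxID
	}
	return txIDs
}

func main() {
	txs := []L2Tx{{TxID: TxID{1}}, {TxID: TxID{2}}}
	fmt.Println(len(TxIDsFromL2Txs(txs))) // 2
}

The coordinator hunks below delete their private poolL2TxsIDs/l2TxsIDs copies and call these shared helpers instead.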
@@ -467,7 +467,7 @@ func (t *TxManager) rollupForgeBatch(ctx context.Context, batchInfo *BatchInfo)
 	batchInfo.EthTx = ethTx
 	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
 	t.cfg.debugBatchStore(batchInfo)
-	if err := t.l2DB.DoneForging(l2TxsIDs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
+	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
 		return tracerr.Wrap(err)
 	}
 	return nil

@@ -776,22 +776,6 @@ func (p *Pipeline) Stop(ctx context.Context) {
 	}
 }
 
-func poolL2TxsIDs(txs []common.PoolL2Tx) []common.TxID {
-	txIDs := make([]common.TxID, len(txs))
-	for i, tx := range txs {
-		txIDs[i] = tx.TxID
-	}
-	return txIDs
-}
-
-func l2TxsIDs(txs []common.L2Tx) []common.TxID {
-	txIDs := make([]common.TxID, len(txs))
-	for i, tx := range txs {
-		txIDs[i] = tx.TxID
-	}
-	return txIDs
-}
-
 // sendServerProof sends the circuit inputs to the proof server
 func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) error {
 	p.cfg.debugBatchStore(batchInfo)

@@ -861,7 +845,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (*BatchInfo, error) {
 	batchInfo.CoordIdxs = coordIdxs
 	batchInfo.VerifierIdx = p.cfg.VerifierIdx
 
-	if err := p.l2DB.StartForging(poolL2TxsIDs(poolL2Txs), batchInfo.BatchNum); err != nil {
+	if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum); err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -215,19 +215,14 @@ Type: Blockchain
 AddToken(1)
 
 
-// Coordinator accounts, Idxs: 256, 257
-CreateAccountCoordinator(0) Coord
-CreateAccountCoordinator(1) Coord
-
 // close Block:0, Batch:0
-> batch // forge L1Coord{2}
+> batch
 
 CreateAccountDeposit(0) A: 500
 CreateAccountDeposit(1) C: 0
-CreateAccountCoordinator(0) C
 
 // close Block:0, Batch:1
-> batchL1 // freeze L1User{2}, forge L1Coord{1}
+> batchL1 // freeze L1User{2}, forge L1Coord{0}
 // Expected balances:
 // Coord(0): 0, Coord(1): 0
 // C(0): 0

@@ -263,13 +258,19 @@ CreateAccountDeposit(0) D: 800
 // B(0): 400
 // C(0): 0
 
+// Coordinator creates needed accounts to receive Fees
+CreateAccountCoordinator(1) Coord
+CreateAccountCoordinator(0) Coord
+// Coordinator creates needed 'To' accounts for the L2Txs
+CreateAccountCoordinator(1) B
+CreateAccountCoordinator(0) C
 
 
 Transfer(1) A-B: 200 (126)
 Transfer(0) B-C: 100 (126)
 
 // close Block:0, Batch:6
-> batchL1 // forge L1User{1}, forge L1Coord{2}, forge L2{2}
+> batchL1 // forge L1User{1}, forge L1Coord{4}, forge L2{2}
 // Expected balances:
 // Coord(0): 10, Coord(1): 20
 // A(0): 600, A(1): 280
@@ -123,6 +123,7 @@ type Account struct {
 type User struct {
 	Name string
 	BJJ  *babyjub.PrivateKey
+	EthSk *ecdsa.PrivateKey
 	Addr ethCommon.Address
 	Accounts map[common.TokenID]*Account
 }

@@ -673,6 +674,15 @@ func (tc *Context) generatePoolL2Txs() ([]common.PoolL2Tx, error) {
 	return txs, nil
 }
 
+// RestartNonces sets all the Users.Accounts.Nonces to 0
+func (tc *Context) RestartNonces() {
+	for name, user := range tc.Users {
+		for tokenID := range user.Accounts {
+			tc.Users[name].Accounts[tokenID].Nonce = common.Nonce(0)
+		}
+	}
+}
+
 // generateKeys generates BabyJubJub & Address keys for the given list of user
 // names in a deterministic way. This means, that for the same given
 // 'userNames' in a certain order, the keys will be always the same.
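Note: RestartNonces exists because the txselector test below replays the same til users while regenerating PoolL2Txs batch by batch, so the tracked nonces must return to their initial state. A minimal runnable sketch of the same reset pattern, with simplified stand-in types (not the real til package):

package main

import "fmt"

// Simplified stand-ins for the til types used above.
type Account struct{ Nonce int64 }
type User struct{ Accounts map[int]*Account }
type Context struct{ Users map[string]*User }

// RestartNonces sets every account nonce back to 0, like the method above.
func (tc *Context) RestartNonces() {
	for _, user := range tc.Users {
		for tokenID := range user.Accounts {
			user.Accounts[tokenID].Nonce = 0
		}
	}
}

func main() {
	tc := &Context{Users: map[string]*User{
		"A": {Accounts: map[int]*Account{0: {Nonce: 3}}},
	}}
	tc.RestartNonces()
	fmt.Println(tc.Users["A"].Accounts[0].Nonce) // 0
}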
@@ -696,6 +706,7 @@ func (tc *Context) generateKeys(userNames []string) {
 	u := User{
 		Name: userNames[i-1],
 		BJJ:  &sk,
+		EthSk: &key,
 		Addr: addr,
 		Accounts: make(map[common.TokenID]*Account),
 	}

@@ -219,8 +219,15 @@ func TestGeneratePoolL2Txs(t *testing.T) {
 	assert.Equal(t, tc.Users["User1"].BJJ.Public().String(), poolL2Txs[5].ToBJJ.String())
 
 	assert.Equal(t, common.Nonce(0), poolL2Txs[0].Nonce)
 	assert.Equal(t, common.Nonce(0), poolL2Txs[1].Nonce)
 	assert.Equal(t, common.Nonce(0), poolL2Txs[2].Nonce)
+	assert.Equal(t, common.Nonce(1), poolL2Txs[3].Nonce)
+	assert.Equal(t, common.Nonce(0), poolL2Txs[4].Nonce)
+	assert.Equal(t, common.Nonce(0), poolL2Txs[5].Nonce)
+	assert.Equal(t, common.Nonce(0), poolL2Txs[6].Nonce)
+	assert.Equal(t, common.Nonce(0), poolL2Txs[7].Nonce)
+	assert.Equal(t, common.Nonce(2), poolL2Txs[8].Nonce)
+	assert.Equal(t, common.Nonce(3), poolL2Txs[9].Nonce)
 
 	assert.Equal(t, tc.Users["B"].Addr.Hex(), poolL2Txs[9].ToEthAddr.Hex())
 	assert.Equal(t, common.EmptyBJJComp, poolL2Txs[9].ToBJJ)
@@ -65,6 +65,11 @@ func NewTxProcessor(sdb *statedb.StateDB, config Config) *TxProcessor {
 	}
 }
 
+// StateDB returns a pointer to the StateDB of the TxProcessor
+func (tp *TxProcessor) StateDB() *statedb.StateDB {
+	return tp.s
+}
+
 func (tp *TxProcessor) resetZKInputs() {
 	tp.zki = nil
 	tp.i = 0 // initialize current transaction index in the ZKInputs generation
@@ -566,7 +571,12 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 			return nil, nil, false, tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
 		}
 		// case when tx.Type== common.TxTypeTransferToEthAddr or common.TxTypeTransferToBJJ
-		tx.AuxToIdx, err = tp.s.GetIdxByEthAddrBJJ(tx.ToEthAddr, tx.ToBJJ, tx.TokenID)
+
+		accSender, err := tp.s.GetAccount(tx.FromIdx)
+		if err != nil {
+			return nil, nil, false, tracerr.Wrap(err)
+		}
+		tx.AuxToIdx, err = tp.s.GetIdxByEthAddrBJJ(tx.ToEthAddr, tx.ToBJJ, accSender.TokenID)
 		if err != nil {
 			return nil, nil, false, tracerr.Wrap(err)
 		}
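Note: the change above makes the AuxToIdx lookup use the TokenID taken from the sender's account in the state tree rather than the client-supplied tx.TokenID. A minimal runnable sketch of that pattern with stand-in types (not the real StateDB API):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins: the real code uses common.Idx/common.TokenID and the StateDB.
type Idx int
type TokenID int
type Account struct{ TokenID TokenID }

type state struct {
	accounts map[Idx]Account
	byAddr   map[string]map[TokenID]Idx
}

// resolveToIdx derives the token from the authoritative sender account,
// then resolves the destination Idx for (addr, tokenID).
func (s *state) resolveToIdx(fromIdx Idx, toAddr string) (Idx, error) {
	accSender, ok := s.accounts[fromIdx]
	if !ok {
		return 0, errors.New("sender account not found")
	}
	idx, ok := s.byAddr[toAddr][accSender.TokenID] // sender's TokenID drives the lookup
	if !ok {
		return 0, errors.New("idx not found for EthAddr & TokenID")
	}
	return idx, nil
}

func main() {
	s := &state{
		accounts: map[Idx]Account{256: {TokenID: 1}},
		byAddr:   map[string]map[TokenID]Idx{"0xabc": {1: 257}},
	}
	idx, err := s.resolveToIdx(256, "0xabc")
	fmt.Println(idx, err) // 257 <nil>
}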
@@ -782,8 +792,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
 // the receiver. This parameter is used when the tx.ToIdx is not specified and
 // the real ToIdx is found trhrough the ToEthAddr or ToBJJ.
 func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
-	collectedFees map[common.TokenID]*big.Int,
-	tx common.Tx, auxToIdx common.Idx) error {
+	collectedFees map[common.TokenID]*big.Int, tx common.Tx, auxToIdx common.Idx) error {
 	if auxToIdx == common.Idx(0) {
 		auxToIdx = tx.ToIdx
 	}

@@ -858,7 +867,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
 	} else {
 		accReceiver, err = tp.s.GetAccount(auxToIdx)
 		if err != nil {
-			log.Error(err)
+			log.Error(err, auxToIdx)
 			return tracerr.Wrap(err)
 		}
 	}

@@ -1020,6 +1029,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 	if err != nil {
 		return nil, false, tracerr.Wrap(fmt.Errorf("Can not use CoordIdx that does not exist in the tree. TokenID: %d, CoordIdx: %d", acc.TokenID, coordIdxsMap[acc.TokenID]))
 	}
 
+	// accumulate the fee for the Coord account
 	accumulated := tp.AccumulatedFees[accCoord.Idx]
 	accumulated.Add(accumulated, fee)
@@ -214,8 +214,6 @@ func TestProcessTxsBalances(t *testing.T) {
 	blocks, err := tc.GenerateBlocks(til.SetBlockchainMinimumFlow0)
 	require.NoError(t, err)
 
-	// Coordinator Idx where to send the fees
-	coordIdxs := []common.Idx{256, 257}
 	config := Config{
 		NLevels:  32,
 		MaxFeeTx: 64,

@@ -232,20 +230,20 @@ func TestProcessTxsBalances(t *testing.T) {
 	log.Debug("block:0 batch:1")
 	l1UserTxs := []common.L1Tx{}
 	l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
-	_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
+	_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 
 	log.Debug("block:0 batch:2")
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
-	_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
+	_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 	checkBalance(t, tc, sdb, "A", 0, "500")
 
 	log.Debug("block:0 batch:3")
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[3].L2Txs)
-	_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
+	_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 	checkBalance(t, tc, sdb, "A", 0, "500")
 	checkBalance(t, tc, sdb, "A", 1, "500")

@@ -253,7 +251,7 @@ func TestProcessTxsBalances(t *testing.T) {
 	log.Debug("block:0 batch:4")
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[4].L2Txs)
-	_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
+	_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 	checkBalance(t, tc, sdb, "A", 0, "500")
 	checkBalance(t, tc, sdb, "A", 1, "500")

@@ -261,12 +259,13 @@ func TestProcessTxsBalances(t *testing.T) {
 	log.Debug("block:0 batch:5")
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[5].L2Txs)
-	_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
+	_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 	checkBalance(t, tc, sdb, "A", 0, "600")
 	checkBalance(t, tc, sdb, "A", 1, "500")
 	checkBalance(t, tc, sdb, "B", 0, "400")
 
+	coordIdxs := []common.Idx{261, 262}
 	log.Debug("block:0 batch:6")
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
@@ -1212,11 +1212,11 @@ func TestZKInputs6(t *testing.T) {
 	ptOut, err := tp.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
 	require.NoError(t, err)
 
-	assert.Equal(t, "9039235803989265562752459273677612535578150724983094202749787856042851287937", sdb.MT.Root().BigInt().String())
+	assert.Equal(t, "0", sdb.MT.Root().BigInt().String())
 	assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
 	h, err := ptOut.ZKInputs.HashGlobalData()
 	require.NoError(t, err)
-	assert.Equal(t, "16379429180374022967705349031545993941940235797391087559198349725707777217313", h.String())
+	assert.Equal(t, "11185464138041166840819960504404439577014916009324100031008662249284619863031", h.String())
 
 	// printZKInputs(t, ptOut.ZKInputs)

@@ -1226,11 +1226,11 @@ func TestZKInputs6(t *testing.T) {
 	ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
 
-	assert.Equal(t, "11268490488303545450371226436237399651863451560820293060171443690124510027423", sdb.MT.Root().BigInt().String())
+	assert.Equal(t, "0", sdb.MT.Root().BigInt().String())
 	assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
 	h, err = ptOut.ZKInputs.HashGlobalData()
 	require.NoError(t, err)
-	assert.Equal(t, "7929589021941867224637424679829482351183189155476180469293857163025959492111", h.String())
+	assert.Equal(t, "12631863710217571237457816324742333499903148838621785764212585803181094983889", h.String())
 
 	// printZKInputs(t, ptOut.ZKInputs)

@@ -1239,10 +1239,10 @@ func TestZKInputs6(t *testing.T) {
 	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
 	ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
 	require.NoError(t, err)
-	assert.Equal(t, "4506051426679555819811005692198685182747763336038770877076710632305611650930", sdb.MT.Root().BigInt().String())
+	assert.Equal(t, "1226521246017973425160735051912281623711495425744154152193517863144350256876", sdb.MT.Root().BigInt().String())
 	h, err = ptOut.ZKInputs.HashGlobalData()
 	require.NoError(t, err)
-	assert.Equal(t, "4701632846207201125105176884973241543664109364248244712634276477520091620527", h.String())
+	assert.Equal(t, "10907825458127261621699288732778996369331396845273565886224483543414801610880", h.String())
 
 	// printZKInputs(t, ptOut.ZKInputs)
 }
@@ -16,13 +16,6 @@ import (
 	"github.com/hermeznetwork/hermez-node/txprocessor"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/iden3/go-iden3-crypto/babyjub"
-	"github.com/iden3/go-merkletree/db"
 )
 
-const (
-	// PathCoordIdxsDB defines the path of the key-value db where the
-	// CoordIdxs will be stored
-	PathCoordIdxsDB = "/coordidxs"
-)
-
 // txs implements the interface Sort for an array of Tx

@@ -44,7 +37,7 @@ func (t txs) Less(i, j int) bool {
 type CoordAccount struct {
 	Addr ethCommon.Address
 	BJJ  babyjub.PublicKeyComp
-	AccountCreationAuth []byte
+	AccountCreationAuth []byte // signature in byte array format
 }
 
 // SelectionConfig contains the parameters of configuration of the selection of

@@ -52,8 +45,6 @@ type CoordAccount struct {
 type SelectionConfig struct {
 	// MaxL1UserTxs is the maximum L1-user-tx for a batch
 	MaxL1UserTxs uint64
-	// MaxL1CoordinatorTxs is the maximum L1-coordinator-tx for a batch
-	MaxL1CoordinatorTxs uint64
 
 	// TxProcessorConfig contains the config for ProcessTxs
 	TxProcessorConfig txprocessor.Config

@@ -100,7 +91,8 @@ func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
 }
 
 func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
-	return txsel.localAccountsDB.GetIdxByEthAddrBJJ(txsel.coordAccount.Addr, txsel.coordAccount.BJJ, tokenID)
+	return txsel.localAccountsDB.GetIdxByEthAddrBJJ(txsel.coordAccount.Addr,
+		txsel.coordAccount.BJJ, tokenID)
 }
 
 // coordAccountForTokenID creates a new L1CoordinatorTx to create a new

@@ -108,8 +100,8 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
 // not exist yet in the db, and does not exist a L1CoordinatorTx to creat that
 // account in the given array of L1CoordinatorTxs. If a new Coordinator account
 // needs to be created, a new L1CoordinatorTx will be returned from this
-// function.
-//nolint:unused
+// function. After calling this method, if the l1CoordinatorTx is added to the
+// selection, positionL1 must be increased 1.
 func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 	tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
 	// check if CoordinatorAccount for TokenID is already pending to create

@@ -117,7 +109,7 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 		txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
 		return nil, positionL1, nil
 	}
-	_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(txsel.coordAccount.Addr, txsel.coordAccount.BJJ, tokenID)
+	_, err := txsel.getCoordIdx(tokenID)
 	if tracerr.Unwrap(err) == statedb.ErrIdxNotFound {
 		// create L1CoordinatorTx to create new CoordAccount for
 		// TokenID

@@ -131,7 +123,6 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 			DepositAmount: big.NewInt(0),
 			Type:          common.TxTypeCreateAccountDeposit,
 		}
-		positionL1++
 
 		return &l1CoordinatorTx, positionL1, nil
 	}
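Note: per the new doc comment, coordAccountForTokenID no longer bumps positionL1 itself; the caller does, and only when the returned tx is actually added to the selection. A minimal runnable miniature of that caller contract (stub types, not the real TxSelector):

package main

import "fmt"

// Stand-in for common.L1Tx.
type L1Tx struct{ Position int }

// Stub: returns a new creation tx for unseen tokenIDs, nil otherwise.
func coordAccountForTokenID(seen map[int]bool, tokenID, positionL1 int) (*L1Tx, int) {
	if seen[tokenID] {
		return nil, positionL1
	}
	return &L1Tx{Position: positionL1}, positionL1
}

func main() {
	seen := map[int]bool{7: true}
	var sel []L1Tx
	positionL1 := 2
	for _, tokenID := range []int{7, 9} {
		newTx, pos := coordAccountForTokenID(seen, tokenID, positionL1)
		positionL1 = pos
		if newTx != nil {
			sel = append(sel, *newTx)
			positionL1++ // the caller advances the position, per the doc comment
			seen[tokenID] = true
		}
	}
	fmt.Println(len(sel), positionL1) // 1 3
}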
@@ -152,7 +143,8 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
 func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig,
-	batchNum common.BatchNum) ([]common.Idx, [][]byte, []common.L1Tx, []common.PoolL2Tx, error) {
+	batchNum common.BatchNum) ([]common.Idx, [][]byte, []common.L1Tx,
+	[]common.PoolL2Tx, error) {
 	coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, err :=
 		txsel.GetL1L2TxSelection(selectionConfig, batchNum, []common.L1Tx{})
 	return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, tracerr.Wrap(err)

@@ -167,12 +159,18 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig,
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
 func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
-	batchNum common.BatchNum, l1Txs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
-	[]common.L1Tx, []common.PoolL2Tx, error) {
-	// TODO WIP this method uses a 'cherry-pick' of internal calls of the
+	batchNum common.BatchNum, l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte,
+	[]common.L1Tx, []common.L1Tx, []common.PoolL2Tx, error) {
+	// TODO input parameter 'batchNum' is not used, delete
+
+	// WIP.0: the TxSelector is not optimized and will need a redesign. The
+	// current version is implemented in order to have a functional
+	// implementation that can be used asap.
+	//
+	// WIP.1: this method uses a 'cherry-pick' of internal calls of the
 	// StateDB, a refactor of the StateDB to reorganize it internally is
 	// planned once the main functionallities are covered, with that
-	// refactor the TxSelector will be updated also
+	// refactor the TxSelector will be updated also.
 
 	// get pending l2-tx from tx-pool
 	l2TxsRaw, err := txsel.l2db.GetPendingTxs()
@@ -184,59 +182,77 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 	tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig.TxProcessorConfig)
 
 	// Process L1UserTxs
-	for i := 0; i < len(l1Txs); i++ {
+	for i := 0; i < len(l1UserTxs); i++ {
 		// assumption: l1usertx are sorted by L1Tx.Position
-		_, _, _, _, err := tp.ProcessL1Tx(nil, &l1Txs[i])
+		_, _, _, _, err := tp.ProcessL1Tx(nil, &l1UserTxs[i])
 		if err != nil {
 			return nil, nil, nil, nil, nil, tracerr.Wrap(err)
 		}
 	}
 
 	var l1CoordinatorTxs []common.L1Tx
-	positionL1 := len(l1Txs)
+	positionL1 := len(l1UserTxs)
 	var accAuths [][]byte
 
 	// sort l2TxsRaw (cropping at MaxTx at this point)
-	l2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
-
-	// TODO for L1CoordinatorTxs check that always len(l1UserTxs)+len(l1CoordinatorTxs)<MaxL1Txs
+	l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
 
+	noncesMap := make(map[common.Idx]common.Nonce)
+	var l2Txs []common.PoolL2Tx
 	// iterate over l2Txs
 	// - if tx.TokenID does not exist at CoordsIdxDB
-	// - create new L1CoordinatorTx, for Coordinator to receive the fee of the new TokenID
-	for i := 0; i < len(l2Txs); i++ {
-		// check if l2Tx.TokenID does not exist at CoordsIdxDB
-		_, err = txsel.getCoordIdx(l2Txs[i].TokenID) // TODO already used inside coordAccountForTokenID, this will be removed
-		if tracerr.Unwrap(err) != db.ErrNotFound {
-			// if TokenID does not exist yet, create new
-			// L1CoordinatorTx to create the CoordinatorAccount for
-			// that TokenID, to receive the fees. Only in the case
-			// that there does not exist yet a pending
-			// L1CoordinatorTx to create the account for the
-			// Coordinator for that TokenID
-
-			// TODO TMP
-			// var newL1CoordTx *common.L1Tx
-			// newL1CoordTx, positionL1, err =
-			// 	txsel.coordAccountForTokenID(l1CoordinatorTxs,
-			// 	l2TxsRaw[i].TokenID, positionL1)
-			// if err != nil {
-			// 	return nil, nil, nil, nil, nil, tracerr.Wrap(err)
-			// }
-			// if newL1CoordTx != nil {
-			// 	l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
-			// }
-		} else if err != nil {
+	// - create new L1CoordinatorTx creating a CoordAccount, for
+	// Coordinator to receive the fee of the new TokenID
+	for i := 0; i < len(l2Txs0); i++ {
+		accSender, err := tp.StateDB().GetAccount(l2Txs0[i].FromIdx)
+		if err != nil {
 			return nil, nil, nil, nil, nil, tracerr.Wrap(err)
 		}
+		// populate the noncesMap used at the next iteration
+		noncesMap[l2Txs0[i].FromIdx] = accSender.Nonce
+
+		// if TokenID does not exist yet, create new L1CoordinatorTx to
+		// create the CoordinatorAccount for that TokenID, to receive
+		// the fees. Only in the case that there does not exist yet a
+		// pending L1CoordinatorTx to create the account for the
+		// Coordinator for that TokenID
+		var newL1CoordTx *common.L1Tx
+		newL1CoordTx, positionL1, err =
+			txsel.coordAccountForTokenID(l1CoordinatorTxs,
+				accSender.TokenID, positionL1)
+		if err != nil {
+			return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+		}
+		if newL1CoordTx != nil {
+			// if there is no space for the L1CoordinatorTx, discard the L2Tx
+			if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
+				// discard L2Tx
+				continue
+			}
+			// increase positionL1
+			positionL1++
+			l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
+			accAuths = append(accAuths, txsel.coordAccount.AccountCreationAuth)
+		}
+		l2Txs = append(l2Txs, l2Txs0[i])
 	}
 
-	var validTxs txs
+	var validTxs []common.PoolL2Tx
 	// iterate over l2TxsRaw
 	// - check Nonces
 	// - if needed, create new L1CoordinatorTxs for unexisting ToIdx
 	// - keep used accAuths
 	// - put the valid txs into validTxs array
 	for i := 0; i < len(l2Txs); i++ {
+		// check if Nonce is correct
+		nonce := noncesMap[l2Txs[i].FromIdx]
+		if l2Txs[i].Nonce == nonce {
+			noncesMap[l2Txs[i].FromIdx]++
+		} else {
+			// not valid Nonce at tx
+			continue
+		}
 
 		// If tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB,
 		// if so, tx is used. If tx.ToIdx==0, for an L2Tx will be the
 		// case of TxToEthAddr or TxToBJJ, check if
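Note: the nonce check added above keeps, per sender, only the txs whose nonces continue the sequence starting from the account's current nonce, because the zkproof generation needs sequential nonces. A minimal runnable miniature of that filter (plain ints stand in for common.Idx/common.Nonce):

package main

import "fmt"

type tx struct {
	from  int
	nonce int
}

// filterByNonce keeps txs whose nonce matches the expected next nonce for
// the sender, advancing the expectation on each accepted tx, like the
// noncesMap logic above.
func filterByNonce(txs []tx, accountNonce map[int]int) []tx {
	nonces := make(map[int]int, len(accountNonce))
	for idx, n := range accountNonce {
		nonces[idx] = n
	}
	var valid []tx
	for _, t := range txs {
		if t.nonce != nonces[t.from] {
			continue // gap or replay: discarded, like the `continue` above
		}
		nonces[t.from]++
		valid = append(valid, t)
	}
	return valid
}

func main() {
	txs := []tx{{256, 0}, {256, 1}, {256, 3}} // nonce 3 leaves a gap
	fmt.Println(filterByNonce(txs, map[int]int{256: 0})) // [{256 0} {256 1}]
}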
@@ -246,16 +262,20 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 		// of CreateAccountAndDeposit is created. If tx.ToIdx==1, is a
 		// Exit type and is used.
 		if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
-			var accAuth *common.AccountCreationAuth
-			validTxs, l1CoordinatorTxs, accAuth, positionL1, err =
-				txsel.processTxToEthAddrBJJ(validTxs, l1CoordinatorTxs,
-					positionL1, l2Txs[i])
+			validL2Tx, l1CoordinatorTx, accAuth, err :=
+				txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
+					len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
 			if err != nil {
 				log.Debug(err)
 				continue
 			}
-			if accAuth != nil {
+			if accAuth != nil && l1CoordinatorTx != nil {
 				accAuths = append(accAuths, accAuth.Signature)
+				l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
+				positionL1++
 			}
+			if validL2Tx != nil {
+				validTxs = append(validTxs, *validL2Tx)
+			}
 		} else if l2Txs[i].ToIdx >= common.IdxUserThreshold {
 			_, err = txsel.localAccountsDB.GetAccount(l2Txs[i].ToIdx)

@@ -285,17 +305,23 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 
 	// get CoordIdxsMap for the TokenIDs
 	coordIdxsMap := make(map[common.TokenID]common.Idx)
-	// TODO TMP (related to L#260
-	// for i := 0; i < len(l2Txs); i++ {
-	// 	coordIdx, err := txsel.getCoordIdx(l2Txs[i].TokenID)
-	// 	if err != nil {
-	// 		// if err is db.ErrNotFound, should not happen, as all
-	// 		// the l2Txs.TokenID should have a CoordinatorIdx
-	// 		// created in the DB at this point
-	// 		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
-	// 	}
-	// 	coordIdxsMap[l2Txs[i].TokenID] = coordIdx
-	// }
+	for i := 0; i < len(l2Txs); i++ {
+		// get TokenID from tx.Sender
+		accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
+		if err != nil {
+			return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+		}
+		tokenID := accSender.TokenID
+
+		coordIdx, err := txsel.getCoordIdx(tokenID)
+		if err != nil {
+			// if err is db.ErrNotFound, should not happen, as all
+			// the l2Txs.TokenID should have a CoordinatorIdx
+			// created in the DB at this point
+			return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+		}
+		coordIdxsMap[tokenID] = coordIdx
+	}
 
 	var coordIdxs []common.Idx
 	tp.AccumulatedFees = make(map[common.Idx]*big.Int)
@@ -303,10 +329,16 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 		tp.AccumulatedFees[idx] = big.NewInt(0)
 		coordIdxs = append(coordIdxs, idx)
 	}
+	// sort CoordIdxs
+	sort.SliceStable(coordIdxs, func(i, j int) bool {
+		return coordIdxs[i] < coordIdxs[j]
+	})
 
 	// get most profitable L2-tx
-	maxL2Txs := selectionConfig.TxProcessorConfig.MaxTx - uint32(len(l1CoordinatorTxs)) // - len(l1UserTxs) // TODO if there are L1UserTxs take them in to account
-	selectedL2Txs := txsel.getL2Profitable(l2Txs, maxL2Txs) // TODO this will only need to crop the lasts, as are already sorted
+	maxL2Txs := selectionConfig.TxProcessorConfig.MaxTx -
+		uint32(len(l1UserTxs)) - uint32(len(l1CoordinatorTxs))
+
+	selectedL2Txs := txsel.getL2Profitable(l2Txs, maxL2Txs) // TODO this will only need to crop the lasts, as are already sorted
 	for i := 0; i < len(selectedL2Txs); i++ {
 		_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &selectedL2Txs[i])
 		if err != nil {

@@ -314,7 +346,32 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 		}
 	}
 
-	return coordIdxs, accAuths, l1Txs, l1CoordinatorTxs, selectedL2Txs, nil
+	// distribute the AccumulatedFees from the processed L2Txs into the
+	// Coordinator Idxs
+	for idx, accumulatedFee := range tp.AccumulatedFees {
+		cmp := accumulatedFee.Cmp(big.NewInt(0))
+		if cmp == 1 { // accumulatedFee>0
+			// send the fee to the Idx of the Coordinator for the TokenID
+			accCoord, err := txsel.localAccountsDB.GetAccount(idx)
+			if err != nil {
+				log.Errorw("Can not distribute accumulated fees to coordinator account: No coord Idx to receive fee", "idx", idx)
+				return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+			}
+			accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
+			_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
+			if err != nil {
+				log.Error(err)
+				return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+			}
+		}
+	}
+
+	err = tp.StateDB().MakeCheckpoint()
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+
+	return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, selectedL2Txs, nil
 }
 
 // processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
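Note: the fee-distribution block added above credits each coordinator account with the fees accumulated for its token during L2 processing. A minimal runnable miniature of the same big.Int pattern (plain maps stand in for localAccountsDB.GetAccount/UpdateAccount):

package main

import (
	"fmt"
	"math/big"
)

// distributeFees adds each positive accumulated fee to the balance of the
// matching coordinator Idx, like the loop over tp.AccumulatedFees above.
func distributeFees(balances map[int]*big.Int, accumulated map[int]*big.Int) {
	for idx, fee := range accumulated {
		if fee.Cmp(big.NewInt(0)) == 1 { // fee > 0
			balances[idx] = new(big.Int).Add(balances[idx], fee)
		}
	}
}

func main() {
	balances := map[int]*big.Int{261: big.NewInt(0), 262: big.NewInt(5)}
	accumulated := map[int]*big.Int{261: big.NewInt(20), 262: big.NewInt(0)}
	distributeFees(balances, accumulated)
	fmt.Println(balances[261], balances[262]) // 20 5
}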
@@ -323,18 +380,26 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 // CreateAccountDeposit (with 0 as DepositAmount) is created and added to the
 // l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
 // array.
-func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []common.L1Tx,
-	positionL1 int, l2Tx common.PoolL2Tx) (txs, []common.L1Tx, *common.AccountCreationAuth, int, error) {
+func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
+	selectionConfig *SelectionConfig, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx,
+	positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx,
+	*common.AccountCreationAuth, error) {
+	accSender, err := txsel.localAccountsDB.GetAccount(l2Tx.FromIdx)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	l2Tx.TokenID = accSender.TokenID // TODO rm and add it before at GetL1L2TxsSelection
+
 	// if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
 	// previous L2Tx in the current process already created a
 	// L1CoordinatorTx of this type, in the DB there still seem that needs
 	// to create a new L1CoordinatorTx, but as is already created, the tx
 	// is valid
 	if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
-		validTxs = append(validTxs, l2Tx)
-		return validTxs, l1CoordinatorTxs, nil, positionL1, nil
+		return &l2Tx, nil, nil, nil
 	}
 
+	var l1CoordinatorTx *common.L1Tx
 	var accAuth *common.AccountCreationAuth
 	if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
 		!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {

@@ -348,24 +413,22 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []
 			// account for ToEthAddr&ToBJJ already exist,
 			// there is no need to create a new one.
 			// tx valid, StateDB will use the ToIdx==0 to define the AuxToIdx
-			validTxs = append(validTxs, l2Tx)
-			return validTxs, l1CoordinatorTxs, nil, positionL1, nil
+			return &l2Tx, nil, nil, nil
 		}
 		// if not, check if AccountCreationAuth exist for that
 		// ToEthAddr
 		accAuth, err = txsel.l2db.GetAccountCreationAuth(l2Tx.ToEthAddr)
 		if err != nil {
 			// not found, l2Tx will not be added in the selection
-			return validTxs, l1CoordinatorTxs, nil, positionL1, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s",
+			return nil, nil, nil, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s",
 				l2Tx.ToIdx, l2Tx.ToEthAddr.Hex()))
 		}
 		if accAuth.BJJ != l2Tx.ToBJJ {
 			// if AccountCreationAuth.BJJ is not the same
 			// than in the tx, tx is not accepted
-			return validTxs, l1CoordinatorTxs, nil, positionL1, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr & ToBJJ found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s, ToBJJ: %s",
+			return nil, nil, nil, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr & ToBJJ found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s, ToBJJ: %s",
 				l2Tx.ToIdx, l2Tx.ToEthAddr.Hex(), l2Tx.ToBJJ.String()))
 		}
-		validTxs = append(validTxs, l2Tx)
 	} else {
 		// case: ToBJJ==0:
 		// if idx exist for EthAddr use it

@@ -374,20 +437,18 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []
 			// account for ToEthAddr already exist,
 			// there is no need to create a new one.
 			// tx valid, StateDB will use the ToIdx==0 to define the AuxToIdx
-			validTxs = append(validTxs, l2Tx)
-			return validTxs, l1CoordinatorTxs, nil, positionL1, nil
+			return &l2Tx, nil, nil, nil
 		}
 		// if not, check if AccountCreationAuth exist for that ToEthAddr
 		accAuth, err = txsel.l2db.GetAccountCreationAuth(l2Tx.ToEthAddr)
 		if err != nil {
 			// not found, l2Tx will not be added in the selection
-			return validTxs, l1CoordinatorTxs, nil, positionL1, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s",
+			return nil, nil, nil, tracerr.Wrap(fmt.Errorf("invalid L2Tx: ToIdx not found in StateDB, neither ToEthAddr found in AccountCreationAuths L2DB. ToIdx: %d, ToEthAddr: %s",
 				l2Tx.ToIdx, l2Tx.ToEthAddr))
 		}
-		validTxs = append(validTxs, l2Tx)
 	}
 	// create L1CoordinatorTx for the accountCreation
-	l1CoordinatorTx := common.L1Tx{
+	l1CoordinatorTx = &common.L1Tx{
 		Position:   positionL1,
 		UserOrigin: false,
 		FromEthAddr: accAuth.EthAddr,

@@ -397,9 +458,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []
 		DepositAmount: big.NewInt(0),
 		Type:          common.TxTypeCreateAccountDeposit,
 	}
-	positionL1++
-	l1CoordinatorTxs = append(l1CoordinatorTxs, l1CoordinatorTx)
-	} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) && l2Tx.ToBJJ != common.EmptyBJJComp {
+	} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
+		l2Tx.ToBJJ != common.EmptyBJJComp {
 		// if idx exist for EthAddr&BJJ use it
 		_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
 			l2Tx.TokenID)

@@ -407,14 +467,12 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []
 		// account for ToEthAddr&ToBJJ already exist, (where ToEthAddr==0xff)
 		// there is no need to create a new one.
 		// tx valid, StateDB will use the ToIdx==0 to define the AuxToIdx
-		validTxs = append(validTxs, l2Tx)
-		return validTxs, l1CoordinatorTxs, nil, positionL1, nil
+		return &l2Tx, nil, nil, nil
 	}
-	// if idx don't exist for EthAddr&BJJ,
-	// coordinator can create a new account without
-	// L1Authorization, as ToEthAddr==0xff
+	// if idx don't exist for EthAddr&BJJ, coordinator can create a
+	// new account without L1Authorization, as ToEthAddr==0xff
 	// create L1CoordinatorTx for the accountCreation
-	l1CoordinatorTx := common.L1Tx{
+	l1CoordinatorTx = &common.L1Tx{
 		Position:   positionL1,
 		UserOrigin: false,
 		FromEthAddr: l2Tx.ToEthAddr,

@@ -424,11 +482,13 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs txs, l1CoordinatorTxs []
 		DepositAmount: big.NewInt(0),
 		Type:          common.TxTypeCreateAccountDeposit,
 	}
-	positionL1++
-	l1CoordinatorTxs = append(l1CoordinatorTxs, l1CoordinatorTx)
 	}
+	if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
+		// L2Tx discarded
+		return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
+	}
 
-	return validTxs, l1CoordinatorTxs, accAuth, positionL1, nil
+	return &l2Tx, l1CoordinatorTx, accAuth, nil
 }
 
 func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
@@ -444,21 +504,21 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
 }
 
 // getL2Profitable returns the profitable selection of L2Txssorted by Nonce
-func (txsel *TxSelector) getL2Profitable(txs txs, max uint32) txs {
-	sort.Sort(txs)
-	if len(txs) < int(max) {
-		return txs
+func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
+	sort.Sort(txs(l2Txs))
+	if len(l2Txs) < int(max) {
+		return l2Txs
 	}
-	txs = txs[:max]
+	l2Txs = l2Txs[:max]
 
 	// sort l2Txs by Nonce. This can be done in many different ways, what
-	// is needed is to output the txs where the Nonce of txs for each
-	// Account is sorted, but the txs can not be grouped by sender Account
+	// is needed is to output the l2Txs where the Nonce of l2Txs for each
+	// Account is sorted, but the l2Txs can not be grouped by sender Account
 	// neither by Fee. This is because later on the Nonces will need to be
 	// sequential for the zkproof generation.
-	sort.SliceStable(txs, func(i, j int) bool {
-		return txs[i].Nonce < txs[j].Nonce
+	sort.SliceStable(l2Txs, func(i, j int) bool {
+		return l2Txs[i].Nonce < l2Txs[j].Nonce
 	})
 
-	return txs
+	return l2Txs
 }
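Note: getL2Profitable is a two-phase sort — first by profitability (the txs sort interface) with a crop to max, then a stable sort by Nonce so each sender's txs come out in sequential-nonce order for the zkproof. A minimal runnable miniature of the idea (stand-in struct; the real code sorts by fee via the txs type):

package main

import (
	"fmt"
	"sort"
)

type poolTx struct {
	fee   int
	nonce int
}

// profitable crops to the `max` highest-fee txs, then stable-sorts by nonce;
// stable sorting preserves the fee order among txs with equal nonces.
func profitable(txs []poolTx, max int) []poolTx {
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].fee > txs[j].fee })
	if len(txs) > max {
		txs = txs[:max]
	}
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].nonce < txs[j].nonce })
	return txs
}

func main() {
	txs := []poolTx{{fee: 1, nonce: 1}, {fee: 9, nonce: 0}, {fee: 5, nonce: 0}}
	fmt.Println(profitable(txs, 2)) // [{9 0} {5 0}]
}

One difference worth noting: in the diff, the early-return path (fewer txs than max) returns the fee-sorted slice without the nonce pass; the miniature applies the nonce pass unconditionally.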
@@ -1,27 +1,32 @@
 package txselector
 
 import (
+	"crypto/ecdsa"
 	"io/ioutil"
+	"math/big"
 	"os"
 	"strconv"
 	"testing"
 	"time"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	ethCrypto "github.com/ethereum/go-ethereum/crypto"
 	"github.com/hermeznetwork/hermez-node/common"
 	dbUtils "github.com/hermeznetwork/hermez-node/db"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
+	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/test"
 	"github.com/hermeznetwork/hermez-node/test/til"
 	"github.com/hermeznetwork/hermez-node/txprocessor"
+	"github.com/iden3/go-iden3-crypto/babyjub"
 	"github.com/jmoiron/sqlx"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func initTest(t *testing.T, chainID uint16, testSet string) *TxSelector {
+func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address, testSet string) (*TxSelector, *til.Context) {
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)

@@ -37,52 +42,87 @@ func initTest(t *testing.T, chainID uint16, testSet string) *TxSelector {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	coordAccount := &CoordAccount{ // TODO TMP
-		Addr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
-		BJJ:  common.EmptyBJJComp,
+	// coordinator keys
+	var ethSk ecdsa.PrivateKey
+	ethSk.D = big.NewInt(int64(1)) // only for testing
+	ethSk.PublicKey.X, ethSk.PublicKey.Y = ethCrypto.S256().ScalarBaseMult(ethSk.D.Bytes())
+	ethSk.Curve = ethCrypto.S256()
+	addr := ethCrypto.PubkeyToAddress(ethSk.PublicKey)
+	var bjj babyjub.PublicKeyComp
+	err = bjj.UnmarshalText([]byte("c433f7a696b7aa3a5224efb3993baf0ccd9e92eecee0c29a3f6c8208a9e81d9e"))
+	require.NoError(t, err)
+	coordAccount := &CoordAccount{
+		Addr: addr,
+		BJJ:  bjj,
 		AccountCreationAuth: nil,
 	}
+	a := &common.AccountCreationAuth{
+		EthAddr: addr,
+		BJJ:     bjj,
+	}
+	msg, err := a.HashToSign(chainID, hermezContractAddr)
+	assert.NoError(t, err)
+	sig, err := ethCrypto.Sign(msg, &ethSk)
+	assert.NoError(t, err)
+	sig[64] += 27
+	coordAccount.AccountCreationAuth = sig
 
 	txsel, err := NewTxSelector(coordAccount, txselDir, sdb, l2DB)
 	require.NoError(t, err)
 
-	return txsel
+	test.WipeDB(txsel.l2db.DB())
+
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+
+	return txsel, tc
 }
 
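Note on the key setup above: setting D=1 on a secp256k1 key gives a fully deterministic test address, and the sig[64] += 27 shifts go-ethereum's recovery id (0/1 in the signature's last byte) to Ethereum's conventional V of 27/28. A minimal runnable sketch of the same steps (assumes the github.com/ethereum/go-ethereum module; the message payload is illustrative):

package main

import (
	"fmt"
	"math/big"

	ethCrypto "github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// throwaway key with D=1, like the test above (never do this outside tests)
	sk, err := ethCrypto.ToECDSA(new(big.Int).SetInt64(1).FillBytes(make([]byte, 32)))
	if err != nil {
		panic(err)
	}
	addr := ethCrypto.PubkeyToAddress(sk.PublicKey)
	msg := ethCrypto.Keccak256([]byte("example payload")) // Sign expects a 32-byte hash
	sig, err := ethCrypto.Sign(msg, sk)
	if err != nil {
		panic(err)
	}
	sig[64] += 27 // recovery id 0/1 -> Ethereum V 27/28
	fmt.Println(addr.Hex(), len(sig)) // deterministic address, 65-byte signature
}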
+func checkBalance(t *testing.T, tc *til.Context, txsel *TxSelector, username string, tokenid int, expected string) {
+	// Accounts.Idx does not match with the TxSelector tests as we are not
+	// using the Til L1CoordinatorTxs (as are generated by the TxSelector
+	// itself when processing the txs, so the Idxs does not match the Til
+	// idxs). But the Idx is obtained through StateDB.GetIdxByEthAddrBJJ
+	user := tc.Users[username]
+	idx, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(user.Addr, user.BJJ.Public().Compress(), common.TokenID(tokenid))
+	require.NoError(t, err)
+	checkBalanceByIdx(t, txsel, idx, expected)
+}
+
+func checkBalanceByIdx(t *testing.T, txsel *TxSelector, idx common.Idx, expected string) {
+	acc, err := txsel.localAccountsDB.GetAccount(idx)
+	require.NoError(t, err)
+	assert.Equal(t, expected, acc.Balance.String())
+}
+
+func addAccCreationAuth(t *testing.T, tc *til.Context, txsel *TxSelector, chainID uint16, hermezContractAddr ethCommon.Address, username string) []byte {
+	user := tc.Users[username]
+	a := &common.AccountCreationAuth{
+		EthAddr: user.Addr,
+		BJJ:     user.BJJ.Public().Compress(),
+	}
+	msg, err := a.HashToSign(chainID, hermezContractAddr)
+	assert.NoError(t, err)
+	sig, err := ethCrypto.Sign(msg, user.EthSk)
+	assert.NoError(t, err)
+	sig[64] += 27
+	a.Signature = sig
+
+	err = txsel.l2db.AddAccountCreationAuth(a)
+	assert.NoError(t, err)
+	return a.Signature
+}
+
+func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []common.PoolL2Tx) {
+	for i := 0; i < len(poolL2Txs); i++ {
+		err := txsel.l2db.AddTxTest(&poolL2Txs[i])
+		if err != nil {
+			log.Error(err)
+		}
+		require.NoError(t, err)
+	}
+}
 
-func addTokens(t *testing.T, tokens []common.Token, db *sqlx.DB) {
-	hdb := historydb.NewHistoryDB(db)
-	test.WipeDB(hdb.DB())
-	assert.NoError(t, hdb.AddBlock(&common.Block{
-		Num: 1,
-	}))
-	assert.NoError(t, hdb.AddTokens(tokens))
-}
-
-func TestGetL2TxSelection(t *testing.T) {
-	chainID := uint16(0)
-	txsel := initTest(t, chainID, til.SetPool0)
-	test.WipeDB(txsel.l2db.DB())
-
-	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
-	// generate test transactions
-	blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
-	assert.NoError(t, err)
-	// poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPool0)
-	// assert.NoError(t, err)
-
-	coordIdxs := make(map[common.TokenID]common.Idx)
-	coordIdxs[common.TokenID(0)] = common.Idx(256)
-	coordIdxs[common.TokenID(1)] = common.Idx(257)
-	coordIdxs[common.TokenID(2)] = common.Idx(258)
-	coordIdxs[common.TokenID(3)] = common.Idx(259)
-
 // add tokens to HistoryDB to avoid breaking FK constrains
+func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
+	var tokens []common.Token
+	for i := 0; i < int(tc.LastRegisteredTokenID); i++ {
+		tokens = append(tokens, common.Token{
@@ -94,61 +134,230 @@ func TestGetL2TxSelection(t *testing.T) {
 			Decimals:    18,
 		})
 	}
-	addTokens(t, tokens, txsel.l2db.DB())
+
+	hdb := historydb.NewHistoryDB(db)
+	assert.NoError(t, hdb.AddBlock(&common.Block{
+		Num: 1,
+	}))
+	assert.NoError(t, hdb.AddTokens(tokens))
+}
+
+func TestGetL2TxSelection(t *testing.T) {
+	chainID := uint16(0)
+	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+	txsel, tc := initTest(t, chainID, hermezContractAddr, til.SetPool0)
+
+	// generate test transactions, the L1CoordinatorTxs generated by Til
+	// will be ignored at this test, as will be the TxSelector who
+	// generates them when needed
+	blocks, err := tc.GenerateBlocks(til.SetBlockchainMinimumFlow0)
+	assert.NoError(t, err)
+
+	// restart nonces of TilContext, as will be set by generating directly
+	// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
+	tc.RestartNonces()
+
+	// add tokens to HistoryDB to avoid breaking FK constrains
+	addTokens(t, tc, txsel.l2db.DB())
 
 	tpc := txprocessor.Config{
-		NLevels:  32,
-		MaxFeeTx: 64,
-		MaxTx:    512,
-		MaxL1Tx:  64,
+		NLevels:  16,
+		MaxFeeTx: 10,
+		MaxTx:    20,
+		MaxL1Tx:  10,
 		ChainID:  chainID,
 	}
 	selectionConfig := &SelectionConfig{
-		MaxL1UserTxs:        32,
-		MaxL1CoordinatorTxs: 32,
-		TxProcessorConfig:   tpc,
+		MaxL1UserTxs:      5,
+		TxProcessorConfig: tpc,
 	}
-	txselStateDB := txsel.localAccountsDB.StateDB
-	tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig.TxProcessorConfig)
 
-	// Process the 1st batch, which contains the L1CoordinatorTxs necessary
-	// to create the Coordinator accounts to receive the fees
-	_, err = tp.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
+	// coordIdxs, accAuths, l1UserTxs, l1CoordTxs, l2Txs, err
+
+	log.Debug("block:0 batch:0")
+	l1UserTxs := []common.L1Tx{}
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
 	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(1), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(255), txsel.localAccountsDB.CurrentIdx())
+
+	log.Debug("block:0 batch:1")
+	l1UserTxs = []common.L1Tx{}
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(2), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(255), txsel.localAccountsDB.CurrentIdx())
+
+	log.Debug("block:0 batch:2")
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(3), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(257), txsel.localAccountsDB.CurrentIdx())
+	checkBalance(t, tc, txsel, "A", 0, "500")
+	checkBalance(t, tc, txsel, "C", 1, "0")
+
+	log.Debug("block:0 batch:3")
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 1, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(4), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(258), txsel.localAccountsDB.CurrentIdx())
+	checkBalance(t, tc, txsel, "A", 0, "500")
+	checkBalance(t, tc, txsel, "A", 1, "500")
+	checkBalance(t, tc, txsel, "C", 1, "0")
+
+	log.Debug("block:0 batch:4")
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(5), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(258), txsel.localAccountsDB.CurrentIdx())
+	checkBalance(t, tc, txsel, "A", 0, "500")
+	checkBalance(t, tc, txsel, "A", 1, "500")
+	checkBalance(t, tc, txsel, "C", 1, "0")
+
+	log.Debug("block:0 batch:5")
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 1, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, common.BatchNum(6), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(259), txsel.localAccountsDB.CurrentIdx())
+	checkBalance(t, tc, txsel, "A", 0, "600")
+	checkBalance(t, tc, txsel, "A", 1, "500")
+	checkBalance(t, tc, txsel, "B", 0, "400")
+	checkBalance(t, tc, txsel, "C", 1, "0")
+
+	log.Debug("block:0 batch:6")
+	// simulate the PoolL2Txs of the batch6
+	batchPoolL2 := `
+	Type: PoolL2
+	PoolTransfer(1) A-B: 200 (126)
+	PoolTransfer(0) B-C: 100 (126)`
+	poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	// add AccountCreationAuths that will be used at the next batch
+	accAuthSig0 := addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
+	accAuthSig1 := addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "C")
+	// add ToEthAddr for the corresponent ToIdx, and remove ToIdx for Batches[6].L2Tx
+	poolL2Txs[0].ToEthAddr = tc.Users["B"].Addr
+	poolL2Txs[0].ToIdx = common.Idx(0)
+	poolL2Txs[1].ToEthAddr = tc.Users["C"].Addr
+	poolL2Txs[1].ToIdx = common.Idx(0)
+	// add the PoolL2Txs to the l2DB
+	addL2Txs(t, txsel, poolL2Txs)
+
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
+	coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
+	assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
+	assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[1])
+	assert.Equal(t, accAuthSig0, accAuths[2])
+	assert.Equal(t, accAuthSig1, accAuths[3])
+	assert.Equal(t, 1, len(oL1UserTxs))
+	assert.Equal(t, 4, len(oL1CoordTxs))
+	assert.Equal(t, 2, len(oL2Txs))
+	assert.Equal(t, len(oL1CoordTxs), len(accAuths))
+	assert.Equal(t, common.BatchNum(7), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
+	checkBalanceByIdx(t, txsel, 261, "20") // CoordIdx for TokenID=1
+	checkBalanceByIdx(t, txsel, 262, "10") // CoordIdx for TokenID=0
+	checkBalance(t, tc, txsel, "A", 0, "600")
+	checkBalance(t, tc, txsel, "A", 1, "280")
+	checkBalance(t, tc, txsel, "B", 0, "290")
+	checkBalance(t, tc, txsel, "B", 1, "200")
+	checkBalance(t, tc, txsel, "C", 0, "100")
+	checkBalance(t, tc, txsel, "D", 0, "800")
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
 
-	// add the 1st batch of transactions to the TxSelector
-	addL2Txs(t, txsel, common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[0].L2Txs))
+	log.Debug("block:0 batch:7")
+	// simulate the PoolL2Txs of the batch6
+	batchPoolL2 = `
+	Type: PoolL2
+	PoolTransfer(0) A-B: 100 (126)
+	PoolTransfer(0) C-A: 50 (126)
+	PoolTransfer(1) B-C: 100 (126)
+	PoolExit(0) A: 100 (126)`
+	poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	addL2Txs(t, txsel, poolL2Txs)
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
+	coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
+	assert.Equal(t, 0, len(accAuths))
+	assert.Equal(t, 0, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 4, len(oL2Txs))
+	assert.Equal(t, len(oL1CoordTxs), len(accAuths))
+	assert.Equal(t, common.BatchNum(8), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
+	checkBalanceByIdx(t, txsel, 261, "30")
+	checkBalanceByIdx(t, txsel, 262, "35")
+	checkBalance(t, tc, txsel, "A", 0, "430")
+	checkBalance(t, tc, txsel, "A", 1, "280")
+	checkBalance(t, tc, txsel, "B", 0, "390")
+	checkBalance(t, tc, txsel, "B", 1, "90")
+	checkBalance(t, tc, txsel, "C", 0, "45")
+	checkBalance(t, tc, txsel, "C", 1, "100")
+	checkBalance(t, tc, txsel, "D", 0, "800")
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
 
-	_, _, l1CoordTxs, l2Txs, err := txsel.GetL2TxSelection(selectionConfig, 0)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, len(l2Txs))
-	assert.Equal(t, 0, len(l1CoordTxs))
+	log.Debug("block:1 batch:0")
+	// simulate the PoolL2Txs of the batch6
+	batchPoolL2 = `
+	Type: PoolL2
+	PoolTransfer(0) D-A: 300 (126)
+	PoolTransfer(0) B-D: 100 (126)
+	`
+	poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	addL2Txs(t, txsel, poolL2Txs)
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
+	coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, 0, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, []common.Idx{262}, coordIdxs)
+	assert.Equal(t, 0, len(accAuths))
+	assert.Equal(t, 4, len(oL1UserTxs))
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 2, len(oL2Txs))
+	assert.Equal(t, len(oL1CoordTxs), len(accAuths))
+	assert.Equal(t, common.BatchNum(9), txsel.localAccountsDB.CurrentBatch())
+	assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
+	checkBalanceByIdx(t, txsel, 261, "30")
+	checkBalanceByIdx(t, txsel, 262, "75")
+	checkBalance(t, tc, txsel, "A", 0, "730")
+	checkBalance(t, tc, txsel, "A", 1, "280")
+	checkBalance(t, tc, txsel, "B", 0, "380")
+	checkBalance(t, tc, txsel, "B", 1, "90")
+	checkBalance(t, tc, txsel, "C", 0, "845")
+	checkBalance(t, tc, txsel, "C", 1, "100")
+	checkBalance(t, tc, txsel, "D", 0, "470")
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+
+	_, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, 0, blocks[0].Rollup.L1UserTxs)
+	assert.NoError(t, err)
 
 	// TODO once L2DB is updated to return error in case that AddTxTest
 	// fails, and the Til is updated, update this test, checking that the
 	// selected PoolL2Tx are correctly sorted by Nonce
 
 	// TODO once L2DB is updated to store the parameter AbsoluteFee (which
 	// is used by TxSelector to sort L2Txs), uncomment this next lines of
 	// test, and put the expected value for
 	// l2Txs[len(l2Txs)-1].AbsoluteFee, which is the Tx which has the
 	// Fee==192.
 	/*
 		// add the 3rd batch of transactions to the TxSelector
 		addL2Txs(t, txsel, common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs))
 
 		_, l2Txs, err = txsel.GetL2TxSelection(coordIdxs, 0)
 		assert.NoError(t, err)
 		for _, tx := range l2Txs {
 			fmt.Println(tx.FromIdx, tx.ToIdx, tx.AbsoluteFee)
 		}
 		require.Equal(t, 10, len(l2Txs))
 		assert.Equal(t, float64(0), l2Txs[0].AbsoluteFee)
 
 		fmt.Println(l2Txs[len(l2Txs)-1].Amount)
 		assert.Equal(t, float64(4), l2Txs[len(l2Txs)-1].AbsoluteFee)
 	*/
+	// TODO:
+	// - check PoolL2Txs returned are sorted by nonce
+	// - check poolL2Txs.AbsoluteFee parameters
 }