
Merge pull request #241 from hermeznetwork/feature/statedb-tests-up

ProcessTxs fee collection update & tests, and other:
feature/sql-semaphore1
Eduard S authored 3 years ago, committed by GitHub
commit 04276a4d2a
7 changed files with 167 additions and 86 deletions:

  1. common/zk.go                        +8   -0
  2. db/statedb/txprocessors.go          +14  -10
  3. db/statedb/txprocessors_test.go     +113 -60
  4. synchronizer/synchronizer_test.go   +3   -0
  5. test/til/sets.go                    +14  -3
  6. test/til/txs.go                     +4   -1
  7. test/til/txs_test.go                +11  -12

common/zk.go (+8 -0)

@@ -149,6 +149,14 @@ type ZKInputs struct {
     //
     // Intermediate States to parallelize witness computation
+    // Note: the Intermediate States (IS) of the last transaction does not
+    // exist. Meaning that transaction 3 (4th) will fill the parameters
+    // FromIdx[3] and ISOnChain[3], but last transaction (nTx-1) will fill
+    // FromIdx[nTx-1] but will not fill ISOnChain. That's why IS have
+    // length of nTx-1, while the other parameters have length of nTx.
+    // Last transaction does not need intermediate state since its output
+    // will not be used.
     // decode-tx
     // ISOnChain indicates if tx is L1 (true) or L2 (false)
     ISOnChain []*big.Int // bool, len: [nTx - 1]
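As an aside on the sizing rule documented above (per-transaction inputs have length nTx, Intermediate States have length nTx-1), here is a minimal standalone Go sketch of the guarded indexing it implies; the names are simplified placeholders, not the real ZKInputs fields:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    nTx := 4
    // per-transaction parameters: one entry per transaction
    fromIdx := make([]*big.Int, nTx)
    // intermediate states: one entry per transaction except the last one
    isOnChain := make([]*big.Int, nTx-1)

    for i := 0; i < nTx; i++ {
        fromIdx[i] = big.NewInt(int64(256 + i))
        // guard the IS write so the last transaction is skipped
        if i < len(isOnChain) {
            isOnChain[i] = big.NewInt(1)
        }
    }
    fmt.Println(len(fromIdx), len(isOnChain)) // prints: 4 3
}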

db/statedb/txprocessors.go (+14 -10)

@@ -214,7 +214,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
         }
     }
     if s.typ == TypeSynchronizer {
-        // return exitInfos and createdAccounts, so Synchronizer will
+        // return exitInfos, createdAccounts and collectedFees, so Synchronizer will
         // be able to store it into HistoryDB for the concrete BatchNum
         return &ProcessTxOutput{
             ZKInputs: nil,
@@ -230,6 +230,7 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
         // zki.FeeIdxs = ? // TODO, this will be get from the config file
         tokenIDs, err := s.getTokenIDsBigInt(l1usertxs, l1coordinatortxs, l2txs)
         if err != nil {
+            log.Error(err)
             return nil, err
         }
         s.zki.FeePlanTokens = tokenIDs
@@ -261,8 +262,9 @@ func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []common.L1Tx, l
     for i := 0; i < len(l2txs); i++ {
         // as L2Tx does not have parameter TokenID, get it from the
         // AccountsDB (in the StateDB)
-        acc, err := s.GetAccount(l2txs[i].ToIdx)
+        acc, err := s.GetAccount(l2txs[i].FromIdx)
         if err != nil {
+            log.Errorf("ToIdx %d not found: %s", l2txs[i].ToIdx, err.Error())
             return nil, err
         }
         tokenIDs[acc.TokenID] = true
@@ -297,8 +299,10 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
            s.zki.FromBJJCompressed[s.i] = BJJCompressedTo256BigInts(tx.FromBJJ.Compress())
        }
-       // Intermediate States
-       s.zki.ISOnChain[s.i] = big.NewInt(1)
+       // Intermediate States, for all the transactions except for the last one
+       if s.i < len(s.zki.ISOnChain) { // len(s.zki.ISOnChain) == nTx
+           s.zki.ISOnChain[s.i] = big.NewInt(1)
+       }
    }
    switch tx.Type {
@@ -604,18 +608,18 @@ func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, coll
    feeAndAmount := new(big.Int).Add(tx.Amount, fee)
    accSender.Balance = new(big.Int).Sub(accSender.Balance, feeAndAmount)
    // send the fee to the Idx of the Coordinator for the TokenID
-   accCoord, err := s.GetAccount(coordIdxsMap[tx.TokenID])
+   accCoord, err := s.GetAccount(coordIdxsMap[accSender.TokenID])
    if err != nil {
        log.Debugw("No coord Idx to receive fee", "tx", tx)
    } else {
        accCoord.Balance = new(big.Int).Add(accCoord.Balance, fee)
-       _, err = s.UpdateAccount(coordIdxsMap[tx.TokenID], accCoord)
+       _, err = s.UpdateAccount(coordIdxsMap[accSender.TokenID], accCoord)
        if err != nil {
            log.Error(err)
            return err
        }
        if s.typ == TypeSynchronizer {
-           collected := collectedFees[tx.TokenID]
+           collected := collectedFees[accSender.TokenID]
            collected.Add(collected, fee)
        }
    }
@@ -743,18 +747,18 @@ func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collecte
    acc.Balance = new(big.Int).Sub(acc.Balance, feeAndAmount)
    // send the fee to the Idx of the Coordinator for the TokenID
-   accCoord, err := s.GetAccount(coordIdxsMap[tx.TokenID])
+   accCoord, err := s.GetAccount(coordIdxsMap[acc.TokenID])
    if err != nil {
        log.Debugw("No coord Idx to receive fee", "tx", tx)
    } else {
        accCoord.Balance = new(big.Int).Add(accCoord.Balance, fee)
-       _, err = s.UpdateAccount(coordIdxsMap[tx.TokenID], accCoord)
+       _, err = s.UpdateAccount(coordIdxsMap[acc.TokenID], accCoord)
        if err != nil {
            log.Error(err)
            return nil, false, err
        }
        if s.typ == TypeSynchronizer {
-           collected := collectedFees[tx.TokenID]
+           collected := collectedFees[acc.TokenID]
            collected.Add(collected, fee)
        }
    }
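As an aside on the fee-routing change above (the coordinator account and the collectedFees accumulator are now keyed by the sender account's TokenID rather than tx.TokenID), here is a hedged, self-contained sketch of that bookkeeping; TokenID and Idx are simplified stand-ins, not the real StateDB API:

package main

import (
    "fmt"
    "math/big"
)

type TokenID uint32
type Idx uint64

func main() {
    // which coordinator account (Idx) receives fees for each TokenID
    coordIdxsMap := map[TokenID]Idx{0: 256, 1: 257}
    // fees accumulated per TokenID, as returned to the Synchronizer
    collectedFees := map[TokenID]*big.Int{0: big.NewInt(0), 1: big.NewInt(0)}

    senderTokenID := TokenID(1) // taken from the sender account, not from the tx
    fee := big.NewInt(2)

    if _, ok := coordIdxsMap[senderTokenID]; ok {
        // in the real code the coordinator account is credited here as well
        collected := collectedFees[senderTokenID]
        collected.Add(collected, fee)
    }
    fmt.Println(collectedFees[senderTokenID]) // prints: 2
}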

db/statedb/txprocessors_test.go (+113 -60)

@@ -1,6 +1,8 @@
 package statedb
 
 import (
+    "encoding/json"
+    "fmt"
     "io/ioutil"
     "os"
     "testing"
@@ -13,12 +15,7 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-// TODO: Test CollectedFees output
-
 func TestProcessTxsSynchronizer(t *testing.T) {
-    // TODO once TTGL is updated, use the blockchain L2Tx (not PoolL2Tx) for
-    // the Synchronizer tests
     dir, err := ioutil.TempDir("", "tmpdb")
     require.Nil(t, err)
     defer assert.Nil(t, os.RemoveAll(dir))
@@ -26,7 +23,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
     sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
     assert.Nil(t, err)
 
-    // generate test transactions from test.SetTest0 code
+    // generate test transactions from test.SetBlockchain0 code
     tc := til.NewContext(eth.RollupConstMaxL1UserTx)
     blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
     require.Nil(t, err)
@@ -36,7 +33,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
     assert.Equal(t, 0, len(blocks[0].Batches[1].L1CoordinatorTxs))
     assert.Equal(t, 22, len(blocks[0].Batches[2].L2Txs))
     assert.Equal(t, 1, len(blocks[1].Batches[0].L1CoordinatorTxs))
-    assert.Equal(t, 59, len(blocks[1].Batches[0].L2Txs))
+    assert.Equal(t, 62, len(blocks[1].Batches[0].L2Txs))
     assert.Equal(t, 1, len(blocks[1].Batches[1].L1CoordinatorTxs))
     assert.Equal(t, 8, len(blocks[1].Batches[1].L2Txs))
@@ -48,54 +45,83 @@ func TestProcessTxsSynchronizer(t *testing.T) {
     // Process the 1st batch, which contains the L1CoordinatorTxs necessary
     // to create the Coordinator accounts to receive the fees
-    log.Debug("1st batch, 1st block, only L1CoordinatorTxs")
+    log.Debug("block:0 batch:0, only L1CoordinatorTxs")
     ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
     require.Nil(t, err)
     assert.Equal(t, 4, len(ptOut.CreatedAccounts))
+    assert.Equal(t, 0, len(ptOut.CollectedFees))
-    log.Debug("2nd batch, 1st block")
+    log.Debug("block:0 batch:1")
     l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
     ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
     assert.Equal(t, 0, len(ptOut.ExitInfos))
     assert.Equal(t, 31, len(ptOut.CreatedAccounts))
+    assert.Equal(t, 4, len(ptOut.CollectedFees))
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(1)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
     acc, err := sdb.GetAccount(idxA1)
     require.Nil(t, err)
     assert.Equal(t, "50", acc.Balance.String())
-    log.Debug("3rd batch, 1st block")
+    log.Debug("block:0 batch:2")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs)
     ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    // TODO once TTGL is updated, add a check that a input poolL2Tx with
-    // Nonce & TokenID =0, after ProcessTxs call has the expected value
     assert.Equal(t, 0, len(ptOut.ExitInfos))
     assert.Equal(t, 0, len(ptOut.CreatedAccounts))
+    assert.Equal(t, 4, len(ptOut.CollectedFees))
+    assert.Equal(t, "2", ptOut.CollectedFees[common.TokenID(0)].String())
+    assert.Equal(t, "1", ptOut.CollectedFees[common.TokenID(1)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
     acc, err = sdb.GetAccount(idxA1)
     require.Nil(t, err)
-    assert.Equal(t, "28", acc.Balance.String())
+    assert.Equal(t, "35", acc.Balance.String())
-    log.Debug("1st batch, 2nd block")
+    log.Debug("block:1 batch:0")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[0].L2Txs)
+    // before processing expect l2Txs[0:2].Nonce==0
+    assert.Equal(t, common.Nonce(0), l2Txs[0].Nonce)
+    assert.Equal(t, common.Nonce(0), l2Txs[1].Nonce)
+    assert.Equal(t, common.Nonce(0), l2Txs[2].Nonce)
     ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
+    // after processing expect l2Txs[0:2].Nonce!=0 and has expected value
+    assert.Equal(t, common.Nonce(6), l2Txs[0].Nonce)
+    assert.Equal(t, common.Nonce(7), l2Txs[1].Nonce)
+    assert.Equal(t, common.Nonce(8), l2Txs[2].Nonce)
     assert.Equal(t, 4, len(ptOut.ExitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
     assert.Equal(t, 1, len(ptOut.CreatedAccounts))
+    assert.Equal(t, 4, len(ptOut.CollectedFees))
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
+    assert.Equal(t, "1", ptOut.CollectedFees[common.TokenID(1)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
     acc, err = sdb.GetAccount(idxA1)
     require.Nil(t, err)
-    assert.Equal(t, "53", acc.Balance.String())
+    assert.Equal(t, "57", acc.Balance.String())
-    log.Debug("2nd batch, 2nd block")
+    log.Debug("block:1 batch:1")
     l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[1].L2Txs)
     ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
     assert.Equal(t, 2, len(ptOut.ExitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
     assert.Equal(t, 1, len(ptOut.CreatedAccounts))
+    assert.Equal(t, 4, len(ptOut.CollectedFees))
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(1)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
+    assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
     acc, err = sdb.GetAccount(idxA1)
     assert.Nil(t, err)
-    assert.Equal(t, "78", acc.Balance.String())
+    assert.Equal(t, "82", acc.Balance.String())
     idxB0 := tc.Users["C"].Accounts[common.TokenID(0)].Idx
     acc, err = sdb.GetAccount(idxB0)
@@ -108,9 +134,6 @@ func TestProcessTxsSynchronizer(t *testing.T) {
     assert.Equal(t, "2", acc.Balance.String())
 }
 
-/*
-WIP
-
 func TestProcessTxsBatchBuilder(t *testing.T) {
     dir, err := ioutil.TempDir("", "tmpdb")
     require.Nil(t, err)
@@ -119,49 +142,75 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
     sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
     assert.Nil(t, err)
-    // generate test transactions from test.SetTest0 code
-    tc := til.NewContext()
-    blocks := tc.GenerateBlocks(til.SetBlockchain0)
+    // generate test transactions from test.SetBlockchain0 code
+    tc := til.NewContext(eth.RollupConstMaxL1UserTx)
+    blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
+    require.Nil(t, err)
-    assert.Equal(t, 29, len(blocks[0].Batches[0].L1UserTxs))
-    assert.Equal(t, 0, len(blocks[0].Batches[0].L1CoordinatorTxs))
-    assert.Equal(t, 21, len(blocks[0].Batches[0].L2Txs))
-    assert.Equal(t, 1, len(blocks[0].Batches[1].L1UserTxs))
-    assert.Equal(t, 1, len(blocks[0].Batches[1].L1CoordinatorTxs))
-    assert.Equal(t, 59, len(blocks[0].Batches[1].L2Txs))
-    assert.Equal(t, 9, len(blocks[0].Batches[2].L1UserTxs))
-    assert.Equal(t, 0, len(blocks[0].Batches[2].L1CoordinatorTxs))
-    assert.Equal(t, 8, len(blocks[0].Batches[2].L2Txs))
+    // Coordinator Idx where to send the fees
+    coordIdxs := []common.Idx{256, 257, 258, 259}
     // Idx of user 'A'
     idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx
-    // use first batch
-    l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[0].L2Txs)
-    _, exitInfos, err := sdb.ProcessTxs(coordIdxs, blocks[0].Batches[0].L1UserTxs, blocks[0].Batches[0].L1CoordinatorTxs, l2Txs)
+    // Process the 1st batch, which contains the L1CoordinatorTxs necessary
+    // to create the Coordinator accounts to receive the fees
+    log.Debug("block:0 batch:0, only L1CoordinatorTxs")
+    ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
     require.Nil(t, err)
-    assert.Equal(t, 0, len(exitInfos))
+    // expect 0 at CreatedAccount, as is only computed when StateDB.Type==TypeSynchronizer
+    assert.Equal(t, 0, len(ptOut.CreatedAccounts))
+    log.Debug("block:0 batch:1")
+    l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
+    require.Nil(t, err)
+    assert.Equal(t, 0, len(ptOut.ExitInfos))
+    assert.Equal(t, 0, len(ptOut.CreatedAccounts))
     acc, err := sdb.GetAccount(idxA1)
-    assert.Nil(t, err)
-    assert.Equal(t, "28", acc.Balance.String())
+    require.Nil(t, err)
+    assert.Equal(t, "50", acc.Balance.String())
-    // use second batch
-    l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
-    _, exitInfos, err = sdb.ProcessTxs(coordIdxs, blocks[0].Batches[1].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
+    log.Debug("block:0 batch:2")
+    l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs)
+    ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    assert.Equal(t, 5, len(exitInfos))
+    assert.Equal(t, 0, len(ptOut.ExitInfos))
+    assert.Equal(t, 0, len(ptOut.CreatedAccounts))
     acc, err = sdb.GetAccount(idxA1)
     require.Nil(t, err)
-    assert.Equal(t, "48", acc.Balance.String())
+    assert.Equal(t, "35", acc.Balance.String())
-    // use third batch
-    l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Batches[2].L2Txs)
-    _, exitInfos, err = sdb.ProcessTxs(coordIdxs, blocks[0].Batches[2].L1UserTxs, blocks[0].Batches[2].L1CoordinatorTxs, l2Txs)
+    log.Debug("block:1 batch:0")
+    l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[0].L2Txs)
+    _, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Batches[0].L1CoordinatorTxs, l2Txs)
+    require.Nil(t, err)
+    acc, err = sdb.GetAccount(idxA1)
+    require.Nil(t, err)
+    assert.Equal(t, "57", acc.Balance.String())
+    log.Debug("block:1 batch:1")
+    l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Batches[1].L2Txs)
+    _, err = sdb.ProcessTxs(coordIdxs, blocks[1].L1UserTxs, blocks[1].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    assert.Equal(t, 1, len(exitInfos))
     acc, err = sdb.GetAccount(idxA1)
     assert.Nil(t, err)
-    assert.Equal(t, "23", acc.Balance.String())
+    assert.Equal(t, "82", acc.Balance.String())
+    idxB0 := tc.Users["C"].Accounts[common.TokenID(0)].Idx
+    acc, err = sdb.GetAccount(idxB0)
+    require.Nil(t, err)
+    assert.Equal(t, "51", acc.Balance.String())
+    // get balance of Coordinator account for TokenID==0
+    acc, err = sdb.GetAccount(common.Idx(256))
+    require.Nil(t, err)
+    assert.Equal(t, common.TokenID(0), acc.TokenID)
+    assert.Equal(t, "2", acc.Balance.String())
+    acc, err = sdb.GetAccount(common.Idx(257))
+    require.Nil(t, err)
+    assert.Equal(t, common.TokenID(1), acc.TokenID)
+    assert.Equal(t, "2", acc.Balance.String())
 }
 
 func TestZKInputsGeneration(t *testing.T) {
@@ -172,22 +221,26 @@ func TestZKInputsGeneration(t *testing.T) {
     sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
     assert.Nil(t, err)
-    // generate test transactions from test.SetTest0 code
-    tc := til.NewContext()
-    blocks := tc.GenerateBlocks(til.SetBlockchain0)
-    assert.Equal(t, 29, len(blocks[0].Batches[0].L1UserTxs))
-    assert.Equal(t, 0, len(blocks[0].Batches[0].L1CoordinatorTxs))
-    assert.Equal(t, 21, len(blocks[0].Batches[0].L2Txs))
+    // generate test transactions from test.SetBlockchain0 code
+    tc := til.NewContext(eth.RollupConstMaxL1UserTx)
+    blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
+    require.Nil(t, err)
+    // Coordinator Idx where to send the fees
+    coordIdxs := []common.Idx{256, 257, 258, 259}
-    l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[0].L2Txs)
-    zki, _, err := sdb.ProcessTxs(coordIdxs, blocks[0].Batches[0].L1UserTxs, blocks[0].Batches[0].L1CoordinatorTxs, l2Txs)
+    log.Debug("block:0 batch:0, only L1CoordinatorTxs")
+    _, err = sdb.ProcessTxs(nil, nil, blocks[0].Batches[0].L1CoordinatorTxs, nil)
+    require.Nil(t, err)
+    l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Batches[1].L2Txs)
+    ptOut, err := sdb.ProcessTxs(coordIdxs, blocks[0].L1UserTxs, blocks[0].Batches[1].L1CoordinatorTxs, l2Txs)
     require.Nil(t, err)
-    s, err := json.Marshal(zki)
+    s, err := json.Marshal(ptOut.ZKInputs)
     require.Nil(t, err)
-    debug:=true
+    debug := false
     if debug {
         fmt.Println(string(s))
     }
 }
-*/

synchronizer/synchronizer_test.go (+3 -0)

@@ -24,6 +24,7 @@ import (
 var tokenConsts = map[common.TokenID]eth.ERC20Consts{}
 var forceExits = map[int64][]common.ExitInfo{} // ForgeL1TxsNum -> []exit
+var nonces = map[common.Idx]common.Nonce{}
 
 type timer struct {
     time int64
@@ -441,6 +442,8 @@ func TestSync(t *testing.T) {
             tx := &batch.L2Txs[k]
             tx.Position = position
             position++
+            nonces[tx.FromIdx]++
+            tx.Nonce = nonces[tx.FromIdx]
             nTx, err := common.NewL2Tx(tx)
             require.Nil(t, err)
             *tx = *nTx

test/til/sets.go (+14 -3)

@@ -10,6 +10,8 @@ AddToken(1)
 AddToken(2)
 AddToken(3)
 
+// block:0 batch:0
+
 // Coordinator accounts, Idxs: 256, 257, 258, 259
 CreateAccountCoordinator(0) Coord
 CreateAccountCoordinator(1) Coord
@@ -17,11 +19,12 @@ CreateAccountCoordinator(2) Coord
 CreateAccountCoordinator(3) Coord
 
 > batch
 
+// block:0 batch:1
 // deposits TokenID: 1
 CreateAccountDeposit(1) A: 50
 CreateAccountDeposit(1) B: 5
-CreateAccountDeposit(1) C: 20
+CreateAccountDeposit(1) C: 200
 CreateAccountDeposit(1) D: 25
 CreateAccountDeposit(1) E: 25
 CreateAccountDeposit(1) F: 25
@@ -55,6 +58,7 @@ CreateAccountDeposit(0) B: 10000
 CreateAccountDeposit(0) C: 1
 
 > batchL1
 
+// block:0 batch:2
 // transactions TokenID: 1
 Transfer(1) A-B: 5 (1)
@@ -63,7 +67,7 @@ Transfer(1) A-M: 5 (1)
 Transfer(1) A-N: 5 (1)
 Transfer(1) A-O: 5 (1)
 Transfer(1) B-C: 3 (1)
-Transfer(1) C-A: 3 (255)
+Transfer(1) C-A: 10 (200)
 Transfer(1) D-A: 5 (1)
 Transfer(1) D-Z: 5 (1)
 Transfer(1) D-Y: 5 (1)
@@ -82,9 +86,15 @@ Transfer(0) B-C: 50 (192)
 > batchL1
 > block
 
+// block:1 batch:0
+
 // A (3) still does not exist, coordinator should create new L1Tx to create the account
 CreateAccountCoordinator(3) A
 
+Transfer(1) A-B: 1 (1)
+Transfer(1) A-B: 1 (1)
+Transfer(1) A-B: 1 (1)
+
 Transfer(3) B-A: 5 (1)
 Transfer(2) A-B: 5 (1)
 Transfer(1) I-K: 3 (1)
@@ -132,7 +142,7 @@ Transfer(1) W-J: 3 (1)
 Transfer(1) W-A: 5 (1)
 Transfer(1) W-Z: 5 (1)
 Transfer(1) X-B: 5 (1)
-Transfer(1) X-C: 5 (50)
+Transfer(1) X-C: 10 (200)
 Transfer(1) X-D: 5 (1)
 Transfer(1) X-E: 5 (1)
 Transfer(1) Y-B: 5 (1)
@@ -148,6 +158,7 @@ Exit(1) Y: 5 (1)
 Exit(1) Z: 5 (1)
 
 > batch
 
+// block:1 batch:1
 Deposit(1) A: 50
 Deposit(1) B: 5

test/til/txs.go (+4 -1)

@@ -385,7 +385,10 @@ func (tc *Context) setIdxs() error {
             }
         }
         tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID].Nonce++
-        testTx.L2Tx.Nonce = tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID].Nonce
+        // next line is commented to avoid Blockchain L2Txs to have
+        // Nonce different from 0, as from Blockchain those
+        // transactions will come without Nonce
+        // testTx.L2Tx.Nonce = tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID].Nonce
 
         // set real Idx
         testTx.L2Tx.FromIdx = tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID].Idx
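As an aside: with the assignment above commented out, Til leaves Nonce at 0 on blockchain L2Txs, so a consumer that needs sequential nonces assigns them per FromIdx itself (the synchronizer test change above does exactly this). A small sketch of that pattern with a simplified transaction type, not the real common.L2Tx:

package main

import "fmt"

type L2Tx struct {
    FromIdx uint64
    Nonce   uint64
}

func main() {
    txs := []L2Tx{{FromIdx: 256}, {FromIdx: 257}, {FromIdx: 256}}
    nonces := map[uint64]uint64{}
    for i := range txs {
        nonces[txs[i].FromIdx]++ // first nonce handed out per account is 1
        txs[i].Nonce = nonces[txs[i].FromIdx]
    }
    fmt.Println(txs) // prints: [{256 1} {257 1} {256 2}]
}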

test/til/txs_test.go (+11 -12)

@@ -110,36 +110,36 @@ func TestGenerateBlocks(t *testing.T) {
     // // #4: CreateAccountDeposit(1) D: 5
     tc.checkL1TxParams(t, blocks[0].L1UserTxs[4], common.TxTypeCreateAccountDeposit, 1, "D", "", big.NewInt(5), nil)
     // #5: Transfer(1) A-B: 6 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[0], common.TxTypeTransfer, 1, "A", "B", big.NewInt(6), common.BatchNum(3), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[0], common.TxTypeTransfer, 1, "A", "B", big.NewInt(6), common.BatchNum(3))
     // #6: Transfer(1) B-D: 3 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[1], common.TxTypeTransfer, 1, "B", "D", big.NewInt(3), common.BatchNum(3), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[1], common.TxTypeTransfer, 1, "B", "D", big.NewInt(3), common.BatchNum(3))
     // #7: Transfer(1) A-D: 1 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[2], common.TxTypeTransfer, 1, "A", "D", big.NewInt(1), common.BatchNum(3), common.Nonce(2))
+    tc.checkL2TxParams(t, blocks[0].Batches[2].L2Txs[2], common.TxTypeTransfer, 1, "A", "D", big.NewInt(1), common.BatchNum(3))
     // change of Batch
     // #8: CreateAccountDepositTransfer(1) F-A: 15, 10 (3)
     tc.checkL1TxParams(t, blocks[0].L1UserTxs[5], common.TxTypeCreateAccountDepositTransfer, 1, "F", "A", big.NewInt(15), big.NewInt(10))
     // #9: DepositTransfer(1) A-B: 15, 10 (1)
     tc.checkL1TxParams(t, blocks[0].L1UserTxs[6], common.TxTypeDepositTransfer, 1, "A", "B", big.NewInt(15), big.NewInt(10))
     // #11: Transfer(1) C-A : 3 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[3].L2Txs[0], common.TxTypeTransfer, 1, "C", "A", big.NewInt(3), common.BatchNum(4), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[3].L2Txs[0], common.TxTypeTransfer, 1, "C", "A", big.NewInt(3), common.BatchNum(4))
     // #12: Transfer(2) A-B: 15 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[3].L2Txs[1], common.TxTypeTransfer, 2, "A", "B", big.NewInt(15), common.BatchNum(4), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[3].L2Txs[1], common.TxTypeTransfer, 2, "A", "B", big.NewInt(15), common.BatchNum(4))
     // #13: Deposit(1) User0: 20
     tc.checkL1TxParams(t, blocks[0].L1UserTxs[7], common.TxTypeCreateAccountDeposit, 1, "User0", "", big.NewInt(20), nil)
     // // #14: Deposit(3) User1: 20
     tc.checkL1TxParams(t, blocks[0].L1UserTxs[8], common.TxTypeCreateAccountDeposit, 3, "User1", "", big.NewInt(20), nil)
     // #15: Transfer(1) User0-User1: 15 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[0], common.TxTypeTransfer, 1, "User0", "User1", big.NewInt(15), common.BatchNum(5), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[0], common.TxTypeTransfer, 1, "User0", "User1", big.NewInt(15), common.BatchNum(5))
     // #16: Transfer(3) User1-User0: 15 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[1], common.TxTypeTransfer, 3, "User1", "User0", big.NewInt(15), common.BatchNum(5), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[1], common.TxTypeTransfer, 3, "User1", "User0", big.NewInt(15), common.BatchNum(5))
     // #17: Transfer(1) A-C: 1 (1)
-    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[2], common.TxTypeTransfer, 1, "A", "C", big.NewInt(1), common.BatchNum(5), common.Nonce(4))
+    tc.checkL2TxParams(t, blocks[0].Batches[4].L2Txs[2], common.TxTypeTransfer, 1, "A", "C", big.NewInt(1), common.BatchNum(5))
     // change of Batch
     // #18: Transfer(1) User1-User0: 1 (1)
-    tc.checkL2TxParams(t, blocks[1].Batches[0].L2Txs[0], common.TxTypeTransfer, 1, "User1", "User0", big.NewInt(1), common.BatchNum(6), common.Nonce(1))
+    tc.checkL2TxParams(t, blocks[1].Batches[0].L2Txs[0], common.TxTypeTransfer, 1, "User1", "User0", big.NewInt(1), common.BatchNum(6))
     // change of Block (implies also a change of batch)
     // #19: Transfer(1) A-B: 1 (1)
-    tc.checkL2TxParams(t, blocks[1].Batches[0].L2Txs[1], common.TxTypeTransfer, 1, "A", "B", big.NewInt(1), common.BatchNum(6), common.Nonce(5))
+    tc.checkL2TxParams(t, blocks[1].Batches[0].L2Txs[1], common.TxTypeTransfer, 1, "A", "B", big.NewInt(1), common.BatchNum(6))
 }
 
 func (tc *Context) checkL1TxParams(t *testing.T, tx common.L1Tx, typ common.TxType, tokenID common.TokenID, from, to string, loadAmount, amount *big.Int) {
@@ -160,7 +160,7 @@ func (tc *Context) checkL1TxParams(t *testing.T, tx common.L1Tx, typ common.TxTy
     }
 }
 
-func (tc *Context) checkL2TxParams(t *testing.T, tx common.L2Tx, typ common.TxType, tokenID common.TokenID, from, to string, amount *big.Int, batchNum common.BatchNum, nonce common.Nonce) {
+func (tc *Context) checkL2TxParams(t *testing.T, tx common.L2Tx, typ common.TxType, tokenID common.TokenID, from, to string, amount *big.Int, batchNum common.BatchNum) {
     assert.Equal(t, typ, tx.Type)
     assert.Equal(t, tc.Users[from].Accounts[tokenID].Idx, tx.FromIdx)
     if tx.Type != common.TxTypeExit {
@@ -170,7 +170,6 @@ func (tc *Context) checkL2TxParams(t *testing.T, tx common.L2Tx, typ common.TxTy
         assert.Equal(t, amount, tx.Amount)
     }
     assert.Equal(t, batchNum, tx.BatchNum)
-    assert.Equal(t, nonce, tx.Nonce)
 }
 
 func TestGeneratePoolL2Txs(t *testing.T) {
