Test L2Txs, ExitTree in synchronizer

Eduard S
2020-10-27 12:49:07 +01:00
parent c1b9b0be90
commit 558de2737e
8 changed files with 401 additions and 169 deletions

View File

@@ -3,7 +3,6 @@ package synchronizer
import (
"context"
"database/sql"
"fmt"
"github.com/ethereum/go-ethereum"
"github.com/hermeznetwork/hermez-node/common"
@@ -344,7 +343,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
}
batchNum := common.BatchNum(evtForgeBatch.BatchNum)
nextForgeL1TxsNumCpy := nextForgeL1TxsNum
var l1UserTxs []common.L1Tx
// Check if this is an L1Batch to get the L1 Txs from it
if forgeBatchArgs.L1Batch {
@@ -353,28 +351,22 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
// that stateDB can process them.
// First try to find them in HistoryDB.
l1UserTxs, err := s.historyDB.GetL1UserTxs(nextForgeL1TxsNumCpy)
if len(l1UserTxs) == 0 {
// If not found in the DB, try to find them in
// this block. This could happen because in a
// block there could be multiple batches with
// L1Batch = true (although it's a very rare
// case).
// If not found in the DB and the block doesn't
// contain the l1UserTxs, it means that the
// L1UserTxs queue with toForgeL1TxsNum was
// closed empty, so we leave `l1UserTxs` as an
// empty slice.
for _, l1UserTx := range rollupData.l1UserTxs {
if *l1UserTx.ToForgeL1TxsNum == nextForgeL1TxsNumCpy {
l1UserTxs = append(l1UserTxs, l1UserTx)
}
}
}
l1UserTxs, err = s.historyDB.GetL1UserTxs(nextForgeL1TxsNum)
if err != nil {
return nil, err
}
nextForgeL1TxsNum++
// Apart from the DB, try to find them in this block.
// This could happen because in a block there could be
// multiple batches with L1Batch = true (although it's
// a very rare case). If not found in the DB and the
// block doesn't contain the l1UserTxs, it means that
// the L1UserTxs queue with toForgeL1TxsNum was closed
// empty, so we leave `l1UserTxs` as an empty slice.
for _, l1UserTx := range rollupData.l1UserTxs {
if *l1UserTx.ToForgeL1TxsNum == nextForgeL1TxsNum {
l1UserTxs = append(l1UserTxs, l1UserTx)
}
}
position = len(l1UserTxs)
}
@@ -393,7 +385,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx)
position++
fmt.Println("DGB l1coordtx")
// fmt.Println("DGB l1coordtx")
}
// Insert all the txs forged in this batch (l1UserTxs,
@@ -401,7 +393,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
// processed.
poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData) // TODO: This is a bit ugly, find a better way
// TODO: Get createdAccounts from ProcessTxs()
// TODO: Get CollectedFees from ProcessTxs()
// TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
// ProcessTxs updates poolL2Txs adding: Nonce, TokenID
@@ -410,26 +401,37 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
return nil, err
}
// Set batchNum in exits
for i := range ptOut.ExitInfos {
exit := &ptOut.ExitInfos[i]
exit.BatchNum = batchNum
}
batchData.ExitTree = ptOut.ExitInfos
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // TODO: This is a bit ugly, find a better way
if err != nil {
return nil, err
}
for i := range l2Txs {
_l2Tx := l2Txs[i]
_l2Tx.Position = position
_l2Tx.EthBlockNum = blockNum
_l2Tx.BatchNum = batchNum
l2Tx, err := common.NewL2Tx(&_l2Tx)
tx := &l2Txs[i]
tx.Position = position
tx.EthBlockNum = blockNum
tx.BatchNum = batchNum
nTx, err := common.NewL2Tx(tx)
if err != nil {
return nil, err
}
batchData.L2Txs = append(batchData.L2Txs, *l2Tx)
batchData.L2Txs = append(batchData.L2Txs, *nTx)
position++
}
batchData.ExitTree = ptOut.ExitInfos
for i := range ptOut.CreatedAccounts {
createdAccount := &ptOut.CreatedAccounts[i]
createdAccount.BatchNum = batchNum
}
batchData.CreatedAccounts = ptOut.CreatedAccounts
slotNum := int64(0)
if ethBlock.EthBlockNum >= s.auctionConstants.GenesisBlockNum {
@@ -449,9 +451,11 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*rollupData, error) {
ExitRoot: forgeBatchArgs.NewExitRoot,
SlotNum: slotNum,
}
nextForgeL1TxsNumCpy := nextForgeL1TxsNum
if forgeBatchArgs.L1Batch {
batch.ForgeL1TxsNum = &nextForgeL1TxsNumCpy
batchData.L1Batch = true
nextForgeL1TxsNum++
}
batchData.Batch = batch
rollupData.batches = append(rollupData.batches, *batchData)
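A side note on the ordering implemented above, not part of the commit: rollupSync fills positions per batch with one running counter, so the forged L1UserTxs occupy the first positions, the L1CoordinatorTxs follow, and the L2Txs come last. A minimal standalone sketch of that counter, using a hypothetical placeholder type and function instead of common.L1Tx/common.L2Tx:

package main

import "fmt"

// tx is a placeholder; the synchronizer uses common.L1Tx and common.L2Tx,
// which carry many more fields.
type tx struct {
	kind     string
	position int
}

// assignPositions mirrors the running counter in rollupSync: L1UserTxs take
// the first positions of a batch, L1CoordinatorTxs follow, and L2Txs come last.
func assignPositions(l1User, l1Coord, l2 []tx) []tx {
	position := 0
	var batch []tx
	for _, group := range [][]tx{l1User, l1Coord, l2} {
		for _, t := range group {
			t.position = position
			position++
			batch = append(batch, t)
		}
	}
	return batch
}

func main() {
	batch := assignPositions(
		[]tx{{kind: "l1user"}, {kind: "l1user"}},
		[]tx{{kind: "l1coord"}},
		[]tx{{kind: "l2"}},
	)
	for _, t := range batch {
		fmt.Println(t.kind, t.position) // l1user 0, l1user 1, l1coord 2, l2 3
	}
}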

View File

@@ -2,6 +2,7 @@ package synchronizer
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
@@ -21,6 +22,9 @@ import (
"github.com/stretchr/testify/require"
)
var tokenConsts = map[common.TokenID]eth.ERC20Consts{}
var forceExits = map[int64][]common.ExitInfo{} // ForgeL1TxsNum -> []exit
type timer struct {
time int64
}
@@ -31,6 +35,167 @@ func (t *timer) Time() int64 {
return currentTime
}
// Check Sync output and HistoryDB state against expected values generated by
// til
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBlock *common.BlockData) {
// Check Blocks
dbBlocks, err := s.historyDB.GetAllBlocks()
require.Nil(t, err)
assert.Equal(t, blockNum, len(dbBlocks))
assert.Equal(t, int64(blockNum), dbBlocks[blockNum-1].EthBlockNum)
assert.NotEqual(t, dbBlocks[blockNum-1].Hash, dbBlocks[blockNum-2].Hash)
assert.Greater(t, dbBlocks[blockNum-1].Timestamp.Unix(), dbBlocks[blockNum-2].Timestamp.Unix())
// Check Tokens
assert.Equal(t, len(block.AddedTokens), len(syncBlock.AddedTokens))
dbTokens, err := s.historyDB.GetAllTokens()
require.Nil(t, err)
for i, token := range block.AddedTokens {
dbToken := dbTokens[i]
syncToken := syncBlock.AddedTokens[i]
assert.Equal(t, block.Block.EthBlockNum, syncToken.EthBlockNum)
assert.Equal(t, token.TokenID, syncToken.TokenID)
assert.Equal(t, token.EthAddr, syncToken.EthAddr)
tokenConst := tokenConsts[token.TokenID]
assert.Equal(t, tokenConst.Name, syncToken.Name)
assert.Equal(t, tokenConst.Symbol, syncToken.Symbol)
assert.Equal(t, tokenConst.Decimals, syncToken.Decimals)
var tokenCpy historydb.TokenWithUSD
//nolint:gosec
require.Nil(t, copier.Copy(&tokenCpy, &token)) // copy common.Token to historydb.TokenWithUSD
require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy eth.ERC20Consts to historydb.TokenWithUSD
tokenCpy.ItemID = dbToken.ItemID // we don't care about ItemID
assert.Equal(t, tokenCpy, dbToken)
}
// Check L1UserTxs
assert.Equal(t, len(block.L1UserTxs), len(syncBlock.L1UserTxs))
dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
require.Nil(t, err)
// Ignore BatchNum in syncBlock.L1UserTxs because this value is set by the HistoryDB
for i := range syncBlock.L1UserTxs {
syncBlock.L1UserTxs[i].BatchNum = block.L1UserTxs[i].BatchNum
}
assert.Equal(t, block.L1UserTxs, syncBlock.L1UserTxs)
for _, tx := range block.L1UserTxs {
var dbTx *common.L1Tx
// Find tx in DB output
for _, _dbTx := range dbL1UserTxs {
if *tx.ToForgeL1TxsNum == *_dbTx.ToForgeL1TxsNum &&
tx.Position == _dbTx.Position {
dbTx = new(common.L1Tx)
*dbTx = _dbTx
break
}
}
assert.Equal(t, &tx, dbTx) //nolint:gosec
}
// Check Batches
assert.Equal(t, len(block.Batches), len(syncBlock.Batches))
dbBatches, err := s.historyDB.GetAllBatches()
require.Nil(t, err)
dbL1CoordinatorTxs, err := s.historyDB.GetAllL1CoordinatorTxs()
require.Nil(t, err)
// fmt.Printf("DBG dbL1CoordinatorTxs: %+v\n", dbL1CoordinatorTxs)
dbL2Txs, err := s.historyDB.GetAllL2Txs()
require.Nil(t, err)
// fmt.Printf("DBG dbL2Txs: %+v\n", dbL2Txs)
dbExits, err := s.historyDB.GetAllExits()
require.Nil(t, err)
// dbL1CoordinatorTxs := []common.L1Tx{}
for i, batch := range block.Batches {
var dbBatch *common.Batch
// Find batch in DB output
for _, _dbBatch := range dbBatches {
if batch.Batch.BatchNum == _dbBatch.BatchNum {
dbBatch = new(common.Batch)
*dbBatch = _dbBatch
break
}
}
syncBatch := syncBlock.Batches[i]
// We don't care about TotalFeesUSD. Use the syncBatch that
// has a TotalFeesUSD inserted by the HistoryDB
batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD
batch.CreatedAccounts = syncBatch.CreatedAccounts // til doesn't output CreatedAccounts
// Test field by field to facilitate debugging of errors
assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs)
assert.Equal(t, batch.L2Txs, syncBatch.L2Txs)
// In the exit tree we only check AccountIdx and Balance, because
// that's what we precomputed earlier.
for j := range batch.ExitTree {
exit := &batch.ExitTree[j]
assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx)
assert.Equal(t, exit.Balance, syncBatch.ExitTree[j].Balance)
*exit = syncBatch.ExitTree[j]
}
assert.Equal(t, batch.Batch, syncBatch.Batch)
assert.Equal(t, batch, syncBatch)
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
// Check L1CoordinatorTxs from DB
for _, tx := range batch.L1CoordinatorTxs {
var dbTx *common.L1Tx
// Find tx in DB output
for _, _dbTx := range dbL1CoordinatorTxs {
if *tx.BatchNum == *_dbTx.BatchNum &&
tx.Position == _dbTx.Position {
dbTx = new(common.L1Tx)
*dbTx = _dbTx
break
}
}
assert.Equal(t, &tx, dbTx) //nolint:gosec
}
// Check L2Txs from DB
for _, tx := range batch.L2Txs {
var dbTx *common.L2Tx
// Find tx in DB output
for _, _dbTx := range dbL2Txs {
if tx.BatchNum == _dbTx.BatchNum &&
tx.Position == _dbTx.Position {
dbTx = new(common.L2Tx)
*dbTx = _dbTx
break
}
}
assert.Equal(t, &tx, dbTx) //nolint:gosec
}
// Check Exits from DB
for _, exit := range batch.ExitTree {
var dbExit *common.ExitInfo
// Find exit in DB output
for _, _dbExit := range dbExits {
if exit.BatchNum == _dbExit.BatchNum &&
exit.AccountIdx == _dbExit.AccountIdx {
dbExit = new(common.ExitInfo)
*dbExit = _dbExit
break
}
}
// Compare MerkleProof in JSON because an unmarshaled 0 big.Int
// leaves the internal big.Int array at nil, which gives trouble
// when comparing against a big.Int whose internal array is
// non-nil but empty.
mtp, err := json.Marshal(exit.MerkleProof)
require.Nil(t, err)
dbMtp, err := json.Marshal(dbExit.MerkleProof)
require.Nil(t, err)
assert.Equal(t, mtp, dbMtp)
dbExit.MerkleProof = exit.MerkleProof
assert.Equal(t, &exit, dbExit) //nolint:gosec
}
}
}
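A side note on the MerkleProof comparison above, not part of the commit: as the comment in checkSyncBlock explains, a big.Int unmarshaled from 0 keeps a nil internal array, while a zero produced at runtime may keep a non-nil but empty one, so reflect.DeepEqual (which assert.Equal relies on) flags numerically equal values as different. A minimal standalone sketch of the JSON-encoding workaround:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	// Clearing a previously non-zero big.Int leaves a non-nil (but empty)
	// internal slice behind...
	computed := big.NewInt(42)
	computed.SetInt64(0)

	// ...while a zero big.Int unmarshaled from JSON keeps a nil internal slice.
	var decoded big.Int
	if err := json.Unmarshal([]byte("0"), &decoded); err != nil {
		panic(err)
	}

	fmt.Println(computed.Cmp(&decoded) == 0)           // true: numerically equal
	fmt.Println(reflect.DeepEqual(computed, &decoded)) // false: internal slices differ

	// Workaround used in the test: compare the JSON encodings instead.
	a, _ := json.Marshal(computed)
	b, _ := json.Marshal(&decoded)
	fmt.Println(string(a) == string(b)) // true
}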
func TestSync(t *testing.T) {
//
// Setup
@@ -89,6 +254,10 @@ func TestSync(t *testing.T) {
// Generate blockchain and smart contract data, and fill the test smart contracts
//
// TODO: Test CreateAccountDepositTransfer
// TODO: Test ForceTransfer
// TODO: Test ForceExit
// Generate blockchain data with til
set1 := `
Type: Blockchain
@@ -97,28 +266,44 @@ func TestSync(t *testing.T) {
AddToken(2)
AddToken(3)
CreateAccountDeposit(1) A: 20 // Idx=256+1
CreateAccountDeposit(2) A: 20 // Idx=256+2
CreateAccountDeposit(1) B: 5 // Idx=256+3
CreateAccountDeposit(1) C: 5 // Idx=256+4
CreateAccountDeposit(1) D: 5 // Idx=256+5
CreateAccountDeposit(1) C: 20 // Idx=256+2=258
CreateAccountDeposit(2) A: 20 // Idx=256+3=259
CreateAccountDeposit(1) D: 5 // Idx=256+4=260
CreateAccountDeposit(2) B: 5 // Idx=256+5=261
CreateAccountDeposit(2) C: 5 // Idx=256+6=262
CreateAccountCoordinator(2) B // Idx=256+0
CreateAccountCoordinator(1) A // Idx=256+0=256
CreateAccountCoordinator(1) B // Idx=256+1=257
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs
> batchL1 // forge defined L1UserTxs, freeze L1UserTxs{nil}
> block
> block // blockNum=2
Transfer(1) C-A: 10 (10)
Exit(1) D: 3 (5)
> batch
> block // blockNum=3
`
tc := til.NewContext(eth.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set1)
require.Nil(t, err)
require.Equal(t, 1, len(blocks))
require.Equal(t, 3, len(blocks[0].AddedTokens))
require.Equal(t, 5, len(blocks[0].L1UserTxs))
require.Equal(t, 2, len(blocks[0].Batches))
// Sanity check
require.Equal(t, 2, len(blocks))
// blocks[0] (blockNum=2)
i := 0
require.Equal(t, 2, int(blocks[i].Block.EthBlockNum))
require.Equal(t, 3, len(blocks[i].AddedTokens))
require.Equal(t, 5, len(blocks[i].L1UserTxs))
require.Equal(t, 2, len(blocks[i].Batches))
require.Equal(t, 2, len(blocks[i].Batches[0].L1CoordinatorTxs))
// blocks[1] (blockNum=3)
i = 1
require.Equal(t, 3, int(blocks[i].Block.EthBlockNum))
require.Equal(t, 1, len(blocks[i].Batches))
require.Equal(t, 2, len(blocks[i].Batches[0].L2Txs))
tokenConsts := map[common.TokenID]eth.ERC20Consts{}
// Generate extra required data
for _, block := range blocks {
for _, token := range block.AddedTokens {
@@ -167,21 +352,25 @@ func TestSync(t *testing.T) {
client.CtlMineBlock()
}
//
// Sync to synchronize the current state from the test smart contracts
//
syncBlock, discards, err = s.Sync2(ctx, nil)
require.Nil(t, err)
require.Nil(t, discards)
require.NotNil(t, syncBlock)
assert.Equal(t, int64(2), syncBlock.Block.EthBlockNum)
// Fill extra fields not generated by til in til block
openToForge := int64(0)
toForgeL1TxsNum := int64(0)
l1UserTxsLen := map[int64]int{} // ForgeL1TxsNum -> len(L1UserTxs)
for i := range blocks {
block := &blocks[i]
// Count the number of L1UserTxs in each queue, to later figure out
// the positions of the L1CoordinatorTxs and L2Txs
for j := range block.L1UserTxs {
tx := &block.L1UserTxs[j]
l1UserTxsLen[*tx.ToForgeL1TxsNum]++
if tx.Type == common.TxTypeForceExit {
forceExits[*tx.ToForgeL1TxsNum] = append(forceExits[*tx.ToForgeL1TxsNum],
common.ExitInfo{
AccountIdx: tx.FromIdx,
Balance: tx.Amount,
})
}
}
for j := range block.Batches {
batch := &block.Batches[j]
if batch.L1Batch {
@@ -208,94 +397,90 @@ func TestSync(t *testing.T) {
}
batchNum := batch.Batch.BatchNum
for j := range batch.L1CoordinatorTxs {
tx := &batch.L1CoordinatorTxs[j]
for k := range batch.L1CoordinatorTxs {
tx := &batch.L1CoordinatorTxs[k]
tx.BatchNum = &batchNum
tx.EthBlockNum = batch.Batch.EthBlockNum
}
}
}
// Fill expected positions in L1CoordinatorTxs and L2Txs
for i := range blocks {
block := &blocks[i]
for j := range block.Batches {
batch := &block.Batches[j]
position := 0
if batch.L1Batch {
position = l1UserTxsLen[*batch.Batch.ForgeL1TxsNum]
}
for k := range batch.L1CoordinatorTxs {
tx := &batch.L1CoordinatorTxs[k]
tx.Position = position
position++
nTx, err := common.NewL1Tx(tx)
require.Nil(t, err)
*tx = *nTx
}
for k := range batch.L2Txs {
tx := &batch.L2Txs[k]
tx.Position = position
position++
nTx, err := common.NewL2Tx(tx)
require.Nil(t, err)
*tx = *nTx
}
}
}
block := blocks[0]
// Fill ExitTree (only AccountIdx and Balance)
for i := range blocks {
block := &blocks[i]
for j := range block.Batches {
batch := &block.Batches[j]
if batch.L1Batch {
for forgeL1TxsNum, exits := range forceExits {
if forgeL1TxsNum == *batch.Batch.ForgeL1TxsNum {
batch.ExitTree = append(batch.ExitTree, exits...)
}
}
}
for k := range batch.L2Txs {
tx := &batch.L2Txs[k]
if tx.Type == common.TxTypeExit {
batch.ExitTree = append(batch.ExitTree, common.ExitInfo{
AccountIdx: tx.FromIdx,
Balance: tx.Amount,
})
}
}
}
}
//
// Check Sync output and HistoryDB state against expected values
// generated by til
// Sync to synchronize the current state from the test smart contracts,
// and check the outcome
//
// Check Blocks
dbBlocks, err = s.historyDB.GetAllBlocks()
// Block 2
syncBlock, discards, err = s.Sync2(ctx, nil)
require.Nil(t, err)
assert.Equal(t, 2, len(dbBlocks))
assert.Equal(t, int64(2), dbBlocks[1].EthBlockNum)
assert.NotEqual(t, dbBlocks[1].Hash, dbBlocks[0].Hash)
assert.Greater(t, dbBlocks[1].Timestamp.Unix(), dbBlocks[0].Timestamp.Unix())
require.Nil(t, discards)
require.NotNil(t, syncBlock)
assert.Equal(t, int64(2), syncBlock.Block.EthBlockNum)
// Check Tokens
assert.Equal(t, len(block.AddedTokens), len(syncBlock.AddedTokens))
dbTokens, err := s.historyDB.GetAllTokens()
checkSyncBlock(t, s, 2, &blocks[0], syncBlock)
// Block 3
syncBlock, discards, err = s.Sync2(ctx, nil)
require.Nil(t, err)
assert.Equal(t, len(block.AddedTokens), len(dbTokens))
for i, token := range block.AddedTokens {
dbToken := dbTokens[i]
syncToken := syncBlock.AddedTokens[i]
require.Nil(t, discards)
require.NotNil(t, syncBlock)
assert.Equal(t, int64(3), syncBlock.Block.EthBlockNum)
assert.Equal(t, block.Block.EthBlockNum, syncToken.EthBlockNum)
assert.Equal(t, token.TokenID, syncToken.TokenID)
assert.Equal(t, token.EthAddr, syncToken.EthAddr)
tokenConst := tokenConsts[token.TokenID]
assert.Equal(t, tokenConst.Name, syncToken.Name)
assert.Equal(t, tokenConst.Symbol, syncToken.Symbol)
assert.Equal(t, tokenConst.Decimals, syncToken.Decimals)
var tokenCpy historydb.TokenWithUSD
//nolint:gosec
require.Nil(t, copier.Copy(&tokenCpy, &token)) // copy common.Token to historydb.TokenWithUSD
require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy eth.ERC20Consts to historydb.TokenWithUSD
tokenCpy.ItemID = dbToken.ItemID // we don't care about ItemID
assert.Equal(t, tokenCpy, dbToken)
}
// Check L1UserTxs
assert.Equal(t, len(block.L1UserTxs), len(syncBlock.L1UserTxs))
dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
require.Nil(t, err)
assert.Equal(t, len(block.L1UserTxs), len(dbL1UserTxs))
// Ignore BatchNum in syncBlock.L1UserTxs because this value is set by the HistoryDB
for i := range syncBlock.L1UserTxs {
syncBlock.L1UserTxs[i].BatchNum = block.L1UserTxs[i].BatchNum
}
assert.Equal(t, block.L1UserTxs, syncBlock.L1UserTxs)
assert.Equal(t, block.L1UserTxs, dbL1UserTxs)
// Check Batches
assert.Equal(t, len(block.Batches), len(syncBlock.Batches))
dbBatches, err := s.historyDB.GetAllBatches()
require.Nil(t, err)
assert.Equal(t, len(block.Batches), len(dbBatches))
for i, batch := range block.Batches {
batchNum := batch.Batch.BatchNum
dbBatch := dbBatches[i]
syncBatch := syncBlock.Batches[i]
// We don't care about TotalFeesUSD. Use the syncBatch that
// has a TotalFeesUSD inserted by the HistoryDB
batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD
batch.CreatedAccounts = syncBatch.CreatedAccounts // til doesn't output CreatedAccounts
// fmt.Printf("DBG Batch %d %+v\n", i, batch)
// fmt.Printf("DBG Batch Sync %d %+v\n", i, syncBatch)
// assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs)
fmt.Printf("DBG BatchNum: %d, LastIdx: %d\n", batchNum, batch.Batch.LastIdx)
assert.Equal(t, batch, syncBatch)
assert.Equal(t, batch.Batch, dbBatch)
}
// Check L1UserTxs in DB
checkSyncBlock(t, s, 3, &blocks[1], syncBlock)
// TODO: Reorg will be properly tested once we have the mock ethClient implemented
/*