@@ -2,8 +2,6 @@ package synchronizer
import (
"context"
"crypto/ecdsa"
"encoding/binary"
"fmt"
"io/ioutil"
"math/big"
@@ -11,14 +9,13 @@ import (
"testing"
ethCommon "github.com/ethereum/go-ethereum/common"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/test"
"github.com/iden3/go-iden3-crypto/babyjub "
"github.com/hermeznetwork/hermez-node/test/til "
"github.com/jinzhu/copier"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -34,13 +31,11 @@ func (t *timer) Time() int64 {
return currentTime
}
type tokenData struct {
TokenID common.TokenID
Addr ethCommon.Address
Consts eth.ERC20Consts
}
func TestSync(t *testing.T) {
//
// Setup
//
ctx := context.Background()
// Init State DB
dir, err := ioutil.TempDir("", "tmpdb")
@@ -62,131 +57,242 @@ func TestSync(t *testing.T) {
// Init eth client
var timer timer
clientSetup := test.NewClientSetupExample()
bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer
s, err := NewSynchronizer(client, historyDB, stateDB)
require.Nil(t, err)
//
// First Sync from an initial state
//
// Test Sync for rollup genesis block
blockData, _, err := s.Sync2(ctx, nil)
syncBlock, discards, err := s.Sync2(ctx, nil)
require.Nil(t, err)
require.NotNil(t, blockData)
assert.Equal(t, int64(1), blockData.Block.EthBlockNum)
blocks, err := s.historyDB.GetBlocks(0, 9999)
require.Nil(t, discards)
require.NotNil(t, syncBlock)
assert.Equal(t, int64(1), syncBlock.Block.EthBlockNum)
dbBlocks, err := s.historyDB.GetAllBlocks()
require.Nil(t, err)
assert.Equal(t, 1, len(blocks))
assert.Equal(t, int64(1), blocks[0].EthBlockNum)
assert.Equal(t, 1, len(dbBlocks))
assert.Equal(t, int64(1), dbBlocks[0].EthBlockNum)
/*
// Test Sync for a block with new Tokens and L1UserTxs
// accounts := test.GenerateKeys(t, []string{"A", "B", "C", "D"})
l1UserTxs, _, _, _ := test.GenerateTestTxsFromSet(t, `
A(1): 10
A(2): 20
B(1): 5
C(1): 8
D(3): 15
> advance batch
`)
require.Greater(t, len(l1UserTxs[0]), 0)
// require.Greater(t, len(tokens), 0)
for i := 1; i <= 3; i++ {
_, err := client.RollupAddToken(ethCommon.BigToAddress(big.NewInt(int64(i*10000))),
clientSetup.RollupVariables.FeeAddToken)
require.Nil(t, err)
}
// Sync again and expect no new blocks
syncBlock, discards, err = s.Sync2(ctx, nil)
require.Nil(t, err)
require.Nil(t, discards)
require.Nil(t, syncBlock)
for i := range l1UserTxs[0] {
client.CtlAddL1TxUser(&l1UserTxs[0][i])
}
client.CtlMineBlock()
//
// Generate blockchain and smart contract data, and fill the test smart contracts
//
err = s.Sync(context.Background())
require.Nil(t, err)
// Generate blockchain data with til
set1 := `
Type: Blockchain
getTokens, err := s.historyDB.GetTokens()
require.Nil(t, err)
assert.Equal(t, 3, len(getTokens))
*/
AddToken(1)
AddToken(2)
AddToken(3)
// Generate tokens vector
numTokens := 3
tokens := make([]tokenData, numTokens)
for i := 1; i <= numTokens; i++ {
addr := ethCommon.BigToAddress(big.NewInt(int64(i*10000)))
consts := eth.ERC20Consts{
Name: fmt.Sprintf("Token %d", i),
Symbol: fmt.Sprintf("TK%d", i),
Decimals: uint64(i * 2),
}
tokens[i-1] = tokenData{common.TokenID(i), addr, consts}
}
CreateAccountDeposit(1) A: 20 // Idx=256+1
CreateAccountDeposit(2) A: 20 // Idx=256+2
CreateAccountDeposit(1) B: 5 // Idx=256+3
CreateAccountDeposit(1) C: 5 // Idx=256+4
CreateAccountDeposit(1) D: 5 // Idx=256+5
numUsers := 4
keys := make([]*userKeys, numUsers)
for i := range keys {
keys[i] = genKeys(i)
}
CreateAccountDepositCoordinator(2) B // Idx=256+0
// Generate some L1UserTxs of type deposit
l1UserTxs := make([]*common.L1Tx, 5)
for i := range l1UserTxs {
l1UserTxs[i] = &common.L1Tx{
FromIdx: common.Idx(0),
FromEthAddr: keys[i%numUsers].Addr,
FromBJJ: keys[i%numUsers].BJJPK,
Amount: big.NewInt(0),
LoadAmount: big.NewInt((int64(i) + 1) * 1000),
TokenID: common.TokenID(i%numTokens + 1),
> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs
> batchL1 // forge defined L1UserTxs, freeze L1UserTxs{nil}
> block
`
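// The set above defines, within a single block: 3 AddedTokens, 5 L1UserTxs
// (account-creating deposits), one coordinator-created account and 2 batches,
// which is exactly what the requires below check once til generates the blocks.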
tc := til.NewContext(eth.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set1)
require.Nil(t, err)
require.Equal(t, 1, len(blocks))
require.Equal(t, 3, len(blocks[0].AddedTokens))
require.Equal(t, 5, len(blocks[0].L1UserTxs))
require.Equal(t, 2, len(blocks[0].Batches))
tokenConsts := map[common.TokenID]eth.ERC20Consts{}
// Generate extra required data
for _, block := range blocks {
for _, token := range block.AddedTokens {
consts := eth.ERC20Consts{
Name: fmt.Sprintf("Token %d", token.TokenID),
Symbol: fmt.Sprintf("TK%d", token.TokenID),
Decimals: 18,
}
tokenConsts[token.TokenID] = consts
client.CtlAddERC20(token.EthAddr, consts)
}
}
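// til only generates the on-chain token registrations; the ERC20 constants
// (Name, Symbol, Decimals) are registered directly in the test client here
// and later compared against the tokens returned by the synchronizer.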
// Add tokens to ethereum, and to rollup
for _, token := range tokens {
client.CtlAddERC20(token.Addr, token.Consts)
_, err := client.RollupAddTokenSimple(token.Addr, clientSetup.RollupVariables.FeeAddToken)
require.Nil(t, err)
// Add block data to the smart contracts
for _, block := range blocks {
for _, token := range block.AddedTokens {
_, err := client.RollupAddTokenSimple(token.EthAddr, clientSetup.RollupVariables.FeeAddToken)
require.Nil(t, err)
}
for _, tx := range block.L1UserTxs {
client.CtlSetAddr(tx.FromEthAddr)
_, err := client.RollupL1UserTxERC20ETH(tx.FromBJJ, int64(tx.FromIdx), tx.LoadAmount, tx.Amount,
uint32(tx.TokenID), int64(tx.ToIdx))
require.Nil(t, err)
}
client.CtlSetAddr(bootCoordAddr)
for _, batch := range block.Batches {
_, err := client.RollupForgeBatch(&eth.RollupForgeBatchArgs{
NewLastIdx: batch.Batch.LastIdx,
NewStRoot: batch.Batch.StateRoot,
NewExitRoot: batch.Batch.ExitRoot,
L1CoordinatorTxs: batch.L1CoordinatorTxs,
L1CoordinatorTxsAuths: [][]byte{}, // Intentionally empty
L2TxsData: batch.L2Txs,
FeeIdxCoordinator: []common.Idx{}, // TODO
// Circuit selector
VerifierIdx: 0, // Intentionally empty
L1Batch: batch.L1Batch,
ProofA: [2]*big.Int{}, // Intentionally empty
ProofB: [2][2]*big.Int{}, // Intentionally empty
ProofC: [2]*big.Int{}, // Intentionally empty
})
require.Nil(t, err)
}
// Mine block and sync
client.CtlMineBlock()
}
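// At this point everything generated by til for the block (tokens, L1UserTxs
// and forged batches) has been pushed to the test client and a block has been
// mined, so the next Sync2 call should return all of it.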
// Add L1Txs to rollup
for i := range l1UserTxs {
tx := l1UserTxs[i]
_, err := client.RollupL1UserTxERC20ETH(tx.FromBJJ, int64(tx.FromIdx), tx.LoadAmount, tx.Amount,
uint32(tx.TokenID), int64(tx.ToIdx))
require.Nil(t, err)
//
// Sync to synchronize the current state from the test smart contracts
//
syncBlock, discards, err = s.Sync2(ctx, nil)
require.Nil(t, err)
require.Nil(t, discards)
require.NotNil(t, syncBlock)
assert.Equal(t, int64(2), syncBlock.Block.EthBlockNum)
// Fill the extra fields that til doesn't generate into the til blocks
openToForge := int64(0)
toForgeL1TxsNum := int64(0)
for i := range blocks {
block := &blocks[i]
for j := range block.Batches {
batch := &block.Batches[j]
if batch.L1Batch {
// Set BatchNum for forged L1UserTxs to til blocks
bn := batch.Batch.BatchNum
for k := range blocks {
block := &blocks[k]
for l := range block.L1UserTxs {
tx := &block.L1UserTxs[l]
if *tx.ToForgeL1TxsNum == openToForge {
tx.BatchNum = &bn
}
}
}
openToForge++
}
batch.Batch.EthBlockNum = block.Block.EthBlockNum
batch.Batch.ForgerAddr = bootCoordAddr // til doesn't fill the batch forger addr
if batch.L1Batch {
toForgeL1TxsNumCpy := toForgeL1TxsNum
batch.Batch.ForgeL1TxsNum = &toForgeL1TxsNumCpy // til doesn't fill the ForgeL1TxsNum
toForgeL1TxsNum++
}
batchNum := batch.Batch.BatchNum
for j := range batch.L1CoordinatorTxs {
tx := &batch.L1CoordinatorTxs[j]
tx.BatchNum = &batchNum
tx.EthBlockNum = batch.Batch.EthBlockNum
nTx, err := common.NewL1Tx(tx)
require.Nil(t, err)
*tx = *nTx
}
}
}
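// til only knows the intended transactions, so fields that are determined at
// forge time (Batch.EthBlockNum, ForgerAddr, ForgeL1TxsNum and the BatchNum of
// the forged L1UserTxs) are filled in above to allow an exact comparison
// against the synchronized data.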
// Mine block and sync
client.CtlMineBlock()
block := blocks[0]
blockData, _, err = s.Sync2(ctx, nil)
//
// Check Sync output and HistoryDB state against expected values
// generated by til
//
// Check Blocks
dbBlocks, err = s.historyDB.GetAllBlocks()
require.Nil(t, err)
require.NotNil(t, blockData)
assert.Equal(t, int64(2), blockData.Block.EthBlockNum)
assert.Equal(t, 2, len(dbBlocks))
assert.Equal(t, int64(2), dbBlocks[1].EthBlockNum)
assert.NotEqual(t, dbBlocks[1].Hash, dbBlocks[0].Hash)
assert.Greater(t, dbBlocks[1].Timestamp.Unix(), dbBlocks[0].Timestamp.Unix())
// Check tokens in DB
// Check Tokens
assert.Equal(t, len(block.AddedTokens), len(syncBlock.AddedTokens))
dbTokens, err := s.historyDB.GetAllTokens()
require.Nil(t, err)
assert.Equal(t, len(tokens), len(dbTokens))
assert.Equal(t, len(tokens), len(blockData.AddedTokens))
for i := range tokens {
token := tokens[i]
addToken := blockData.AddedTokens[i]
assert.Equal(t, len(block.AddedTokens), len(dbTokens))
for i, token := range block.AddedTokens {
dbToken := dbTokens[i]
syncToken := syncBlock.AddedTokens[i]
assert.Equal(t, block.Block.EthBlockNum, syncToken.EthBlockNum)
assert.Equal(t, token.TokenID, syncToken.TokenID)
assert.Equal(t, token.EthAddr, syncToken.EthAddr)
tokenConst := tokenConsts[token.TokenID]
assert.Equal(t, tokenConst.Name, syncToken.Name)
assert.Equal(t, tokenConst.Symbol, syncToken.Symbol)
assert.Equal(t, tokenConst.Decimals, syncToken.Decimals)
var tokenCpy historydb.TokenRead
//nolint:gosec
require.Nil(t, copier.Copy(&tokenCpy, &token)) // copy common.Token to historydb.TokenRead
require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy eth.ERC20Consts to historydb.TokenRead
tokenCpy.ItemID = dbToken.ItemID // we don't care about ItemID
assert.Equal(t, tokenCpy, dbToken)
}
// Check L1UserTxs
assert.Equal(t, len(block.L1UserTxs), len(syncBlock.L1UserTxs))
dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
require.Nil(t, err)
assert.Equal(t, len(block.L1UserTxs), len(dbL1UserTxs))
// Ignore BatchNum in syncBlock.L1UserTxs because this value is set by the HistoryDB
for i := range syncBlock.L1UserTxs {
syncBlock.L1UserTxs[i].BatchNum = block.L1UserTxs[i].BatchNum
}
assert.Equal(t, block.L1UserTxs, syncBlock.L1UserTxs)
assert.Equal(t, block.L1UserTxs, dbL1UserTxs)
assert.Equal(t, int64(2), addToken.EthBlockNum)
assert.Equal(t, token.TokenID, addToken.TokenID)
assert.Equal(t, token.Addr, addToken.EthAddr)
assert.Equal(t, token.Consts.Name, addToken.Name)
assert.Equal(t, token.Consts.Symbol, addToken.Symbol)
assert.Equal(t, token.Consts.Decimals, addToken.Decimals)
var addTokenCpy historydb.TokenRead
require.Nil(t, copier.Copy(&addTokenCpy, &addToken)) // copy common.Token to historydb.TokenRead
addTokenCpy.ItemID = dbToken.ItemID // we don't care about ItemID
assert.Equal(t, addTokenCpy, dbToken)
// Check Batches
assert.Equal(t, len(block.Batches), len(syncBlock.Batches))
dbBatches, err := s.historyDB.GetAllBatches()
require.Nil(t, err)
assert.Equal(t, len(block.Batches), len(dbBatches))
for i, batch := range block.Batches {
batchNum := batch.Batch.BatchNum
dbBatch := dbBatches[i]
syncBatch := syncBlock.Batches[i]
// We don't care about TotalFeesUSD. Use the syncBatch that
// has a TotalFeesUSD inserted by the HistoryDB
batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD
batch.CreatedAccounts = syncBatch.CreatedAccounts // til doesn't output CreatedAccounts
// fmt.Printf("DBG Batch %d %+v\n", i, batch)
// fmt.Printf("DBG Batch Sync %d %+v\n", i, syncBatch)
// assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs)
fmt.Printf("DBG BatchNum: %d, LastIdx: %d\n", batchNum, batch.Batch.LastIdx)
assert.Equal(t, batch, syncBatch)
assert.Equal(t, batch.Batch, dbBatch)
}
// Check L1UserTxs in DB
@@ -213,28 +319,3 @@ func TestSync(t *testing.T) {
require.Nil(t, err)
*/
}
type userKeys struct {
BJJSK *babyjub.PrivateKey
BJJPK *babyjub.PublicKey
Addr ethCommon.Address
}
func genKeys(i int) *userKeys {
i++ // i = 0 doesn't work for the ecdsa key generation
var sk babyjub.PrivateKey
binary.LittleEndian.PutUint64(sk[:], uint64(i))
// eth address
var key ecdsa.PrivateKey
key.D = big.NewInt(int64(i)) // only for testing
key.PublicKey.X, key.PublicKey.Y = ethCrypto.S256().ScalarBaseMult(key.D.Bytes())
key.Curve = ethCrypto.S256()
addr := ethCrypto.PubkeyToAddress(key.PublicKey)
return &userKeys{
BJJSK: &sk,
BJJPK: sk.Public(),
Addr: addr,
}
}