Merge pull request #185 from hermeznetwork/feature/integration2

Feature/integration2
Commit by arnau on 2020-10-08 12:51:10 +02:00, committed via GitHub.
27 changed files with 470 additions and 260 deletions

View File

@@ -197,7 +197,7 @@ func TestMain(m *testing.M) {
genericTxs = append(genericTxs, l2tx.Tx()) genericTxs = append(genericTxs, l2tx.Tx())
} }
// Transform generic Txs to HistoryTx // Transform generic Txs to HistoryTx
historyTxs := []*historydb.HistoryTx{} historyTxs := []historydb.HistoryTx{}
for _, genericTx := range genericTxs { for _, genericTx := range genericTxs {
// find timestamp // find timestamp
var timestamp time.Time var timestamp time.Time
@@ -238,7 +238,7 @@ func TestMain(m *testing.M) {
*feeUSD = *usd * genericTx.Fee.Percentage() *feeUSD = *usd * genericTx.Fee.Percentage()
} }
} }
historyTxs = append(historyTxs, &historydb.HistoryTx{ historyTxs = append(historyTxs, historydb.HistoryTx{
IsL1: genericTx.IsL1, IsL1: genericTx.IsL1,
TxID: genericTx.TxID, TxID: genericTx.TxID,
Type: genericTx.Type, Type: genericTx.Type,

View File

@@ -69,7 +69,7 @@ type historyTxAPI struct {
Token common.Token `json:"token"` Token common.Token `json:"token"`
} }
func historyTxsToAPI(dbTxs []*historydb.HistoryTx) []historyTxAPI { func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI {
apiTxs := []historyTxAPI{} apiTxs := []historyTxAPI{}
for i := 0; i < len(dbTxs); i++ { for i := 0; i < len(dbTxs); i++ {
apiTx := historyTxAPI{ apiTx := historyTxAPI{

View File

@@ -51,7 +51,7 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
} }
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []*common.L1Tx, pooll2txs []*common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) { func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
zkInputs, _, err := bb.localStateDB.ProcessTxs(l1usertxs, l1coordinatortxs, pooll2txs) zkInputs, _, err := bb.localStateDB.ProcessTxs(l1usertxs, l1coordinatortxs, pooll2txs)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -81,26 +81,42 @@ func NewL1Tx(l1Tx *L1Tx) (*L1Tx, error) {
} }
l1Tx.Type = txType l1Tx.Type = txType
var txid [TxIDLen]byte txID, err := l1Tx.CalcTxID()
if !l1Tx.UserOrigin { if err != nil {
txid[0] = TxIDPrefixL1CoordTx return nil, err
} }
var toForgeL1TxsNumBytes [8]byte l1Tx.TxID = *txID
var toForge uint64 = 0
if l1Tx.ToForgeL1TxsNum != nil {
toForge = uint64(*l1Tx.ToForgeL1TxsNum)
}
binary.BigEndian.PutUint64(toForgeL1TxsNumBytes[:], toForge)
copy(txid[1:9], toForgeL1TxsNumBytes[:])
var positionBytes [2]byte
binary.BigEndian.PutUint16(positionBytes[:], uint16(l1Tx.Position))
copy(txid[9:11], positionBytes[:])
l1Tx.TxID = TxID(txid)
return l1Tx, nil return l1Tx, nil
} }
// CalcTxID calculates the TxId of the L1Tx
func (tx *L1Tx) CalcTxID() (*TxID, error) {
var txID TxID
if tx.UserOrigin {
if tx.ToForgeL1TxsNum == nil {
return nil, fmt.Errorf("L1Tx.UserOrigin == true && L1Tx.ToForgeL1TxsNum == nil")
}
txID[0] = TxIDPrefixL1UserTx
var toForgeL1TxsNumBytes [8]byte
binary.BigEndian.PutUint64(toForgeL1TxsNumBytes[:], uint64(*tx.ToForgeL1TxsNum))
copy(txID[1:9], toForgeL1TxsNumBytes[:])
} else {
if tx.BatchNum == nil {
return nil, fmt.Errorf("L1Tx.UserOrigin == false && L1Tx.BatchNum == nil")
}
txID[0] = TxIDPrefixL1CoordTx
var batchNumBytes [8]byte
binary.BigEndian.PutUint64(batchNumBytes[:], uint64(*tx.BatchNum))
copy(txID[1:9], batchNumBytes[:])
}
var positionBytes [2]byte
binary.BigEndian.PutUint16(positionBytes[:], uint16(tx.Position))
copy(txID[9:11], positionBytes[:])
return &txID, nil
}
// Tx returns a *Tx from the L1Tx // Tx returns a *Tx from the L1Tx
func (tx *L1Tx) Tx() *Tx { func (tx *L1Tx) Tx() *Tx {
f := new(big.Float).SetInt(tx.Amount) f := new(big.Float).SetInt(tx.Amount)
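
For reference, here is a minimal, self-contained sketch of the TxID layout that the new CalcTxID builds. The 12-byte length and the trailing zero byte are inferred from the expected strings in the updated tests in the next file; l1TxIDSketch is a hypothetical helper written for illustration, not code from this PR.

package main

import (
	"encoding/binary"
	"fmt"
)

// l1TxIDSketch reproduces the layout: 1 prefix byte (0x00 for an L1UserTx,
// 0x01 for an L1CoordinatorTx), 8 bytes big-endian ToForgeL1TxsNum or
// BatchNum, 2 bytes big-endian Position, and one unused trailing byte.
func l1TxIDSketch(userOrigin bool, numOrBatch uint64, position uint16) [12]byte {
	var id [12]byte
	if userOrigin {
		id[0] = 0x00 // TxIDPrefixL1UserTx
	} else {
		id[0] = 0x01 // TxIDPrefixL1CoordTx
	}
	binary.BigEndian.PutUint64(id[1:9], numOrBatch)
	binary.BigEndian.PutUint16(id[9:11], position)
	return id
}

func main() {
	user := l1TxIDSketch(true, 123456, 71)
	fmt.Printf("0x%x\n", user[:]) // 0x00000000000001e240004700, matching TestNewL1UserTx
	coord := l1TxIDSketch(false, 51966, 88)
	fmt.Printf("0x%x\n", coord[:]) // 0x01000000000000cafe005800, matching TestNewL1CoordinatorTx
}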

View File

@@ -15,23 +15,40 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestNewL1Tx(t *testing.T) { func TestNewL1UserTx(t *testing.T) {
toForge := new(int64) toForge := int64(123456)
*toForge = 123456 fromIdx := Idx(300)
fromIdx := new(Idx)
*fromIdx = 300
l1Tx := &L1Tx{ l1Tx := &L1Tx{
ToForgeL1TxsNum: toForge, ToForgeL1TxsNum: &toForge,
Position: 71, Position: 71,
UserOrigin: true,
ToIdx: 301, ToIdx: 301,
TokenID: 5, TokenID: 5,
Amount: big.NewInt(1), Amount: big.NewInt(1),
LoadAmount: big.NewInt(2), LoadAmount: big.NewInt(2),
FromIdx: fromIdx, FromIdx: &fromIdx,
} }
l1Tx, err := NewL1Tx(l1Tx) l1Tx, err := NewL1Tx(l1Tx)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "0x01000000000001e240004700", l1Tx.TxID.String()) assert.Equal(t, "0x00000000000001e240004700", l1Tx.TxID.String())
}
func TestNewL1CoordinatorTx(t *testing.T) {
fromIdx := Idx(300)
batchNum := BatchNum(51966)
l1Tx := &L1Tx{
Position: 88,
UserOrigin: false,
ToIdx: 301,
TokenID: 5,
Amount: big.NewInt(1),
LoadAmount: big.NewInt(2),
FromIdx: &fromIdx,
BatchNum: &batchNum,
}
l1Tx, err := NewL1Tx(l1Tx)
assert.Nil(t, err)
assert.Equal(t, "0x01000000000000cafe005800", l1Tx.TxID.String())
} }
func TestL1TxByteParsers(t *testing.T) { func TestL1TxByteParsers(t *testing.T) {

View File

@@ -111,10 +111,10 @@ func (tx *L2Tx) PoolL2Tx() *PoolL2Tx {
// L2TxsToPoolL2Txs returns an array of []*PoolL2Tx from an array of []*L2Tx, // L2TxsToPoolL2Txs returns an array of []*PoolL2Tx from an array of []*L2Tx,
// where the PoolL2Tx only have the parameters of a L2Tx filled. // where the PoolL2Tx only have the parameters of a L2Tx filled.
func L2TxsToPoolL2Txs(txs []*L2Tx) []*PoolL2Tx { func L2TxsToPoolL2Txs(txs []*L2Tx) []PoolL2Tx {
var r []*PoolL2Tx var r []PoolL2Tx
for _, tx := range txs { for _, tx := range txs {
r = append(r, tx.PoolL2Tx()) r = append(r, *tx.PoolL2Tx())
} }
return r return r
} }

View File

@@ -250,15 +250,15 @@ func (tx *PoolL2Tx) Tx() (*Tx, error) {
}, nil }, nil
} }
// PoolL2TxsToL2Txs returns an array of []*L2Tx from an array of []*PoolL2Tx // PoolL2TxsToL2Txs returns an array of []L2Tx from an array of []PoolL2Tx
func PoolL2TxsToL2Txs(txs []*PoolL2Tx) ([]*L2Tx, error) { func PoolL2TxsToL2Txs(txs []PoolL2Tx) ([]L2Tx, error) {
var r []*L2Tx var r []L2Tx
for _, poolTx := range txs { for _, poolTx := range txs {
tx, err := poolTx.L2Tx() tx, err := poolTx.L2Tx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
r = append(r, tx) r = append(r, *tx)
} }
return r, nil return r, nil
} }
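
A short usage sketch of the two converters with their new value-slice signatures (roundTripSketch is a hypothetical caller; only the two functions come from the hunks above):

// Round-trips through the new value-slice APIs in package common.
func roundTripSketch(l2txs []*common.L2Tx) ([]common.L2Tx, error) {
	poolTxs := common.L2TxsToPoolL2Txs(l2txs) // []common.PoolL2Tx, values copied out of the pointers
	return common.PoolL2TxsToL2Txs(poolTxs)   // conversion back can fail, hence the error
}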

View File

@@ -31,8 +31,8 @@ type AuctionVars struct {
AllocationRatio AllocationRatio AllocationRatio AllocationRatio
} }
// WithdrawalDelayerVars contains the Withdrawal Delayer smart contract variables // WithdrawDelayerVars contains the Withdrawal Delayer smart contract variables
type WithdrawalDelayerVars struct { type WithdrawDelayerVars struct {
HermezRollupAddress eth.Address HermezRollupAddress eth.Address
HermezGovernanceDAOAddress eth.Address HermezGovernanceDAOAddress eth.Address
WhiteHackGroupAddress eth.Address WhiteHackGroupAddress eth.Address

View File

@@ -12,6 +12,11 @@ import (
) )
const ( const (
// TXIDPrefixL1UserTx is the prefix that determines that the TxID is
// for a L1UserTx
//nolinter:gomnd
TxIDPrefixL1UserTx = byte(0)
// TXIDPrefixL1CoordTx is the prefix that determines that the TxID is // TXIDPrefixL1CoordTx is the prefix that determines that the TxID is
// for a L1CoordinatorTx // for a L1CoordinatorTx
//nolinter:gomnd //nolinter:gomnd

View File

@@ -15,9 +15,9 @@ type BatchInfo struct {
serverProof ServerProofInterface serverProof ServerProofInterface
zkInputs *common.ZKInputs zkInputs *common.ZKInputs
proof *Proof proof *Proof
L1UserTxsExtra []*common.L1Tx L1UserTxsExtra []common.L1Tx
L1OperatorTxs []*common.L1Tx L1OperatorTxs []common.L1Tx
L2Txs []*common.PoolL2Tx L2Txs []common.PoolL2Tx
// FeesInfo // FeesInfo
ethTx *types.Transaction ethTx *types.Transaction
} }
@@ -33,7 +33,7 @@ func NewBatchInfo(batchNum common.BatchNum, serverProof ServerProofInterface) Ba
// SetTxsInfo sets the l1UserTxs, l1OperatorTxs and l2Txs to the BatchInfo data // SetTxsInfo sets the l1UserTxs, l1OperatorTxs and l2Txs to the BatchInfo data
// structure // structure
func (bi *BatchInfo) SetTxsInfo(l1UserTxsExtra, l1OperatorTxs []*common.L1Tx, l2Txs []*common.PoolL2Tx) { func (bi *BatchInfo) SetTxsInfo(l1UserTxsExtra, l1OperatorTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// TBD parameter: feesInfo // TBD parameter: feesInfo
bi.L1UserTxsExtra = l1UserTxsExtra bi.L1UserTxsExtra = l1UserTxsExtra
bi.L1OperatorTxs = l1OperatorTxs bi.L1OperatorTxs = l1OperatorTxs

View File

@@ -206,14 +206,14 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
c.batchNum = c.batchNum + 1 c.batchNum = c.batchNum + 1
batchInfo := NewBatchInfo(c.batchNum, serverProof) // to accumulate metadata of the batch batchInfo := NewBatchInfo(c.batchNum, serverProof) // to accumulate metadata of the batch
var poolL2Txs []*common.PoolL2Tx var poolL2Txs []common.PoolL2Tx
// var feesInfo // var feesInfo
var l1UserTxsExtra, l1OperatorTxs []*common.L1Tx var l1UserTxsExtra, l1OperatorTxs []common.L1Tx
// 1. Decide if we forge L2Tx or L1+L2Tx // 1. Decide if we forge L2Tx or L1+L2Tx
if c.shouldL1L2Batch() { if c.shouldL1L2Batch() {
// 2a: L1+L2 txs // 2a: L1+L2 txs
// l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment // l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment
var l1UserTxs []*common.L1Tx = nil // tmp, depends on HistoryDB var l1UserTxs []common.L1Tx = nil // tmp, depends on HistoryDB
l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
if err != nil { if err != nil {
return nil, err return nil, err
@@ -358,7 +358,7 @@ func (c *Coordinator) purgeRemoveByTimeout() error {
return nil // TODO return nil // TODO
} }
func (c *Coordinator) purgeInvalidDueToL2TxsSelection(l2Txs []*common.PoolL2Tx) error { func (c *Coordinator) purgeInvalidDueToL2TxsSelection(l2Txs []common.PoolL2Tx) error {
return nil // TODO return nil // TODO
} }

View File

@@ -8,6 +8,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
@@ -25,7 +26,7 @@ type HistoryDB struct {
// BlockData contains the information of a Block // BlockData contains the information of a Block
type BlockData struct { type BlockData struct {
block *common.Block Block *common.Block
// Rollup // Rollup
// L1UserTxs that were submitted in the block // L1UserTxs that were submitted in the block
L1UserTxs []common.L1Tx L1UserTxs []common.L1Tx
@@ -33,10 +34,10 @@ type BlockData struct {
RegisteredTokens []common.Token RegisteredTokens []common.Token
RollupVars *common.RollupVars RollupVars *common.RollupVars
// Auction // Auction
Bids []common.Bid Bids []common.Bid
Coordinators []common.Coordinator Coordinators []common.Coordinator
AuctionVars *common.AuctionVars AuctionVars *common.AuctionVars
// WithdrawalDelayer WithdrawDelayerVars *common.WithdrawDelayerVars
// TODO: enable when common.WithdrawalDelayerVars is Merged from Synchronizer PR // TODO: enable when common.WithdrawalDelayerVars is Merged from Synchronizer PR
// WithdrawalDelayerVars *common.WithdrawalDelayerVars // WithdrawalDelayerVars *common.WithdrawalDelayerVars
} }
@@ -53,6 +54,19 @@ type BatchData struct {
Batch *common.Batch Batch *common.Batch
} }
// NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData {
return &BatchData{
L1Batch: false,
L1UserTxs: make([]common.L1Tx, 0),
L1CoordinatorTxs: make([]common.L1Tx, 0),
L2Txs: make([]common.L2Tx, 0),
CreatedAccounts: make([]common.Account, 0),
ExitTree: make([]common.ExitInfo, 0),
Batch: &common.Batch{},
}
}
// NewHistoryDB initialize the DB // NewHistoryDB initialize the DB
func NewHistoryDB(db *sqlx.DB) *HistoryDB { func NewHistoryDB(db *sqlx.DB) *HistoryDB {
return &HistoryDB{db: db} return &HistoryDB{db: db}
@@ -92,14 +106,14 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
} }
// GetBlocks retrieve blocks from the DB, given a range of block numbers defined by from and to // GetBlocks retrieve blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) GetBlocks(from, to int64) ([]*common.Block, error) { func (hdb *HistoryDB) GetBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block var blocks []*common.Block
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &blocks, hdb.db, &blocks,
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2;", "SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2;",
from, to, from, to,
) )
return blocks, err return db.SlicePtrsToSlice(blocks).([]common.Block), err
} }
// GetLastBlock retrieve the block with the highest block number from the DB // GetLastBlock retrieve the block with the highest block number from the DB
@@ -141,14 +155,14 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
} }
// GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to // GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]*common.Batch, error) { func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &batches, hdb.db, &batches,
"SELECT * FROM batch WHERE $1 <= batch_num AND batch_num < $2;", "SELECT * FROM batch WHERE $1 <= batch_num AND batch_num < $2;",
from, to, from, to,
) )
return batches, err return db.SlicePtrsToSlice(batches).([]common.Batch), err
} }
// GetLastBatchNum returns the BatchNum of the latest forged batch // GetLastBatchNum returns the BatchNum of the latest forged batch
@@ -201,13 +215,13 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
} }
// GetBids return the bids // GetBids return the bids
func (hdb *HistoryDB) GetBids() ([]*common.Bid, error) { func (hdb *HistoryDB) GetBids() ([]common.Bid, error) {
var bids []*common.Bid var bids []*common.Bid
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &bids, hdb.db, &bids,
"SELECT * FROM bid;", "SELECT * FROM bid;",
) )
return bids, err return db.SlicePtrsToSlice(bids).([]common.Bid), err
} }
// AddCoordinators insert Coordinators into the DB // AddCoordinators insert Coordinators into the DB
@@ -269,13 +283,13 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
} }
// GetTokens returns a list of tokens from the DB // GetTokens returns a list of tokens from the DB
func (hdb *HistoryDB) GetTokens() ([]*common.Token, error) { func (hdb *HistoryDB) GetTokens() ([]common.Token, error) {
var tokens []*common.Token var tokens []*common.Token
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &tokens, hdb.db, &tokens,
"SELECT * FROM token ORDER BY token_id;", "SELECT * FROM token ORDER BY token_id;",
) )
return tokens, err return db.SlicePtrsToSlice(tokens).([]common.Token), err
} }
// GetTokenSymbols returns all the token symbols from the DB // GetTokenSymbols returns all the token symbols from the DB
@@ -315,13 +329,13 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
} }
// GetAccounts returns a list of accounts from the DB // GetAccounts returns a list of accounts from the DB
func (hdb *HistoryDB) GetAccounts() ([]*common.Account, error) { func (hdb *HistoryDB) GetAccounts() ([]common.Account, error) {
var accs []*common.Account var accs []*common.Account
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &accs, hdb.db, &accs,
"SELECT * FROM account ORDER BY idx;", "SELECT * FROM account ORDER BY idx;",
) )
return accs, err return db.SlicePtrsToSlice(accs).([]common.Account), err
} }
// AddL1Txs inserts L1 txs to the DB. USD and LoadAmountUSD will be set automatically before storing the tx. // AddL1Txs inserts L1 txs to the DB. USD and LoadAmountUSD will be set automatically before storing the tx.
@@ -384,14 +398,14 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []common.Tx) error {
} }
// GetTxs returns a list of txs from the DB // GetTxs returns a list of txs from the DB
func (hdb *HistoryDB) GetTxs() ([]*common.Tx, error) { func (hdb *HistoryDB) GetTxs() ([]common.Tx, error) {
var txs []*common.Tx var txs []*common.Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &txs, hdb.db, &txs,
`SELECT * FROM tx `SELECT * FROM tx
ORDER BY (batch_num, position) ASC`, ORDER BY (batch_num, position) ASC`,
) )
return txs, err return db.SlicePtrsToSlice(txs).([]common.Tx), err
} }
// GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct // GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct
@@ -399,7 +413,7 @@ func (hdb *HistoryDB) GetHistoryTxs(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKey, ethAddr *ethCommon.Address, bjj *babyjub.PublicKey,
tokenID, idx, batchNum *uint, txType *common.TxType, tokenID, idx, batchNum *uint, txType *common.TxType,
offset, limit *uint, last bool, offset, limit *uint, last bool,
) ([]*HistoryTx, int, error) { ) ([]HistoryTx, int, error) {
if ethAddr != nil && bjj != nil { if ethAddr != nil && bjj != nil {
return nil, 0, errors.New("ethAddr and bjj are incompatible") return nil, 0, errors.New("ethAddr and bjj are incompatible")
} }
@@ -481,14 +495,15 @@ func (hdb *HistoryDB) GetHistoryTxs(
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
txs := []*HistoryTx{} txsPtrs := []*HistoryTx{}
if err := meddler.QueryAll(hdb.db, &txs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, err return nil, 0, err
} }
txs := db.SlicePtrsToSlice(txsPtrs).([]HistoryTx)
if len(txs) == 0 { if len(txs) == 0 {
return nil, 0, sql.ErrNoRows return nil, 0, sql.ErrNoRows
} else if last { } else if last {
tmp := []*HistoryTx{} tmp := []HistoryTx{}
for i := len(txs) - 1; i >= 0; i-- { for i := len(txs) - 1; i >= 0; i-- {
tmp = append(tmp, txs[i]) tmp = append(tmp, txs[i])
} }
@@ -536,27 +551,22 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *BlockData) (err error) {
} }
defer func() { defer func() {
if err != nil { if err != nil {
err = txn.Rollback() errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
} }
}() }()
// Add block // Add block
err = hdb.addBlock(txn, blockData.block) err = hdb.addBlock(txn, blockData.Block)
if err != nil { if err != nil {
return err return err
} }
// Add l1 Txs // Add Coordinators
if len(blockData.L1UserTxs) > 0 { if len(blockData.Coordinators) > 0 {
err = hdb.addL1Txs(txn, blockData.L1UserTxs) err = hdb.addCoordinators(txn, blockData.Coordinators)
if err != nil {
return err
}
}
// Add Tokens
if len(blockData.RegisteredTokens) > 0 {
err = hdb.addTokens(txn, blockData.RegisteredTokens)
if err != nil { if err != nil {
return err return err
} }
@@ -570,9 +580,17 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *BlockData) (err error) {
} }
} }
// Add Coordinators // Add Tokens
if len(blockData.Coordinators) > 0 { if len(blockData.RegisteredTokens) > 0 {
err = hdb.addCoordinators(txn, blockData.Coordinators) err = hdb.addTokens(txn, blockData.RegisteredTokens)
if err != nil {
return err
}
}
// Add l1 Txs
if len(blockData.L1UserTxs) > 0 {
err = hdb.addL1Txs(txn, blockData.L1UserTxs)
if err != nil { if err != nil {
return err return err
} }
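
All of the getter changes above follow the same shape: meddler.QueryAll still needs a slice of struct pointers as its scan destination, so the conversion to a value slice happens once, at the return, via the new db.SlicePtrsToSlice helper. A hedged sketch of that shape (getCoordinatorsSketch and its query are illustrative, not part of the PR):

func (hdb *HistoryDB) getCoordinatorsSketch() ([]common.Coordinator, error) {
	var coords []*common.Coordinator // meddler scans into pointers
	err := meddler.QueryAll(
		hdb.db, &coords,
		"SELECT * FROM coordinator;",
	)
	// Hand values back so callers cannot alias or mutate the fetched rows.
	return db.SlicePtrsToSlice(coords).([]common.Coordinator), err
}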

View File

@@ -60,8 +60,8 @@ func TestBlocks(t *testing.T) {
assert.Equal(t, len(blocks), len(fetchedBlocks)) assert.Equal(t, len(blocks), len(fetchedBlocks))
// Compare generated vs getted blocks // Compare generated vs getted blocks
assert.NoError(t, err) assert.NoError(t, err)
for i, fetchedBlock := range fetchedBlocks { for i := range fetchedBlocks {
assertEqualBlock(t, &blocks[i], fetchedBlock) assertEqualBlock(t, &blocks[i], &fetchedBlocks[i])
} }
// Get blocks from the DB one by one // Get blocks from the DB one by one
for i := fromBlock; i < toBlock; i++ { for i := fromBlock; i < toBlock; i++ {
@@ -100,7 +100,7 @@ func TestBatches(t *testing.T) {
fetchedBatches, err := historyDB.GetBatches(0, common.BatchNum(nBatches)) fetchedBatches, err := historyDB.GetBatches(0, common.BatchNum(nBatches))
assert.NoError(t, err) assert.NoError(t, err)
for i, fetchedBatch := range fetchedBatches { for i, fetchedBatch := range fetchedBatches {
assert.Equal(t, batches[i], *fetchedBatch) assert.Equal(t, batches[i], fetchedBatch)
} }
// Test GetLastBatchNum // Test GetLastBatchNum
fetchedLastBatchNum, err := historyDB.GetLastBatchNum() fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
@@ -132,7 +132,7 @@ func TestBids(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched bids vs generated bids // Compare fetched bids vs generated bids
for i, bid := range fetchedBids { for i, bid := range fetchedBids {
assert.Equal(t, bids[i], *bid) assert.Equal(t, bids[i], bid)
} }
} }
@@ -191,7 +191,7 @@ func TestAccounts(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched accounts vs generated accounts // Compare fetched accounts vs generated accounts
for i, acc := range fetchedAccs { for i, acc := range fetchedAccs {
assert.Equal(t, accs[i], *acc) assert.Equal(t, accs[i], acc)
} }
} }

View File

@@ -7,6 +7,8 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
@@ -133,14 +135,14 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
} }
// GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee // GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]*common.PoolL2Tx, error) { func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx var txs []*common.PoolL2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
l2db.db, &txs, l2db.db, &txs,
selectPoolTx+"WHERE state = $1 AND token.usd IS NOT NULL", selectPoolTx+"WHERE state = $1 AND token.usd IS NOT NULL",
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
) )
return txs, err return db.SlicePtrsToSlice(txs).([]common.PoolL2Tx), err
} }
// StartForging updates the state of the transactions that will begin the forging process. // StartForging updates the state of the transactions that will begin the forging process.
@@ -212,7 +214,10 @@ func (l2db *L2DB) CheckNonces(updatedAccounts []common.Account, batchNum common.
defer func() { defer func() {
// Rollback the transaction if there was an error. // Rollback the transaction if there was an error.
if err != nil { if err != nil {
err = txn.Rollback() errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
} }
}() }()
for i := 0; i < len(updatedAccounts); i++ { for i := 0; i < len(updatedAccounts); i++ {
@@ -257,7 +262,10 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
defer func() { defer func() {
// Rollback the transaction if there was an error. // Rollback the transaction if there was an error.
if err != nil { if err != nil {
err = txn.Rollback() errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
} }
}() }()
// Delete pending txs that have been in the pool after the TTL if maxTxs is reached // Delete pending txs that have been in the pool after the TTL if maxTxs is reached
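
Both DB layers now use the same deferred-rollback idiom: a Rollback failure is only logged instead of overwriting err, so the caller still sees the original error. A sketch of the idiom, assuming an sqlx transaction obtained with Beginx (doInTx is a hypothetical wrapper, not code from this PR):

func doInTx(db *sqlx.DB) (err error) {
	txn, err := db.Beginx()
	if err != nil {
		return err
	}
	defer func() {
		// Only roll back when something below failed; keep the original err.
		if err != nil {
			if errRollback := txn.Rollback(); errRollback != nil {
				log.Errorw("Rollback", "err", errRollback)
			}
		}
	}()
	// ... statements against txn ...
	return txn.Commit()
}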

View File

@@ -111,8 +111,8 @@ func TestGetPending(t *testing.T) {
fetchedTxs, err := l2DB.GetPendingTxs() fetchedTxs, err := l2DB.GetPendingTxs()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs)) assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i, fetchedTx := range fetchedTxs { for i := range fetchedTxs {
assertTx(t, pendingTxs[i], fetchedTx) assertTx(t, pendingTxs[i], &fetchedTxs[i])
} }
} }

View File

@@ -36,7 +36,7 @@ type processedExit struct {
// type==TypeSynchronizer, assumes that the call is done from the Synchronizer, // type==TypeSynchronizer, assumes that the call is done from the Synchronizer,
// returns common.ExitTreeLeaf that is later used by the Synchronizer to update // returns common.ExitTreeLeaf that is later used by the Synchronizer to update
// the HistoryDB, and adds Nonce & TokenID to the L2Txs. // the HistoryDB, and adds Nonce & TokenID to the L2Txs.
func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, error) { func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, error) {
var err error var err error
var exitTree *merkletree.MerkleTree var exitTree *merkletree.MerkleTree
@@ -68,7 +68,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
} }
// assumption: l1usertx are sorted by L1Tx.Position // assumption: l1usertx are sorted by L1Tx.Position
for _, tx := range l1usertxs { for i := range l1usertxs {
tx := &l1usertxs[i]
exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx) exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@@ -85,7 +86,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
s.i++ s.i++
} }
} }
for _, tx := range l1coordinatortxs { for i := range l1coordinatortxs {
tx := &l1coordinatortxs[i]
exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx) exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@@ -105,7 +107,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
s.i++ s.i++
} }
} }
for _, tx := range l2txs { for i := range l2txs {
tx := &l2txs[i]
exitIdx, exitAccount, newExit, err := s.processL2Tx(exitTree, tx) exitIdx, exitAccount, newExit, err := s.processL2Tx(exitTree, tx)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@@ -194,7 +197,7 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
} }
// getTokenIDsBigInt returns the list of TokenIDs in *big.Int format // getTokenIDsBigInt returns the list of TokenIDs in *big.Int format
func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.PoolL2Tx) ([]*big.Int, error) { func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) ([]*big.Int, error) {
tokenIDs := make(map[common.TokenID]bool) tokenIDs := make(map[common.TokenID]bool)
for i := 0; i < len(l1usertxs); i++ { for i := 0; i < len(l1usertxs); i++ {
tokenIDs[l1usertxs[i].TokenID] = true tokenIDs[l1usertxs[i].TokenID] = true
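
The loops switch from "for _, tx := range ..." to indexing because, once the parameters are value slices, the range variable is a copy: taking its address or mutating it would not touch the slice element that ProcessTxs later reads back. A tiny standalone illustration of that pitfall (not code from the PR):

package main

import "fmt"

type item struct{ n int }

func main() {
	items := []item{{1}, {2}, {3}}

	// Range copies each element; mutations are lost.
	for _, it := range items {
		it.n *= 10
	}
	fmt.Println(items) // [{1} {2} {3}]

	// Index into the slice and take a pointer, as ProcessTxs now does.
	for i := range items {
		it := &items[i]
		it.n *= 10
	}
	fmt.Println(items) // [{10} {20} {30}]
}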

View File

@@ -40,7 +40,7 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
log.Info("successfully runt ", nMigrations, " migrations") log.Info("successfully ran ", nMigrations, " migrations")
return db, nil return db, nil
} }
@@ -52,7 +52,10 @@ func initMeddler() {
// BulkInsert performs a bulk insert with a single statement into the specified table. Example: // BulkInsert performs a bulk insert with a single statement into the specified table. Example:
// `db.BulkInsert(myDB, "INSERT INTO block (eth_block_num, timestamp, hash) VALUES %s", blocks[:])` // `db.BulkInsert(myDB, "INSERT INTO block (eth_block_num, timestamp, hash) VALUES %s", blocks[:])`
// Note that all the columns must be specified in the query, and they must be in the same order as in the table. // Note that all the columns must be specified in the query, and they must be
// in the same order as in the table.
// Note that the fields in the structs need to be defined in the same order as
// in the table columns.
func BulkInsert(db meddler.DB, q string, args interface{}) error { func BulkInsert(db meddler.DB, q string, args interface{}) error {
arrayValue := reflect.ValueOf(args) arrayValue := reflect.ValueOf(args)
arrayLen := arrayValue.Len() arrayLen := arrayValue.Len()
@@ -150,3 +153,27 @@ func (b BigIntNullMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}
} }
return base64.StdEncoding.EncodeToString(field.Bytes()), nil return base64.StdEncoding.EncodeToString(field.Bytes()), nil
} }
// SliceToSlicePtrs converts any []Foo to []*Foo
func SliceToSlicePtrs(slice interface{}) interface{} {
v := reflect.ValueOf(slice)
vLen := v.Len()
typ := v.Type().Elem()
res := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(typ)), vLen, vLen)
for i := 0; i < vLen; i++ {
res.Index(i).Set(v.Index(i).Addr())
}
return res.Interface()
}
// SlicePtrsToSlice converts any []*Foo to []Foo
func SlicePtrsToSlice(slice interface{}) interface{} {
v := reflect.ValueOf(slice)
vLen := v.Len()
typ := v.Type().Elem().Elem()
res := reflect.MakeSlice(reflect.SliceOf(typ), vLen, vLen)
for i := 0; i < vLen; i++ {
res.Index(i).Set(v.Index(i).Elem())
}
return res.Interface()
}

db/utils_test.go (new file, 35 additions)
View File

@@ -0,0 +1,35 @@
package db
import (
"testing"
"github.com/stretchr/testify/assert"
)
type foo struct {
V int
}
func TestSliceToSlicePtrs(t *testing.T) {
n := 16
a := make([]foo, n)
for i := 0; i < n; i++ {
a[i] = foo{V: i}
}
b := SliceToSlicePtrs(a).([]*foo)
for i := 0; i < len(a); i++ {
assert.Equal(t, a[i], *b[i])
}
}
func TestSlicePtrsToSlice(t *testing.T) {
n := 16
a := make([]*foo, n)
for i := 0; i < n; i++ {
a[i] = &foo{V: i}
}
b := SlicePtrsToSlice(a).([]foo)
for i := 0; i < len(a); i++ {
assert.Equal(t, *a[i], b[i])
}
}

go.mod (2 additions)
View File

@@ -27,3 +27,5 @@ require (
golang.org/x/tools/gopls v0.5.0 // indirect golang.org/x/tools/gopls v0.5.0 // indirect
gopkg.in/go-playground/validator.v9 v9.29.1 gopkg.in/go-playground/validator.v9 v9.29.1
) )
// replace github.com/russross/meddler => /home/dev/git/iden3/hermez/meddler

View File

@@ -1,6 +1,7 @@
package node package node
import ( import (
"context"
"time" "time"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
@@ -83,7 +84,10 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
} }
client := eth.NewClient(ethClient, nil, nil, nil) client := eth.NewClient(ethClient, nil, nil, nil)
sync := synchronizer.NewSynchronizer(client, historyDB, stateDB) sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB)
if err != nil {
return nil, err
}
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
if mode == ModeCoordinator { if mode == ModeCoordinator {
@@ -223,7 +227,7 @@ func (n *Node) StartSynchronizer() {
log.Info("Coordinator stopped") log.Info("Coordinator stopped")
return return
case <-time.After(n.cfg.Synchronizer.SyncLoopInterval.Duration): case <-time.After(n.cfg.Synchronizer.SyncLoopInterval.Duration):
if err := n.sync.Sync(); err != nil { if err := n.sync.Sync(context.TODO()); err != nil {
log.Errorw("Synchronizer.Sync", "error", err) log.Errorw("Synchronizer.Sync", "error", err)
} }
} }

View File

@@ -20,93 +20,102 @@ var (
// rollupData contains information returned by the Rollup SC // rollupData contains information returned by the Rollup SC
type rollupData struct { type rollupData struct {
l1Txs []*common.L1Tx l1UserTxs []common.L1Tx
batches []*BatchData batches []historydb.BatchData
// withdrawals []*common.ExitInfo // withdrawals []*common.ExitInfo
registeredTokens []*common.Token registeredTokens []common.Token
rollupVars *common.RollupVars vars *common.RollupVars
} }
// NewRollupData creates an empty rollupData with the slices initialized. // NewRollupData creates an empty rollupData with the slices initialized.
func newRollupData() rollupData { func newRollupData() rollupData {
return rollupData{ return rollupData{
l1Txs: make([]*common.L1Tx, 0), l1UserTxs: make([]common.L1Tx, 0),
batches: make([]*BatchData, 0), batches: make([]historydb.BatchData, 0),
// withdrawals: make([]*common.ExitInfo, 0), // withdrawals: make([]*common.ExitInfo, 0),
registeredTokens: make([]*common.Token, 0), registeredTokens: make([]common.Token, 0),
} }
} }
// auctionData contains information returned by the Action SC // auctionData contains information returned by the Action SC
type auctionData struct { type auctionData struct {
bids []*common.Bid bids []common.Bid
coordinators []*common.Coordinator coordinators []common.Coordinator
auctionVars *common.AuctionVars vars *common.AuctionVars
} }
// newAuctionData creates an empty auctionData with the slices initialized. // newAuctionData creates an empty auctionData with the slices initialized.
func newAuctionData() *auctionData { func newAuctionData() *auctionData {
return &auctionData{ return &auctionData{
bids: make([]*common.Bid, 0), bids: make([]common.Bid, 0),
coordinators: make([]*common.Coordinator, 0), coordinators: make([]common.Coordinator, 0),
} }
} }
type wdelayerData struct {
vars *common.WithdrawDelayerVars
}
// BatchData contains information about Batches from the contracts // BatchData contains information about Batches from the contracts
type BatchData struct { // type BatchData struct {
l1UserTxs []*common.L1Tx // l1UserTxs []*common.L1Tx
l1CoordinatorTxs []*common.L1Tx // l1CoordinatorTxs []*common.L1Tx
l2Txs []*common.L2Tx // l2Txs []*common.L2Tx
createdAccounts []*common.Account // createdAccounts []*common.Account
exitTree []common.ExitInfo // exitTree []*common.ExitInfo
batch *common.Batch // batch *common.Batch
} // }
// NewBatchData creates an empty BatchData with the slices initialized. // NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData { // func NewBatchData() *BatchData {
return &BatchData{ // return &BatchData{
l1UserTxs: make([]*common.L1Tx, 0), // l1UserTxs: make([]*common.L1Tx, 0),
l1CoordinatorTxs: make([]*common.L1Tx, 0), // l1CoordinatorTxs: make([]*common.L1Tx, 0),
l2Txs: make([]*common.L2Tx, 0), // l2Txs: make([]*common.L2Tx, 0),
createdAccounts: make([]*common.Account, 0), // createdAccounts: make([]*common.Account, 0),
exitTree: make([]common.ExitInfo, 0), // exitTree: make([]*common.ExitInfo, 0),
} // }
} // }
// BlockData contains information about Blocks from the contracts // BlockData contains information about Blocks from the contracts
type BlockData struct { // type blockData struct {
block *common.Block // Block *common.Block
// Rollup // // Rollup
l1Txs []*common.L1Tx // TODO: Answer: User? Coordinator? Both? // L1Txs []*common.L1Tx // TODO: Answer: User? Coordinator? Both?
batches []*BatchData // TODO: Also contains L1Txs! // Batches []*BatchData // TODO: Also contains L1Txs!
// withdrawals []*common.ExitInfo // TODO // // withdrawals []*common.ExitInfo // TODO
registeredTokens []*common.Token // RegisteredTokens []common.Token
rollupVars *common.RollupVars // RollupVars *common.RollupVars
// Auction // // Auction
bids []*common.Bid // Bids []*common.Bid
coordinators []*common.Coordinator // Coordinators []*common.Coordinator
auctionVars *common.AuctionVars // AuctionVars *common.AuctionVars
// WithdrawalDelayer // // WithdrawalDelayer
withdrawalDelayerVars *common.WithdrawalDelayerVars // WithdrawalDelayerVars *common.WithdrawalDelayerVars
} // }
// Synchronizer implements the Synchronizer type // Synchronizer implements the Synchronizer type
type Synchronizer struct { type Synchronizer struct {
ethClient *eth.Client ethClient eth.ClientInterface
historyDB *historydb.HistoryDB auctionConstants eth.AuctionConstants
stateDB *statedb.StateDB historyDB *historydb.HistoryDB
firstSavedBlock *common.Block stateDB *statedb.StateDB
mux sync.Mutex firstSavedBlock *common.Block
mux sync.Mutex
} }
// NewSynchronizer creates a new Synchronizer // NewSynchronizer creates a new Synchronizer
func NewSynchronizer(ethClient *eth.Client, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) *Synchronizer { func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) (*Synchronizer, error) {
s := &Synchronizer{ auctionConstants, err := ethClient.AuctionConstants()
ethClient: ethClient, if err != nil {
historyDB: historyDB, return nil, err
stateDB: stateDB,
} }
return s return &Synchronizer{
ethClient: ethClient,
auctionConstants: *auctionConstants,
historyDB: historyDB,
stateDB: stateDB,
}, nil
} }
// TODO: Be smart about locking: only lock during the read/write operations // TODO: Be smart about locking: only lock during the read/write operations
@@ -116,7 +125,7 @@ func NewSynchronizer(ethClient *eth.Client, historyDB *historydb.HistoryDB, stat
// TODO: Add argument: maximum number of blocks to process // TODO: Add argument: maximum number of blocks to process
// TODO: Check reorgs in the middle of syncing a block. Probably make // TODO: Check reorgs in the middle of syncing a block. Probably make
// rollupSync, auctionSync and withdrawalSync return the block hash. // rollupSync, auctionSync and withdrawalSync return the block hash.
func (s *Synchronizer) Sync() error { func (s *Synchronizer) Sync(ctx context.Context) error {
// Avoid new sync while performing one // Avoid new sync while performing one
s.mux.Lock() s.mux.Lock()
defer s.mux.Unlock() defer s.mux.Unlock()
@@ -130,11 +139,10 @@ func (s *Synchronizer) Sync() error {
} }
// If we don't have any stored block, we must do a full sync starting from the rollup genesis block // If we don't have any stored block, we must do a full sync starting from the rollup genesis block
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
// TODO: Query rollup constants and genesis information, store them nextBlockNum = s.auctionConstants.GenesisBlockNum
nextBlockNum = 1234 // TODO: Replace this with genesisBlockNum
} else { } else {
// Get the latest block we have in History DB from blockchain to detect a reorg // Get the latest block we have in History DB from blockchain to detect a reorg
ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), lastSavedBlock.EthBlockNum) ethBlock, err := s.ethClient.EthBlockByNumber(ctx, lastSavedBlock.EthBlockNum)
if err != nil { if err != nil {
return err return err
} }
@@ -165,7 +173,7 @@ func (s *Synchronizer) Sync() error {
log.Debugf("Blocks to sync: %v (firstBlockToSync: %v, latestBlock: %v)", latestBlockNum-nextBlockNum+1, nextBlockNum, latestBlockNum) log.Debugf("Blocks to sync: %v (firstBlockToSync: %v, latestBlock: %v)", latestBlockNum-nextBlockNum+1, nextBlockNum, latestBlockNum)
for nextBlockNum < latestBlockNum { for nextBlockNum <= latestBlockNum {
ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), nextBlockNum) ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), nextBlockNum)
if err != nil { if err != nil {
return err return err
@@ -195,34 +203,40 @@ func (s *Synchronizer) Sync() error {
} }
// Group all the block data into the structs to save into HistoryDB // Group all the block data into the structs to save into HistoryDB
var blockData BlockData var blockData historydb.BlockData
blockData.block = ethBlock blockData.Block = ethBlock
if rollupData != nil { if rollupData != nil {
blockData.l1Txs = rollupData.l1Txs blockData.L1UserTxs = rollupData.l1UserTxs
blockData.batches = rollupData.batches blockData.Batches = rollupData.batches
// blockData.withdrawals = rollupData.withdrawals // TODO // blockData.withdrawals = rollupData.withdrawals // TODO
blockData.registeredTokens = rollupData.registeredTokens blockData.RegisteredTokens = rollupData.registeredTokens
blockData.rollupVars = rollupData.rollupVars blockData.RollupVars = rollupData.vars
} }
if auctionData != nil { if auctionData != nil {
blockData.bids = auctionData.bids blockData.Bids = auctionData.bids
blockData.coordinators = auctionData.coordinators blockData.Coordinators = auctionData.coordinators
blockData.auctionVars = auctionData.auctionVars blockData.AuctionVars = auctionData.vars
} }
if wdelayerData != nil { if wdelayerData != nil {
blockData.withdrawalDelayerVars = wdelayerData blockData.WithdrawDelayerVars = wdelayerData.vars
} }
// Add rollupData and auctionData once the method is updated // Add rollupData and auctionData once the method is updated
// TODO: Save Whole Struct -> AddBlockSCData(blockData) // TODO: Save Whole Struct -> AddBlockSCData(blockData)
err = s.historyDB.AddBlock(blockData.block) log.Debugw("Sync()", "block", blockData)
// err = s.historyDB.AddBlock(blockData.Block)
// if err != nil {
// return err
// }
err = s.historyDB.AddBlockSCData(&blockData)
if err != nil { if err != nil {
return err return err
} }
nextBlockNum++
} }
return nil return nil
@@ -358,18 +372,18 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
// } // }
// Get L1UserTX // Get L1UserTX
rollupData.l1Txs, err = getL1UserTx(rollupEvents.L1UserTx, blockNum) rollupData.l1UserTxs, err = getL1UserTx(rollupEvents.L1UserTx, blockNum)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Get ForgeBatch events to get the L1CoordinatorTxs // Get ForgeBatch events to get the L1CoordinatorTxs
for _, fbEvent := range rollupEvents.ForgeBatch { for _, evtForgeBatch := range rollupEvents.ForgeBatch {
batchData := NewBatchData() batchData := historydb.NewBatchData()
position := 0 position := 0
// Get the input for each Tx // Get the input for each Tx
forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(fbEvent.EthTxHash) forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -399,14 +413,14 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
l1CoordinatorTx.UserOrigin = false l1CoordinatorTx.UserOrigin = false
l1CoordinatorTx.EthBlockNum = blockNum l1CoordinatorTx.EthBlockNum = blockNum
bn := new(common.BatchNum) bn := new(common.BatchNum)
*bn = common.BatchNum(fbEvent.BatchNum) *bn = common.BatchNum(evtForgeBatch.BatchNum)
l1CoordinatorTx.BatchNum = bn l1CoordinatorTx.BatchNum = bn
l1CoordinatorTx, err = common.NewL1Tx(l1CoordinatorTx) l1Tx, err := common.NewL1Tx(l1CoordinatorTx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
batchData.l1CoordinatorTxs = append(batchData.l1CoordinatorTxs, l1CoordinatorTx) batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx)
// Check if we have to register an account // Check if we have to register an account
// if l1CoordinatorTx.FromIdx == 0 { // if l1CoordinatorTx.FromIdx == 0 {
@@ -435,7 +449,7 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
// TODO: Get createdAccounts from ProcessTxs() // TODO: Get createdAccounts from ProcessTxs()
// TODO: Get CollectedFees from ProcessTxs() // TODO: Get CollectedFees from ProcessTxs()
// TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs() // TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
_, exitInfo, err := s.stateDB.ProcessTxs(batchData.l1UserTxs, batchData.l1CoordinatorTxs, poolL2Txs) _, exitInfo, err := s.stateDB.ProcessTxs(batchData.L1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -444,13 +458,13 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
batchData.l2Txs = append(batchData.l2Txs, l2Txs...) batchData.L2Txs = append(batchData.L2Txs, l2Txs...)
batchData.exitTree = exitInfo batchData.ExitTree = exitInfo
// Get Batch information // Get Batch information
batch := &common.Batch{ batch := &common.Batch{
BatchNum: common.BatchNum(fbEvent.BatchNum), BatchNum: common.BatchNum(evtForgeBatch.BatchNum),
EthBlockNum: blockNum, EthBlockNum: blockNum,
// ForgerAddr: , TODO: Get it from ethClient -> Add ForgerAddr to RollupEventForgeBatch // ForgerAddr: , TODO: Get it from ethClient -> Add ForgerAddr to RollupEventForgeBatch
// CollectedFees: , TODO: Clarify where to get them if they are still needed // CollectedFees: , TODO: Clarify where to get them if they are still needed
@@ -460,19 +474,23 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
ForgeL1TxsNum: &forgeL1TxsNum, ForgeL1TxsNum: &forgeL1TxsNum,
// SlotNum: TODO: Calculate once ethClient provides the info // calculate from blockNum + ethClient Constants // SlotNum: TODO: Calculate once ethClient provides the info // calculate from blockNum + ethClient Constants
} }
batchData.batch = batch batchData.Batch = batch
rollupData.batches = append(rollupData.batches, batchData) rollupData.batches = append(rollupData.batches, *batchData)
} }
// Get Registered Tokens // Get Registered Tokens
for _, eAddToken := range rollupEvents.AddToken { for _, evtAddToken := range rollupEvents.AddToken {
var token *common.Token var token common.Token
token.TokenID = common.TokenID(eAddToken.TokenID) token.TokenID = common.TokenID(evtAddToken.TokenID)
token.EthAddr = eAddToken.Address token.EthAddr = evtAddToken.Address
token.EthBlockNum = blockNum token.EthBlockNum = blockNum
// TODO: Add external information consulting SC about it using Address // TODO: Add external information consulting SC about it using Address
token.Name = "TODO"
token.Symbol = "TODO"
token.Decimals = 8 // TODO
rollupData.registeredTokens = append(rollupData.registeredTokens, token) rollupData.registeredTokens = append(rollupData.registeredTokens, token)
} }
@@ -498,22 +516,22 @@ func (s *Synchronizer) auctionSync(blockNum int64) (*auctionData, error) {
} }
// Get bids // Get bids
for _, eNewBid := range auctionEvents.NewBid { for _, evtNewBid := range auctionEvents.NewBid {
bid := &common.Bid{ bid := common.Bid{
SlotNum: common.SlotNum(eNewBid.Slot), SlotNum: common.SlotNum(evtNewBid.Slot),
BidValue: eNewBid.BidAmount, BidValue: evtNewBid.BidAmount,
Bidder: eNewBid.Bidder, Bidder: evtNewBid.Bidder,
EthBlockNum: blockNum, EthBlockNum: blockNum,
} }
auctionData.bids = append(auctionData.bids, bid) auctionData.bids = append(auctionData.bids, bid)
} }
// Get Coordinators // Get Coordinators
for _, eNewCoordinator := range auctionEvents.SetCoordinator { for _, evtSetCoordinator := range auctionEvents.SetCoordinator {
coordinator := &common.Coordinator{ coordinator := common.Coordinator{
Bidder: eNewCoordinator.BidderAddress, Bidder: evtSetCoordinator.BidderAddress,
Forger: eNewCoordinator.ForgerAddress, Forger: evtSetCoordinator.ForgerAddress,
URL: eNewCoordinator.CoordinatorURL, URL: evtSetCoordinator.CoordinatorURL,
} }
auctionData.coordinators = append(auctionData.coordinators, coordinator) auctionData.coordinators = append(auctionData.coordinators, coordinator)
} }
@@ -537,7 +555,7 @@ func (s *Synchronizer) auctionSync(blockNum int64) (*auctionData, error) {
} }
// wdelayerSync gets information from the Withdrawal Delayer Contract // wdelayerSync gets information from the Withdrawal Delayer Contract
func (s *Synchronizer) wdelayerSync(blockNum int64) (*common.WithdrawalDelayerVars, error) { func (s *Synchronizer) wdelayerSync(blockNum int64) (*wdelayerData, error) {
// TODO: VARS // TODO: VARS
// TODO: CONSTANTS // TODO: CONSTANTS
@@ -559,24 +577,23 @@ func (s *Synchronizer) wdelayerSync(blockNum int64) (*common.WithdrawalDelayerVa
// return forgeBatchArgs.NewLastIdx + 1, nil // return forgeBatchArgs.NewLastIdx + 1, nil
// } // }
func getL1UserTx(l1UserTxEvents []eth.RollupEventL1UserTx, blockNum int64) ([]*common.L1Tx, error) { func getL1UserTx(eventsL1UserTx []eth.RollupEventL1UserTx, blockNum int64) ([]common.L1Tx, error) {
l1Txs := make([]*common.L1Tx, 0) l1Txs := make([]common.L1Tx, 0)
for _, eL1UserTx := range l1UserTxEvents { for _, evtL1UserTx := range eventsL1UserTx {
// Fill aditional Tx fields // Fill aditional Tx fields
toForge := new(int64) toForge := evtL1UserTx.ToForgeL1TxsNum
*toForge = eL1UserTx.ToForgeL1TxsNum evtL1UserTx.L1Tx.ToForgeL1TxsNum = &toForge
eL1UserTx.L1Tx.ToForgeL1TxsNum = toForge evtL1UserTx.L1Tx.Position = evtL1UserTx.Position
eL1UserTx.L1Tx.Position = eL1UserTx.Position evtL1UserTx.L1Tx.UserOrigin = true
eL1UserTx.L1Tx.UserOrigin = true evtL1UserTx.L1Tx.EthBlockNum = blockNum
eL1UserTx.L1Tx.EthBlockNum = blockNum nL1Tx, err := common.NewL1Tx(&evtL1UserTx.L1Tx)
nL1Tx, err := common.NewL1Tx(&eL1UserTx.L1Tx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
eL1UserTx.L1Tx = *nL1Tx evtL1UserTx.L1Tx = *nL1Tx
l1Txs = append(l1Txs, &eL1UserTx.L1Tx) l1Txs = append(l1Txs, evtL1UserTx.L1Tx)
} }
return l1Txs, nil return l1Txs, nil
} }

View File

@@ -1,25 +1,37 @@
package synchronizer package synchronizer
import ( import (
"context"
"io/ioutil" "io/ioutil"
"math/big"
"os" "os"
"testing" "testing"
"github.com/ethereum/go-ethereum/ethclient" ethCommon "github.com/ethereum/go-ethereum/common"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func Test(t *testing.T) { type timer struct {
time int64
}
func (t *timer) Time() int64 {
currentTime := t.time
t.time++
return currentTime
}
func TestSync(t *testing.T) {
// Int State DB // Int State DB
dir, err := ioutil.TempDir("", "tmpdb") dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err) require.Nil(t, err)
sdb, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32) stateDB, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32)
assert.Nil(t, err) assert.Nil(t, err)
// Init History DB // Init History DB
@@ -27,23 +39,56 @@ func Test(t *testing.T) {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.Nil(t, err) require.Nil(t, err)
historyDB := historydb.NewHistoryDB(db) historyDB := historydb.NewHistoryDB(db)
err = historyDB.Reorg(0) // Clear DB
err = historyDB.Reorg(-1)
assert.Nil(t, err) assert.Nil(t, err)
// Init eth client // Init eth client
ehtClientDialURL := os.Getenv("ETHCLIENT_DIAL_URL") var timer timer
ethClient, err := ethclient.Dial(ehtClientDialURL) clientSetup := test.NewClientSetupExample()
require.Nil(t, err) client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
client := eth.NewClient(ethClient, nil, nil, nil)
// Create Synchronizer // Create Synchronizer
s := NewSynchronizer(client, historyDB, sdb) s, err := NewSynchronizer(client, historyDB, stateDB)
require.NotNil(t, s) require.Nil(t, err)
// Test Sync // Test Sync for ethereum genesis block
// err = s.Sync() err = s.Sync(context.Background())
// require.Nil(t, err) require.Nil(t, err)
blocks, err := s.historyDB.GetBlocks(0, 9999)
require.Nil(t, err)
assert.Equal(t, int64(0), blocks[0].EthBlockNum)
// Test Sync for a block with new Tokens and L1UserTxs
// accounts := test.GenerateKeys(t, []string{"A", "B", "C", "D"})
l1UserTxs, _, _, _ := test.GenerateTestTxsFromSet(t, `
A (1): 10
A (2): 20
B (1): 5
C (1): 8
D (3): 15
> advance batch
`)
require.Greater(t, len(l1UserTxs[0]), 0)
// require.Greater(t, len(tokens), 0)
for i := 1; i <= 3; i++ {
_, err := client.RollupAddToken(ethCommon.BigToAddress(big.NewInt(int64(i * 10000))))
require.Nil(t, err)
}
for i := range l1UserTxs[0] {
client.CtlAddL1TxUser(&l1UserTxs[0][i])
}
client.CtlMineBlock()
err = s.Sync(context.Background())
require.Nil(t, err)
getTokens, err := s.historyDB.GetTokens()
require.Nil(t, err)
assert.Equal(t, 3, len(getTokens))
// TODO: Reorg will be properly tested once we have the mock ethClient implemented // TODO: Reorg will be properly tested once we have the mock ethClient implemented
/* /*

View File

@@ -329,10 +329,11 @@ func NewClient(l bool, timer Timer, addr *ethCommon.Address, setup *ClientSetup)
blockCurrent := Block{ blockCurrent := Block{
Rollup: &RollupBlock{ Rollup: &RollupBlock{
State: eth.RollupState{ State: eth.RollupState{
StateRoot: big.NewInt(0), StateRoot: big.NewInt(0),
ExitRoots: make([]*big.Int, 0), ExitRoots: make([]*big.Int, 0),
ExitNullifierMap: make(map[[256 / 8]byte]bool), ExitNullifierMap: make(map[[256 / 8]byte]bool),
TokenList: make([]ethCommon.Address, 0), // TokenID = 0 is ETH. Set first entry in TokenList with 0x0 address for ETH.
TokenList: []ethCommon.Address{{}},
TokenMap: make(map[ethCommon.Address]bool), TokenMap: make(map[ethCommon.Address]bool),
MapL1TxQueue: mapL1TxQueue, MapL1TxQueue: mapL1TxQueue,
LastL1L2Batch: 0, LastL1L2Batch: 0,
@@ -597,7 +598,11 @@ func (c *Client) CtlAddL1TxUser(l1Tx *common.L1Tx) {
panic("l1Tx.TokenID + 1 > len(r.State.TokenList)") panic("l1Tx.TokenID + 1 > len(r.State.TokenList)")
} }
queue.L1TxQueue = append(queue.L1TxQueue, *l1Tx) queue.L1TxQueue = append(queue.L1TxQueue, *l1Tx)
r.Events.L1UserTx = append(r.Events.L1UserTx, eth.RollupEventL1UserTx{L1Tx: *l1Tx}) r.Events.L1UserTx = append(r.Events.L1UserTx, eth.RollupEventL1UserTx{
L1Tx: *l1Tx,
ToForgeL1TxsNum: r.State.LastToForgeL1TxsNum,
Position: len(queue.L1TxQueue) - 1,
})
} }
type transactionData struct { type transactionData struct {

View File

@@ -152,6 +152,12 @@ func GenL1Txs(
LoadAmountUSD: lUSD, LoadAmountUSD: lUSD,
EthBlockNum: blocks[i%len(blocks)].EthBlockNum, EthBlockNum: blocks[i%len(blocks)].EthBlockNum,
} }
if tx.UserOrigin {
n := nextTxsNum
tx.ToForgeL1TxsNum = &n
} else {
tx.BatchNum = &batches[i%len(batches)].BatchNum
}
nTx, err := common.NewL1Tx(&tx) nTx, err := common.NewL1Tx(&tx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -163,7 +169,8 @@ func GenL1Txs(
setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs) setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs)
} else { } else {
// Add unforged txs // Add unforged txs
tx.ToForgeL1TxsNum = nextTxsNum n := nextTxsNum
tx.ToForgeL1TxsNum = &n
tx.UserOrigin = true tx.UserOrigin = true
setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs) setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs)
} }
@@ -172,13 +179,13 @@ func GenL1Txs(
} }
// GetNextToForgeNumAndBatch returns the next BatchNum and ForgeL1TxsNum to be added // GetNextToForgeNumAndBatch returns the next BatchNum and ForgeL1TxsNum to be added
func GetNextToForgeNumAndBatch(batches []common.Batch) (common.BatchNum, *int64) { func GetNextToForgeNumAndBatch(batches []common.Batch) (common.BatchNum, int64) {
batchNum := batches[len(batches)-1].BatchNum + 1 batchNum := batches[len(batches)-1].BatchNum + 1
toForgeL1TxsNum := new(int64) var toForgeL1TxsNum int64
found := false found := false
for i := len(batches) - 1; i >= 0; i-- { for i := len(batches) - 1; i >= 0; i-- {
if batches[i].ForgeL1TxsNum != nil { if batches[i].ForgeL1TxsNum != nil {
*toForgeL1TxsNum = *batches[i].ForgeL1TxsNum + 1 toForgeL1TxsNum = *batches[i].ForgeL1TxsNum + 1
found = true found = true
break break
} }
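
GetNextToForgeNumAndBatch now returns the int64 by value, and GenL1Txs copies it into a fresh local (n := nextTxsNum) before taking its address. A standalone illustration of the aliasing difference this avoids (presumably the motivation; the snippet is not code from the PR):

package main

import "fmt"

func main() {
	// Old style: one shared *int64 handed to every element.
	shared := int64(7)
	var viaShared []*int64
	for i := 0; i < 3; i++ {
		viaShared = append(viaShared, &shared)
	}
	shared = 99
	fmt.Println(*viaShared[0], *viaShared[2]) // 99 99: all aliases moved together

	// New style: copy into a fresh local per element, then take its address.
	next := int64(7)
	var viaCopy []*int64
	for i := 0; i < 3; i++ {
		n := next
		viaCopy = append(viaCopy, &n)
	}
	next = 99
	fmt.Println(*viaCopy[0], *viaCopy[2]) // 7 7: each element keeps its own value
}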

View File

@@ -52,16 +52,16 @@ func GenerateKeys(t *testing.T, accNames []string) map[string]*Account {

 // GenerateTestTxs generates L1Tx & PoolL2Tx in a deterministic way for the
 // given Instructions.
-func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx, []common.Token) {
+func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]common.L1Tx, [][]common.L1Tx, [][]common.PoolL2Tx, []common.Token) {
 	accounts := GenerateKeys(t, instructions.Accounts)
 	l1CreatedAccounts := make(map[string]*Account)

-	var batchL1Txs []*common.L1Tx
-	var batchCoordinatorL1Txs []*common.L1Tx
-	var batchPoolL2Txs []*common.PoolL2Tx
-	var l1Txs [][]*common.L1Tx
-	var coordinatorL1Txs [][]*common.L1Tx
-	var poolL2Txs [][]*common.PoolL2Tx
+	var batchL1Txs []common.L1Tx
+	var batchCoordinatorL1Txs []common.L1Tx
+	var batchPoolL2Txs []common.PoolL2Tx
+	var l1Txs [][]common.L1Tx
+	var coordinatorL1Txs [][]common.L1Tx
+	var poolL2Txs [][]common.PoolL2Tx
 	idx := 256
 	for _, inst := range instructions.Instructions {
 		switch inst.Type {
@@ -71,10 +71,11 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				FromEthAddr: accounts[idxTokenIDToString(inst.From, inst.TokenID)].Addr,
 				FromBJJ:     accounts[idxTokenIDToString(inst.From, inst.TokenID)].BJJ.Public(),
 				TokenID:     inst.TokenID,
+				Amount:      big.NewInt(0),
 				LoadAmount:  big.NewInt(int64(inst.Amount)),
 				Type:        common.TxTypeCreateAccountDeposit,
 			}
-			batchL1Txs = append(batchL1Txs, &tx)
+			batchL1Txs = append(batchL1Txs, tx)

 			if accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx == common.Idx(0) { // if account.Idx is not set yet, set it and increment idx
 				accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx = common.Idx(idx)
@@ -93,7 +94,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				}
 				accounts[idxTokenIDToString(inst.To, inst.TokenID)].Idx = common.Idx(idx)
 				l1CreatedAccounts[idxTokenIDToString(inst.To, inst.TokenID)] = accounts[idxTokenIDToString(inst.To, inst.TokenID)]
-				batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, &tx)
+				batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, tx)
 				idx++
 			}
 			toIdx := new(common.Idx)
@@ -132,7 +133,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 			tx.Signature = sig

 			accounts[idxTokenIDToString(inst.From, inst.TokenID)].Nonce++
-			batchPoolL2Txs = append(batchPoolL2Txs, &tx)
+			batchPoolL2Txs = append(batchPoolL2Txs, tx)

 		case common.TxTypeExit, common.TxTypeForceExit:
 			fromIdx := new(common.Idx)
@@ -144,14 +145,14 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				Amount:  big.NewInt(int64(inst.Amount)),
 				Type:    common.TxTypeExit,
 			}
-			batchL1Txs = append(batchL1Txs, &tx)
+			batchL1Txs = append(batchL1Txs, tx)
 		case TypeNewBatch:
 			l1Txs = append(l1Txs, batchL1Txs)
 			coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
 			poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
-			batchL1Txs = []*common.L1Tx{}
-			batchCoordinatorL1Txs = []*common.L1Tx{}
-			batchPoolL2Txs = []*common.PoolL2Tx{}
+			batchL1Txs = []common.L1Tx{}
+			batchCoordinatorL1Txs = []common.L1Tx{}
+			batchPoolL2Txs = []common.PoolL2Tx{}
 		default:
 			continue
 		}
@@ -184,7 +185,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,

 // GenerateTestTxsFromSet reurns the L1 & L2 transactions for a given Set of
 // Instructions code
-func GenerateTestTxsFromSet(t *testing.T, set string) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx, []common.Token) {
+func GenerateTestTxsFromSet(t *testing.T, set string) ([][]common.L1Tx, [][]common.L1Tx, [][]common.PoolL2Tx, []common.Token) {
 	parser := NewParser(strings.NewReader(set))
 	instructions, err := parser.Parse()
 	require.Nil(t, err)
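A short sketch of the deposit shape after the change above (the account variable and the amounts are placeholders): a CreateAccountDeposit moves funds through LoadAmount, so Amount is given an explicit zero, presumably so later processing never sees a nil *big.Int, and the tx is appended by value to match the new slice types.

    tx := common.L1Tx{
        FromEthAddr: account.Addr,          // account: an assumed *Account from GenerateKeys
        FromBJJ:     account.BJJ.Public(),
        TokenID:     common.TokenID(1),
        Amount:      big.NewInt(0),         // a deposit carries no L2 transfer amount
        LoadAmount:  big.NewInt(500),       // the deposited amount
        Type:        common.TxTypeCreateAccountDeposit,
    }
    batchL1Txs = append(batchL1Txs, tx) // value append, matching []common.L1Tx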

View File

@@ -16,7 +16,7 @@ import (
 )

 // txs implements the interface Sort for an array of Tx
-type txs []*common.PoolL2Tx
+type txs []common.PoolL2Tx

 func (t txs) Len() int {
 	return len(t)
@@ -68,7 +68,7 @@ func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
 }

 // GetL2TxSelection returns a selection of the L2Txs for the next batch, from the L2DB pool
-func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]common.PoolL2Tx, error) {
 	// get pending l2-tx from tx-pool
 	l2TxsRaw, err := txsel.l2db.GetPendingTxs() // once l2db ready, maybe use parameter 'batchNum'
 	if err != nil {
@@ -98,7 +98,7 @@ func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.P
 }

 // GetL1L2TxSelection returns the selection of L1 + L2 txs
-func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []common.L1Tx) ([]common.L1Tx, []common.L1Tx, []common.PoolL2Tx, error) {
 	// apply l1-user-tx to localAccountDB
 	// create new leaves
 	// update balances
@@ -111,7 +111,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
 	}

 	var validTxs txs
-	var l1CoordinatorTxs []*common.L1Tx
+	var l1CoordinatorTxs []common.L1Tx
 	positionL1 := len(l1Txs)

 	// if tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB, if so,
@@ -181,7 +181,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
 				validTxs = append(validTxs, l2TxsRaw[i])
 			}
 			// create L1CoordinatorTx for the accountCreation
-			l1CoordinatorTx := &common.L1Tx{
+			l1CoordinatorTx := common.L1Tx{
 				Position:    positionL1,
 				UserOrigin:  false,
 				FromEthAddr: accAuth.EthAddr,
@@ -210,7 +210,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
 				log.Warn("l2TxsRaw[i].ToEthAddr should not be nil")
 				continue
 			}
-			l1CoordinatorTx := &common.L1Tx{
+			l1CoordinatorTx := common.L1Tx{
 				Position:    positionL1,
 				UserOrigin:  false,
 				FromEthAddr: *l2TxsRaw[i].ToEthAddr,
@@ -254,7 +254,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
 	return l1Txs, l1CoordinatorTxs, l2Txs, nil
 }

-func checkAlreadyPendingToCreate(l1CoordinatorTxs []*common.L1Tx, addr *ethCommon.Address, bjj *babyjub.PublicKey) bool {
+func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, addr *ethCommon.Address, bjj *babyjub.PublicKey) bool {
 	if addr == nil {
 		log.Warn("The provided addr is nil")
 		return false
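A hypothetical caller sketch (batchHasWork is not from this PR) that only exercises the new value-slice signature of GetL1L2TxSelection:

    func batchHasWork(txsel *TxSelector, batchNum common.BatchNum, l1UserTxs []common.L1Tx) (bool, error) {
        l1Txs, l1CoordinatorTxs, l2Txs, err := txsel.GetL1L2TxSelection(batchNum, l1UserTxs)
        if err != nil {
            return false, err
        }
        return len(l1Txs)+len(l1CoordinatorTxs)+len(l2Txs) > 0, nil
    }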

View File

@@ -35,9 +35,9 @@ func initTest(t *testing.T, testSet string) *TxSelector {
 	return txsel
 }

-func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []*common.PoolL2Tx) {
+func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []common.PoolL2Tx) {
 	for i := 0; i < len(poolL2Txs); i++ {
-		err := txsel.l2db.AddTxTest(poolL2Txs[i])
+		err := txsel.l2db.AddTxTest(&poolL2Txs[i])
 		require.Nil(t, err)
 	}
 }
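The index-based loop above matters now that poolL2Txs is a value slice: AddTxTest still takes a pointer, and a `for _, tx := range` variable is reused on every iteration (in the Go versions current at the time), so `&tx` would hand the DB the same address each time. An equivalent sketch of the safe pattern:

    for i := range poolL2Txs {
        err := txsel.l2db.AddTxTest(&poolL2Txs[i]) // address of the slice element, not of a loop variable
        require.Nil(t, err)
    }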