
Merge pull request #185 from hermeznetwork/feature/integration2

Feature/integration2
feature/sql-semaphore1
arnau 4 years ago
committed by GitHub
commit 5ec18f0378
27 changed files with 466 additions and 256 deletions
  1. +2 -2 api/api_test.go
  2. +1 -1 api/dbtoapistructs.go
  3. +1 -1 batchbuilder/batchbuilder.go
  4. +30 -14 common/l1tx.go
  5. +25 -8 common/l1tx_test.go
  6. +3 -3 common/l2tx.go
  7. +4 -4 common/pooll2tx.go
  8. +2 -2 common/scvars.go
  9. +5 -0 common/tx.go
  10. +4 -4 coordinator/batch.go
  11. +4 -4 coordinator/coordinator.go
  12. +53 -35 db/historydb/historydb.go
  13. +5 -5 db/historydb/historydb_test.go
  14. +12 -4 db/l2db/l2db.go
  15. +2 -2 db/l2db/l2db_test.go
  16. +8 -5 db/statedb/txprocessors.go
  17. +29 -2 db/utils.go
  18. +35 -0 db/utils_test.go
  19. +2 -0 go.mod
  20. +6 -2 node/node.go
  21. +128 -111 synchronizer/synchronizer.go
  22. +59 -14 synchronizer/synchronizer_test.go
  23. +10 -5 test/ethclient.go
  24. +11 -4 test/historydb.go
  25. +16 -15 test/txs.go
  26. +7 -7 txselector/txselector.go
  27. +2 -2 txselector/txselector_test.go

+2 -2 api/api_test.go

@ -197,7 +197,7 @@ func TestMain(m *testing.M) {
genericTxs = append(genericTxs, l2tx.Tx())
}
// Transform generic Txs to HistoryTx
historyTxs := []*historydb.HistoryTx{}
historyTxs := []historydb.HistoryTx{}
for _, genericTx := range genericTxs {
// find timestamp
var timestamp time.Time
@ -238,7 +238,7 @@ func TestMain(m *testing.M) {
*feeUSD = *usd * genericTx.Fee.Percentage()
}
}
historyTxs = append(historyTxs, &historydb.HistoryTx{
historyTxs = append(historyTxs, historydb.HistoryTx{
IsL1: genericTx.IsL1,
TxID: genericTx.TxID,
Type: genericTx.Type,

+1 -1 api/dbtoapistructs.go

@ -69,7 +69,7 @@ type historyTxAPI struct {
Token common.Token `json:"token"`
}
func historyTxsToAPI(dbTxs []*historydb.HistoryTx) []historyTxAPI {
func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI {
apiTxs := []historyTxAPI{}
for i := 0; i < len(dbTxs); i++ {
apiTx := historyTxAPI{

+1 -1 batchbuilder/batchbuilder.go

@ -51,7 +51,7 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
}
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []*common.L1Tx, pooll2txs []*common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
zkInputs, _, err := bb.localStateDB.ProcessTxs(l1usertxs, l1coordinatortxs, pooll2txs)
if err != nil {
return nil, err

+30 -14 common/l1tx.go

@ -81,24 +81,40 @@ func NewL1Tx(l1Tx *L1Tx) (*L1Tx, error) {
}
l1Tx.Type = txType
var txid [TxIDLen]byte
if !l1Tx.UserOrigin {
txid[0] = TxIDPrefixL1CoordTx
}
var toForgeL1TxsNumBytes [8]byte
var toForge uint64 = 0
if l1Tx.ToForgeL1TxsNum != nil {
toForge = uint64(*l1Tx.ToForgeL1TxsNum)
txID, err := l1Tx.CalcTxID()
if err != nil {
return nil, err
}
binary.BigEndian.PutUint64(toForgeL1TxsNumBytes[:], toForge)
copy(txid[1:9], toForgeL1TxsNumBytes[:])
l1Tx.TxID = *txID
return l1Tx, nil
}
// CalcTxID calculates the TxID of the L1Tx
func (tx *L1Tx) CalcTxID() (*TxID, error) {
var txID TxID
if tx.UserOrigin {
if tx.ToForgeL1TxsNum == nil {
return nil, fmt.Errorf("L1Tx.UserOrigin == true && L1Tx.ToForgeL1TxsNum == nil")
}
txID[0] = TxIDPrefixL1UserTx
var toForgeL1TxsNumBytes [8]byte
binary.BigEndian.PutUint64(toForgeL1TxsNumBytes[:], uint64(*tx.ToForgeL1TxsNum))
copy(txID[1:9], toForgeL1TxsNumBytes[:])
} else {
if tx.BatchNum == nil {
return nil, fmt.Errorf("L1Tx.UserOrigin == false && L1Tx.BatchNum == nil")
}
txID[0] = TxIDPrefixL1CoordTx
var batchNumBytes [8]byte
binary.BigEndian.PutUint64(batchNumBytes[:], uint64(*tx.BatchNum))
copy(txID[1:9], batchNumBytes[:])
}
var positionBytes [2]byte
binary.BigEndian.PutUint16(positionBytes[:], uint16(l1Tx.Position))
copy(txid[9:11], positionBytes[:])
l1Tx.TxID = TxID(txid)
binary.BigEndian.PutUint16(positionBytes[:], uint16(tx.Position))
copy(txID[9:11], positionBytes[:])
return l1Tx, nil
return &txID, nil
}
// Tx returns a *Tx from the L1Tx
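A minimal standalone sketch (not part of this diff) of the TxID layout that CalcTxID builds: one prefix byte (TxIDPrefixL1UserTx or TxIDPrefixL1CoordTx), eight big-endian bytes holding ToForgeL1TxsNum for user txs or BatchNum for coordinator txs, and two big-endian bytes for Position. The 12-byte total is inferred from the hex strings in the l1tx_test.go changes below.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// TxID layout used by CalcTxID:
	//   [0]    prefix: TxIDPrefixL1UserTx (0x00) or TxIDPrefixL1CoordTx (0x01)
	//   [1:9]  big-endian ToForgeL1TxsNum (user tx) or BatchNum (coordinator tx)
	//   [9:11] big-endian Position
	var txID [12]byte                               // 12 bytes, inferred from the test vectors
	txID[0] = 0x00                                  // L1UserTx prefix
	binary.BigEndian.PutUint64(txID[1:9], 123456)   // ToForgeL1TxsNum from TestNewL1UserTx
	binary.BigEndian.PutUint16(txID[9:11], 71)      // Position from TestNewL1UserTx
	fmt.Println("0x" + hex.EncodeToString(txID[:])) // 0x00000000000001e240004700
}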

+25 -8 common/l1tx_test.go

@ -15,23 +15,40 @@ import (
"github.com/stretchr/testify/require"
)
func TestNewL1Tx(t *testing.T) {
toForge := new(int64)
*toForge = 123456
fromIdx := new(Idx)
*fromIdx = 300
func TestNewL1UserTx(t *testing.T) {
toForge := int64(123456)
fromIdx := Idx(300)
l1Tx := &L1Tx{
ToForgeL1TxsNum: toForge,
ToForgeL1TxsNum: &toForge,
Position: 71,
UserOrigin: true,
ToIdx: 301,
TokenID: 5,
Amount: big.NewInt(1),
LoadAmount: big.NewInt(2),
FromIdx: fromIdx,
FromIdx: &fromIdx,
}
l1Tx, err := NewL1Tx(l1Tx)
assert.Nil(t, err)
assert.Equal(t, "0x00000000000001e240004700", l1Tx.TxID.String())
}
func TestNewL1CoordinatorTx(t *testing.T) {
fromIdx := Idx(300)
batchNum := BatchNum(51966)
l1Tx := &L1Tx{
Position: 88,
UserOrigin: false,
ToIdx: 301,
TokenID: 5,
Amount: big.NewInt(1),
LoadAmount: big.NewInt(2),
FromIdx: &fromIdx,
BatchNum: &batchNum,
}
l1Tx, err := NewL1Tx(l1Tx)
assert.Nil(t, err)
assert.Equal(t, "0x01000000000001e240004700", l1Tx.TxID.String())
assert.Equal(t, "0x01000000000000cafe005800", l1Tx.TxID.String())
}
func TestL1TxByteParsers(t *testing.T) {

+3 -3 common/l2tx.go

@ -111,10 +111,10 @@ func (tx *L2Tx) PoolL2Tx() *PoolL2Tx {
// L2TxsToPoolL2Txs returns an array of []*PoolL2Tx from an array of []*L2Tx,
// where the PoolL2Tx only have the parameters of a L2Tx filled.
func L2TxsToPoolL2Txs(txs []*L2Tx) []*PoolL2Tx {
var r []*PoolL2Tx
func L2TxsToPoolL2Txs(txs []*L2Tx) []PoolL2Tx {
var r []PoolL2Tx
for _, tx := range txs {
r = append(r, tx.PoolL2Tx())
r = append(r, *tx.PoolL2Tx())
}
return r
}

+4 -4 common/pooll2tx.go

@ -250,15 +250,15 @@ func (tx *PoolL2Tx) Tx() (*Tx, error) {
}, nil
}
// PoolL2TxsToL2Txs returns an array of []*L2Tx from an array of []*PoolL2Tx
func PoolL2TxsToL2Txs(txs []*PoolL2Tx) ([]*L2Tx, error) {
var r []*L2Tx
// PoolL2TxsToL2Txs returns an array of []L2Tx from an array of []PoolL2Tx
func PoolL2TxsToL2Txs(txs []PoolL2Tx) ([]L2Tx, error) {
var r []L2Tx
for _, poolTx := range txs {
tx, err := poolTx.L2Tx()
if err != nil {
return nil, err
}
r = append(r, tx)
r = append(r, *tx)
}
return r, nil
}
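L2TxsToPoolL2Txs and PoolL2TxsToL2Txs now return value slices and append dereferenced copies. A tiny standalone sketch (illustrative struct only, not from the codebase) of the resulting copy semantics: once an element is appended by value, the destination no longer aliases the source.

package main

import "fmt"

type tx struct{ Nonce int }

func main() {
	src := []*tx{{Nonce: 1}}
	dst := []tx{}
	for _, t := range src {
		dst = append(dst, *t) // append a copy, as PoolL2TxsToL2Txs above does with *poolTx.L2Tx()
	}
	src[0].Nonce = 99
	fmt.Println(dst[0].Nonce) // 1: later mutations of src do not leak into dst
}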

+2 -2 common/scvars.go

@ -31,8 +31,8 @@ type AuctionVars struct {
AllocationRatio AllocationRatio
}
// WithdrawalDelayerVars contains the Withdrawal Delayer smart contract variables
type WithdrawalDelayerVars struct {
// WithdrawDelayerVars contains the Withdrawal Delayer smart contract variables
type WithdrawDelayerVars struct {
HermezRollupAddress eth.Address
HermezGovernanceDAOAddress eth.Address
WhiteHackGroupAddress eth.Address

+5 -0 common/tx.go

@ -12,6 +12,11 @@ import (
)
const (
// TxIDPrefixL1UserTx is the prefix that determines that the TxID is
// for a L1UserTx
//nolinter:gomnd
TxIDPrefixL1UserTx = byte(0)
// TxIDPrefixL1CoordTx is the prefix that determines that the TxID is
// for a L1CoordinatorTx
//nolinter:gomnd

+4 -4 coordinator/batch.go

@ -15,9 +15,9 @@ type BatchInfo struct {
serverProof ServerProofInterface
zkInputs *common.ZKInputs
proof *Proof
L1UserTxsExtra []*common.L1Tx
L1OperatorTxs []*common.L1Tx
L2Txs []*common.PoolL2Tx
L1UserTxsExtra []common.L1Tx
L1OperatorTxs []common.L1Tx
L2Txs []common.PoolL2Tx
// FeesInfo
ethTx *types.Transaction
}
@ -33,7 +33,7 @@ func NewBatchInfo(batchNum common.BatchNum, serverProof ServerProofInterface) Ba
// SetTxsInfo sets the l1UserTxs, l1OperatorTxs and l2Txs to the BatchInfo data
// structure
func (bi *BatchInfo) SetTxsInfo(l1UserTxsExtra, l1OperatorTxs []*common.L1Tx, l2Txs []*common.PoolL2Tx) {
func (bi *BatchInfo) SetTxsInfo(l1UserTxsExtra, l1OperatorTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// TBD parameter: feesInfo
bi.L1UserTxsExtra = l1UserTxsExtra
bi.L1OperatorTxs = l1OperatorTxs

+4 -4 coordinator/coordinator.go

@ -206,14 +206,14 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
c.batchNum = c.batchNum + 1
batchInfo := NewBatchInfo(c.batchNum, serverProof) // to accumulate metadata of the batch
var poolL2Txs []*common.PoolL2Tx
var poolL2Txs []common.PoolL2Tx
// var feesInfo
var l1UserTxsExtra, l1OperatorTxs []*common.L1Tx
var l1UserTxsExtra, l1OperatorTxs []common.L1Tx
// 1. Decide if we forge L2Tx or L1+L2Tx
if c.shouldL1L2Batch() {
// 2a: L1+L2 txs
// l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment
var l1UserTxs []*common.L1Tx = nil // tmp, depends on HistoryDB
var l1UserTxs []common.L1Tx = nil // tmp, depends on HistoryDB
l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
if err != nil {
return nil, err
@ -358,7 +358,7 @@ func (c *Coordinator) purgeRemoveByTimeout() error {
return nil // TODO
}
func (c *Coordinator) purgeInvalidDueToL2TxsSelection(l2Txs []*common.PoolL2Tx) error {
func (c *Coordinator) purgeInvalidDueToL2TxsSelection(l2Txs []common.PoolL2Tx) error {
return nil // TODO
}

+53 -35 db/historydb/historydb.go

@ -8,6 +8,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
@ -25,7 +26,7 @@ type HistoryDB struct {
// BlockData contains the information of a Block
type BlockData struct {
block *common.Block
Block *common.Block
// Rollup
// L1UserTxs that were submitted in the block
L1UserTxs []common.L1Tx
@ -33,10 +34,10 @@ type BlockData struct {
RegisteredTokens []common.Token
RollupVars *common.RollupVars
// Auction
Bids []common.Bid
Coordinators []common.Coordinator
AuctionVars *common.AuctionVars
// WithdrawalDelayer
Bids []common.Bid
Coordinators []common.Coordinator
AuctionVars *common.AuctionVars
WithdrawDelayerVars *common.WithdrawDelayerVars
// TODO: enable when common.WithdrawalDelayerVars is Merged from Synchronizer PR
// WithdrawalDelayerVars *common.WithdrawalDelayerVars
}
@ -53,6 +54,19 @@ type BatchData struct {
Batch *common.Batch
}
// NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData {
return &BatchData{
L1Batch: false,
L1UserTxs: make([]common.L1Tx, 0),
L1CoordinatorTxs: make([]common.L1Tx, 0),
L2Txs: make([]common.L2Tx, 0),
CreatedAccounts: make([]common.Account, 0),
ExitTree: make([]common.ExitInfo, 0),
Batch: &common.Batch{},
}
}
// NewHistoryDB initialize the DB
func NewHistoryDB(db *sqlx.DB) *HistoryDB {
return &HistoryDB{db: db}
@ -92,14 +106,14 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
}
// GetBlocks retrieve blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) GetBlocks(from, to int64) ([]*common.Block, error) {
func (hdb *HistoryDB) GetBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block
err := meddler.QueryAll(
hdb.db, &blocks,
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2;",
from, to,
)
return blocks, err
return db.SlicePtrsToSlice(blocks).([]common.Block), err
}
// GetLastBlock retrieve the block with the highest block number from the DB
@ -141,14 +155,14 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
}
// GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]*common.Batch, error) {
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
var batches []*common.Batch
err := meddler.QueryAll(
hdb.db, &batches,
"SELECT * FROM batch WHERE $1 <= batch_num AND batch_num < $2;",
from, to,
)
return batches, err
return db.SlicePtrsToSlice(batches).([]common.Batch), err
}
// GetLastBatchNum returns the BatchNum of the latest forged batch
@ -201,13 +215,13 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
}
// GetBids return the bids
func (hdb *HistoryDB) GetBids() ([]*common.Bid, error) {
func (hdb *HistoryDB) GetBids() ([]common.Bid, error) {
var bids []*common.Bid
err := meddler.QueryAll(
hdb.db, &bids,
"SELECT * FROM bid;",
)
return bids, err
return db.SlicePtrsToSlice(bids).([]common.Bid), err
}
// AddCoordinators insert Coordinators into the DB
@ -269,13 +283,13 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
}
// GetTokens returns a list of tokens from the DB
func (hdb *HistoryDB) GetTokens() ([]*common.Token, error) {
func (hdb *HistoryDB) GetTokens() ([]common.Token, error) {
var tokens []*common.Token
err := meddler.QueryAll(
hdb.db, &tokens,
"SELECT * FROM token ORDER BY token_id;",
)
return tokens, err
return db.SlicePtrsToSlice(tokens).([]common.Token), err
}
// GetTokenSymbols returns all the token symbols from the DB
@ -315,13 +329,13 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
}
// GetAccounts returns a list of accounts from the DB
func (hdb *HistoryDB) GetAccounts() ([]*common.Account, error) {
func (hdb *HistoryDB) GetAccounts() ([]common.Account, error) {
var accs []*common.Account
err := meddler.QueryAll(
hdb.db, &accs,
"SELECT * FROM account ORDER BY idx;",
)
return accs, err
return db.SlicePtrsToSlice(accs).([]common.Account), err
}
// AddL1Txs inserts L1 txs to the DB. USD and LoadAmountUSD will be set automatically before storing the tx.
@ -384,14 +398,14 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []common.Tx) error {
}
// GetTxs returns a list of txs from the DB
func (hdb *HistoryDB) GetTxs() ([]*common.Tx, error) {
func (hdb *HistoryDB) GetTxs() ([]common.Tx, error) {
var txs []*common.Tx
err := meddler.QueryAll(
hdb.db, &txs,
`SELECT * FROM tx
ORDER BY (batch_num, position) ASC`,
)
return txs, err
return db.SlicePtrsToSlice(txs).([]common.Tx), err
}
// GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct
@ -399,7 +413,7 @@ func (hdb *HistoryDB) GetHistoryTxs(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKey,
tokenID, idx, batchNum *uint, txType *common.TxType,
offset, limit *uint, last bool,
) ([]*HistoryTx, int, error) {
) ([]HistoryTx, int, error) {
if ethAddr != nil && bjj != nil {
return nil, 0, errors.New("ethAddr and bjj are incompatible")
}
@ -481,14 +495,15 @@ func (hdb *HistoryDB) GetHistoryTxs(
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
txs := []*HistoryTx{}
if err := meddler.QueryAll(hdb.db, &txs, query, args...); err != nil {
txsPtrs := []*HistoryTx{}
if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, err
}
txs := db.SlicePtrsToSlice(txsPtrs).([]HistoryTx)
if len(txs) == 0 {
return nil, 0, sql.ErrNoRows
} else if last {
tmp := []*HistoryTx{}
tmp := []HistoryTx{}
for i := len(txs) - 1; i >= 0; i-- {
tmp = append(tmp, txs[i])
}
@ -536,43 +551,46 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *BlockData) (err error) {
}
defer func() {
if err != nil {
err = txn.Rollback()
errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
}
}()
// Add block
err = hdb.addBlock(txn, blockData.block)
err = hdb.addBlock(txn, blockData.Block)
if err != nil {
return err
}
// Add l1 Txs
if len(blockData.L1UserTxs) > 0 {
err = hdb.addL1Txs(txn, blockData.L1UserTxs)
// Add Coordinators
if len(blockData.Coordinators) > 0 {
err = hdb.addCoordinators(txn, blockData.Coordinators)
if err != nil {
return err
}
}
// Add Tokens
if len(blockData.RegisteredTokens) > 0 {
err = hdb.addTokens(txn, blockData.RegisteredTokens)
// Add Bids
if len(blockData.Bids) > 0 {
err = hdb.addBids(txn, blockData.Bids)
if err != nil {
return err
}
}
// Add Bids
if len(blockData.Bids) > 0 {
err = hdb.addBids(txn, blockData.Bids)
// Add Tokens
if len(blockData.RegisteredTokens) > 0 {
err = hdb.addTokens(txn, blockData.RegisteredTokens)
if err != nil {
return err
}
}
// Add Coordinators
if len(blockData.Coordinators) > 0 {
err = hdb.addCoordinators(txn, blockData.Coordinators)
// Add l1 Txs
if len(blockData.L1UserTxs) > 0 {
err = hdb.addL1Txs(txn, blockData.L1UserTxs)
if err != nil {
return err
}
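Here, and in l2db.go below, the deferred rollback no longer assigns to the named return err: doing so would replace the original failure with Rollback's result, which is usually nil. A minimal standalone sketch with hypothetical insert/rollback helpers (stdlib log instead of the project's logger) showing the difference:

package main

import (
	"errors"
	"fmt"
	"log"
)

func insert() error   { return errors.New("insert failed") }
func rollback() error { return nil }

// oldPattern: the original error is overwritten by the rollback result.
func oldPattern() (err error) {
	defer func() {
		if err != nil {
			err = rollback() // rollback succeeds, so err becomes nil and the failure is lost
		}
	}()
	return insert()
}

// newPattern: keep err, only log a rollback failure (as AddBlockSCData above now does).
func newPattern() (err error) {
	defer func() {
		if err != nil {
			if errRollback := rollback(); errRollback != nil {
				log.Println("Rollback:", errRollback)
			}
		}
	}()
	return insert()
}

func main() {
	fmt.Println(oldPattern()) // <nil>
	fmt.Println(newPattern()) // insert failed
}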

+5 -5 db/historydb/historydb_test.go

@ -60,8 +60,8 @@ func TestBlocks(t *testing.T) {
assert.Equal(t, len(blocks), len(fetchedBlocks))
// Compare generated vs fetched blocks
assert.NoError(t, err)
for i, fetchedBlock := range fetchedBlocks {
assertEqualBlock(t, &blocks[i], fetchedBlock)
for i := range fetchedBlocks {
assertEqualBlock(t, &blocks[i], &fetchedBlocks[i])
}
// Get blocks from the DB one by one
for i := fromBlock; i < toBlock; i++ {
@ -100,7 +100,7 @@ func TestBatches(t *testing.T) {
fetchedBatches, err := historyDB.GetBatches(0, common.BatchNum(nBatches))
assert.NoError(t, err)
for i, fetchedBatch := range fetchedBatches {
assert.Equal(t, batches[i], *fetchedBatch)
assert.Equal(t, batches[i], fetchedBatch)
}
// Test GetLastBatchNum
fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
@ -132,7 +132,7 @@ func TestBids(t *testing.T) {
assert.NoError(t, err)
// Compare fetched bids vs generated bids
for i, bid := range fetchedBids {
assert.Equal(t, bids[i], *bid)
assert.Equal(t, bids[i], bid)
}
}
@ -191,7 +191,7 @@ func TestAccounts(t *testing.T) {
assert.NoError(t, err)
// Compare fetched accounts vs generated accounts
for i, acc := range fetchedAccs {
assert.Equal(t, accs[i], *acc)
assert.Equal(t, accs[i], acc)
}
}

+12 -4 db/l2db/l2db.go

@ -7,6 +7,8 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
@ -133,14 +135,14 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
}
// GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]*common.PoolL2Tx, error) {
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx
err := meddler.QueryAll(
l2db.db, &txs,
selectPoolTx+"WHERE state = $1 AND token.usd IS NOT NULL",
common.PoolL2TxStatePending,
)
return txs, err
return db.SlicePtrsToSlice(txs).([]common.PoolL2Tx), err
}
// StartForging updates the state of the transactions that will begin the forging process.
@ -212,7 +214,10 @@ func (l2db *L2DB) CheckNonces(updatedAccounts []common.Account, batchNum common.
defer func() {
// Rollback the transaction if there was an error.
if err != nil {
err = txn.Rollback()
errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
}
}()
for i := 0; i < len(updatedAccounts); i++ {
@ -257,7 +262,10 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
defer func() {
// Rollback the transaction if there was an error.
if err != nil {
err = txn.Rollback()
errRollback := txn.Rollback()
if errRollback != nil {
log.Errorw("Rollback", "err", errRollback)
}
}
}()
// Delete pending txs that have been in the pool after the TTL if maxTxs is reached

+2 -2 db/l2db/l2db_test.go

@ -111,8 +111,8 @@ func TestGetPending(t *testing.T) {
fetchedTxs, err := l2DB.GetPendingTxs()
assert.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i, fetchedTx := range fetchedTxs {
assertTx(t, pendingTxs[i], fetchedTx)
for i := range fetchedTxs {
assertTx(t, pendingTxs[i], &fetchedTxs[i])
}
}

+8 -5 db/statedb/txprocessors.go

@ -36,7 +36,7 @@ type processedExit struct {
// type==TypeSynchronizer, assumes that the call is done from the Synchronizer,
// returns common.ExitTreeLeaf that is later used by the Synchronizer to update
// the HistoryDB, and adds Nonce & TokenID to the L2Txs.
func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, error) {
func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (*common.ZKInputs, []common.ExitInfo, error) {
var err error
var exitTree *merkletree.MerkleTree
@ -68,7 +68,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
}
// assumption: l1usertx are sorted by L1Tx.Position
for _, tx := range l1usertxs {
for i := range l1usertxs {
tx := &l1usertxs[i]
exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx)
if err != nil {
return nil, nil, err
@ -85,7 +86,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
s.i++
}
}
for _, tx := range l1coordinatortxs {
for i := range l1coordinatortxs {
tx := &l1coordinatortxs[i]
exitIdx, exitAccount, newExit, err := s.processL1Tx(exitTree, tx)
if err != nil {
return nil, nil, err
@ -105,7 +107,8 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
s.i++
}
}
for _, tx := range l2txs {
for i := range l2txs {
tx := &l2txs[i]
exitIdx, exitAccount, newExit, err := s.processL2Tx(exitTree, tx)
if err != nil {
return nil, nil, err
@ -194,7 +197,7 @@ func (s *StateDB) ProcessTxs(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs [
}
// getTokenIDsBigInt returns the list of TokenIDs in *big.Int format
func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.PoolL2Tx) ([]*big.Int, error) {
func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) ([]*big.Int, error) {
tokenIDs := make(map[common.TokenID]bool)
for i := 0; i < len(l1usertxs); i++ {
tokenIDs[l1usertxs[i].TokenID] = true
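Because ProcessTxs now receives value slices, its loops switch from ranging over elements to indexing and taking the element's address (tx := &l1usertxs[i]); ranging by value iterates over copies, so any mutation made through the copy would be lost. A small standalone sketch (illustrative struct only) of the difference:

package main

import "fmt"

type tx struct{ Nonce int }

func bump(t *tx) { t.Nonce++ }

func main() {
	txs := []tx{{Nonce: 1}, {Nonce: 2}}

	// Ranging by value hands bump a pointer to a copy: the slice is untouched.
	for _, t := range txs {
		bump(&t)
	}
	fmt.Println(txs) // [{1} {2}]

	// Indexing, as the loops above now do, mutates the elements in place.
	for i := range txs {
		bump(&txs[i])
	}
	fmt.Println(txs) // [{2} {3}]
}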

+29 -2 db/utils.go

@ -40,7 +40,7 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
if err != nil {
return nil, err
}
log.Info("successfully runt ", nMigrations, " migrations")
log.Info("successfully ran ", nMigrations, " migrations")
return db, nil
}
@ -52,7 +52,10 @@ func initMeddler() {
// BulkInsert performs a bulk insert with a single statement into the specified table. Example:
// `db.BulkInsert(myDB, "INSERT INTO block (eth_block_num, timestamp, hash) VALUES %s", blocks[:])`
// Note that all the columns must be specified in the query, and they must be in the same order as in the table.
// Note that all the columns must be specified in the query, and they must be
// in the same order as in the table.
// Note that the fields in the structs need to be defined in the same order as
// in the table columns.
func BulkInsert(db meddler.DB, q string, args interface{}) error {
arrayValue := reflect.ValueOf(args)
arrayLen := arrayValue.Len()
@ -150,3 +153,27 @@ func (b BigIntNullMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}
}
return base64.StdEncoding.EncodeToString(field.Bytes()), nil
}
// SliceToSlicePtrs converts any []Foo to []*Foo
func SliceToSlicePtrs(slice interface{}) interface{} {
v := reflect.ValueOf(slice)
vLen := v.Len()
typ := v.Type().Elem()
res := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(typ)), vLen, vLen)
for i := 0; i < vLen; i++ {
res.Index(i).Set(v.Index(i).Addr())
}
return res.Interface()
}
// SlicePtrsToSlice converts any []*Foo to []Foo
func SlicePtrsToSlice(slice interface{}) interface{} {
v := reflect.ValueOf(slice)
vLen := v.Len()
typ := v.Type().Elem().Elem()
res := reflect.MakeSlice(reflect.SliceOf(typ), vLen, vLen)
for i := 0; i < vLen; i++ {
res.Index(i).Set(v.Index(i).Elem())
}
return res.Interface()
}

+35 -0 db/utils_test.go

@ -0,0 +1,35 @@
package db
import (
"testing"
"github.com/stretchr/testify/assert"
)
type foo struct {
V int
}
func TestSliceToSlicePtrs(t *testing.T) {
n := 16
a := make([]foo, n)
for i := 0; i < n; i++ {
a[i] = foo{V: i}
}
b := SliceToSlicePtrs(a).([]*foo)
for i := 0; i < len(a); i++ {
assert.Equal(t, a[i], *b[i])
}
}
func TestSlicePtrsToSlice(t *testing.T) {
n := 16
a := make([]*foo, n)
for i := 0; i < n; i++ {
a[i] = &foo{V: i}
}
b := SlicePtrsToSlice(a).([]foo)
for i := 0; i < len(a); i++ {
assert.Equal(t, *a[i], b[i])
}
}

+2 -0 go.mod

@ -27,3 +27,5 @@ require (
golang.org/x/tools/gopls v0.5.0 // indirect
gopkg.in/go-playground/validator.v9 v9.29.1
)
// replace github.com/russross/meddler => /home/dev/git/iden3/hermez/meddler

+6 -2 node/node.go

@ -1,6 +1,7 @@
package node
import (
"context"
"time"
"github.com/ethereum/go-ethereum/ethclient"
@ -83,7 +84,10 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
}
client := eth.NewClient(ethClient, nil, nil, nil)
sync := synchronizer.NewSynchronizer(client, historyDB, stateDB)
sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB)
if err != nil {
return nil, err
}
var coord *coordinator.Coordinator
if mode == ModeCoordinator {
@ -223,7 +227,7 @@ func (n *Node) StartSynchronizer() {
log.Info("Coordinator stopped")
return
case <-time.After(n.cfg.Synchronizer.SyncLoopInterval.Duration):
if err := n.sync.Sync(); err != nil {
if err := n.sync.Sync(context.TODO()); err != nil {
log.Errorw("Synchronizer.Sync", "error", err)
}
}

+128 -111 synchronizer/synchronizer.go

@ -20,93 +20,102 @@ var (
// rollupData contains information returned by the Rollup SC
type rollupData struct {
l1Txs []*common.L1Tx
batches []*BatchData
l1UserTxs []common.L1Tx
batches []historydb.BatchData
// withdrawals []*common.ExitInfo
registeredTokens []*common.Token
rollupVars *common.RollupVars
registeredTokens []common.Token
vars *common.RollupVars
}
// NewRollupData creates an empty rollupData with the slices initialized.
func newRollupData() rollupData {
return rollupData{
l1Txs: make([]*common.L1Tx, 0),
batches: make([]*BatchData, 0),
l1UserTxs: make([]common.L1Tx, 0),
batches: make([]historydb.BatchData, 0),
// withdrawals: make([]*common.ExitInfo, 0),
registeredTokens: make([]*common.Token, 0),
registeredTokens: make([]common.Token, 0),
}
}
// auctionData contains information returned by the Action SC
type auctionData struct {
bids []*common.Bid
coordinators []*common.Coordinator
auctionVars *common.AuctionVars
bids []common.Bid
coordinators []common.Coordinator
vars *common.AuctionVars
}
// newAuctionData creates an empty auctionData with the slices initialized.
func newAuctionData() *auctionData {
return &auctionData{
bids: make([]*common.Bid, 0),
coordinators: make([]*common.Coordinator, 0),
bids: make([]common.Bid, 0),
coordinators: make([]common.Coordinator, 0),
}
}
// BatchData contains information about Batches from the contracts
type BatchData struct {
l1UserTxs []*common.L1Tx
l1CoordinatorTxs []*common.L1Tx
l2Txs []*common.L2Tx
createdAccounts []*common.Account
exitTree []common.ExitInfo
batch *common.Batch
type wdelayerData struct {
vars *common.WithdrawDelayerVars
}
// BatchData contains information about Batches from the contracts
// type BatchData struct {
// l1UserTxs []*common.L1Tx
// l1CoordinatorTxs []*common.L1Tx
// l2Txs []*common.L2Tx
// createdAccounts []*common.Account
// exitTree []*common.ExitInfo
// batch *common.Batch
// }
// NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData {
return &BatchData{
l1UserTxs: make([]*common.L1Tx, 0),
l1CoordinatorTxs: make([]*common.L1Tx, 0),
l2Txs: make([]*common.L2Tx, 0),
createdAccounts: make([]*common.Account, 0),
exitTree: make([]common.ExitInfo, 0),
}
}
// func NewBatchData() *BatchData {
// return &BatchData{
// l1UserTxs: make([]*common.L1Tx, 0),
// l1CoordinatorTxs: make([]*common.L1Tx, 0),
// l2Txs: make([]*common.L2Tx, 0),
// createdAccounts: make([]*common.Account, 0),
// exitTree: make([]*common.ExitInfo, 0),
// }
// }
// BlockData contains information about Blocks from the contracts
type BlockData struct {
block *common.Block
// Rollup
l1Txs []*common.L1Tx // TODO: Answer: User? Coordinator? Both?
batches []*BatchData // TODO: Also contains L1Txs!
// withdrawals []*common.ExitInfo // TODO
registeredTokens []*common.Token
rollupVars *common.RollupVars
// Auction
bids []*common.Bid
coordinators []*common.Coordinator
auctionVars *common.AuctionVars
// WithdrawalDelayer
withdrawalDelayerVars *common.WithdrawalDelayerVars
}
// type blockData struct {
// Block *common.Block
// // Rollup
// L1Txs []*common.L1Tx // TODO: Answer: User? Coordinator? Both?
// Batches []*BatchData // TODO: Also contains L1Txs!
// // withdrawals []*common.ExitInfo // TODO
// RegisteredTokens []common.Token
// RollupVars *common.RollupVars
// // Auction
// Bids []*common.Bid
// Coordinators []*common.Coordinator
// AuctionVars *common.AuctionVars
// // WithdrawalDelayer
// WithdrawalDelayerVars *common.WithdrawalDelayerVars
// }
// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient *eth.Client
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
firstSavedBlock *common.Block
mux sync.Mutex
ethClient eth.ClientInterface
auctionConstants eth.AuctionConstants
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
firstSavedBlock *common.Block
mux sync.Mutex
}
// NewSynchronizer creates a new Synchronizer
func NewSynchronizer(ethClient *eth.Client, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) *Synchronizer {
s := &Synchronizer{
ethClient: ethClient,
historyDB: historyDB,
stateDB: stateDB,
func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) (*Synchronizer, error) {
auctionConstants, err := ethClient.AuctionConstants()
if err != nil {
return nil, err
}
return s
return &Synchronizer{
ethClient: ethClient,
auctionConstants: *auctionConstants,
historyDB: historyDB,
stateDB: stateDB,
}, nil
}
// TODO: Be smart about locking: only lock during the read/write operations
@ -116,7 +125,7 @@ func NewSynchronizer(ethClient *eth.Client, historyDB *historydb.HistoryDB, stat
// TODO: Add argument: maximum number of blocks to process
// TODO: Check reorgs in the middle of syncing a block. Probably make
// rollupSync, auctionSync and withdrawalSync return the block hash.
func (s *Synchronizer) Sync() error {
func (s *Synchronizer) Sync(ctx context.Context) error {
// Avoid new sync while performing one
s.mux.Lock()
defer s.mux.Unlock()
@ -130,11 +139,10 @@ func (s *Synchronizer) Sync() error {
}
// If we don't have any stored block, we must do a full sync starting from the rollup genesis block
if err == sql.ErrNoRows {
// TODO: Query rollup constants and genesis information, store them
nextBlockNum = 1234 // TODO: Replace this with genesisBlockNum
nextBlockNum = s.auctionConstants.GenesisBlockNum
} else {
// Get the latest block we have in History DB from blockchain to detect a reorg
ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), lastSavedBlock.EthBlockNum)
ethBlock, err := s.ethClient.EthBlockByNumber(ctx, lastSavedBlock.EthBlockNum)
if err != nil {
return err
}
@ -165,7 +173,7 @@ func (s *Synchronizer) Sync() error {
log.Debugf("Blocks to sync: %v (firstBlockToSync: %v, latestBlock: %v)", latestBlockNum-nextBlockNum+1, nextBlockNum, latestBlockNum)
for nextBlockNum < latestBlockNum {
for nextBlockNum <= latestBlockNum {
ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), nextBlockNum)
if err != nil {
return err
@ -195,34 +203,40 @@ func (s *Synchronizer) Sync() error {
}
// Group all the block data into the structs to save into HistoryDB
var blockData BlockData
var blockData historydb.BlockData
blockData.block = ethBlock
blockData.Block = ethBlock
if rollupData != nil {
blockData.l1Txs = rollupData.l1Txs
blockData.batches = rollupData.batches
blockData.L1UserTxs = rollupData.l1UserTxs
blockData.Batches = rollupData.batches
// blockData.withdrawals = rollupData.withdrawals // TODO
blockData.registeredTokens = rollupData.registeredTokens
blockData.rollupVars = rollupData.rollupVars
blockData.RegisteredTokens = rollupData.registeredTokens
blockData.RollupVars = rollupData.vars
}
if auctionData != nil {
blockData.bids = auctionData.bids
blockData.coordinators = auctionData.coordinators
blockData.auctionVars = auctionData.auctionVars
blockData.Bids = auctionData.bids
blockData.Coordinators = auctionData.coordinators
blockData.AuctionVars = auctionData.vars
}
if wdelayerData != nil {
blockData.withdrawalDelayerVars = wdelayerData
blockData.WithdrawDelayerVars = wdelayerData.vars
}
// Add rollupData and auctionData once the method is updated
// TODO: Save Whole Struct -> AddBlockSCData(blockData)
err = s.historyDB.AddBlock(blockData.block)
log.Debugw("Sync()", "block", blockData)
// err = s.historyDB.AddBlock(blockData.Block)
// if err != nil {
// return err
// }
err = s.historyDB.AddBlockSCData(&blockData)
if err != nil {
return err
}
nextBlockNum++
}
return nil
@ -358,18 +372,18 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
// }
// Get L1UserTX
rollupData.l1Txs, err = getL1UserTx(rollupEvents.L1UserTx, blockNum)
rollupData.l1UserTxs, err = getL1UserTx(rollupEvents.L1UserTx, blockNum)
if err != nil {
return nil, err
}
// Get ForgeBatch events to get the L1CoordinatorTxs
for _, fbEvent := range rollupEvents.ForgeBatch {
batchData := NewBatchData()
for _, evtForgeBatch := range rollupEvents.ForgeBatch {
batchData := historydb.NewBatchData()
position := 0
// Get the input for each Tx
forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(fbEvent.EthTxHash)
forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash)
if err != nil {
return nil, err
}
@ -399,14 +413,14 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
l1CoordinatorTx.UserOrigin = false
l1CoordinatorTx.EthBlockNum = blockNum
bn := new(common.BatchNum)
*bn = common.BatchNum(fbEvent.BatchNum)
*bn = common.BatchNum(evtForgeBatch.BatchNum)
l1CoordinatorTx.BatchNum = bn
l1CoordinatorTx, err = common.NewL1Tx(l1CoordinatorTx)
l1Tx, err := common.NewL1Tx(l1CoordinatorTx)
if err != nil {
return nil, err
}
batchData.l1CoordinatorTxs = append(batchData.l1CoordinatorTxs, l1CoordinatorTx)
batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx)
// Check if we have to register an account
// if l1CoordinatorTx.FromIdx == 0 {
@ -435,7 +449,7 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
// TODO: Get createdAccounts from ProcessTxs()
// TODO: Get CollectedFees from ProcessTxs()
// TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
_, exitInfo, err := s.stateDB.ProcessTxs(batchData.l1UserTxs, batchData.l1CoordinatorTxs, poolL2Txs)
_, exitInfo, err := s.stateDB.ProcessTxs(batchData.L1UserTxs, batchData.L1CoordinatorTxs, poolL2Txs)
if err != nil {
return nil, err
}
@ -444,13 +458,13 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
if err != nil {
return nil, err
}
batchData.l2Txs = append(batchData.l2Txs, l2Txs...)
batchData.L2Txs = append(batchData.L2Txs, l2Txs...)
batchData.exitTree = exitInfo
batchData.ExitTree = exitInfo
// Get Batch information
batch := &common.Batch{
BatchNum: common.BatchNum(fbEvent.BatchNum),
BatchNum: common.BatchNum(evtForgeBatch.BatchNum),
EthBlockNum: blockNum,
// ForgerAddr: , TODO: Get it from ethClient -> Add ForgerAddr to RollupEventForgeBatch
// CollectedFees: , TODO: Clarify where to get them if they are still needed
@ -460,19 +474,23 @@ func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
ForgeL1TxsNum: &forgeL1TxsNum,
// SlotNum: TODO: Calculate once ethClient provides the info // calculate from blockNum + ethClient Constants
}
batchData.batch = batch
rollupData.batches = append(rollupData.batches, batchData)
batchData.Batch = batch
rollupData.batches = append(rollupData.batches, *batchData)
}
// Get Registered Tokens
for _, eAddToken := range rollupEvents.AddToken {
var token *common.Token
for _, evtAddToken := range rollupEvents.AddToken {
var token common.Token
token.TokenID = common.TokenID(eAddToken.TokenID)
token.EthAddr = eAddToken.Address
token.TokenID = common.TokenID(evtAddToken.TokenID)
token.EthAddr = evtAddToken.Address
token.EthBlockNum = blockNum
// TODO: Add external information consulting SC about it using Address
token.Name = "TODO"
token.Symbol = "TODO"
token.Decimals = 8 // TODO
rollupData.registeredTokens = append(rollupData.registeredTokens, token)
}
@ -498,22 +516,22 @@ func (s *Synchronizer) auctionSync(blockNum int64) (*auctionData, error) {
}
// Get bids
for _, eNewBid := range auctionEvents.NewBid {
bid := &common.Bid{
SlotNum: common.SlotNum(eNewBid.Slot),
BidValue: eNewBid.BidAmount,
Bidder: eNewBid.Bidder,
for _, evtNewBid := range auctionEvents.NewBid {
bid := common.Bid{
SlotNum: common.SlotNum(evtNewBid.Slot),
BidValue: evtNewBid.BidAmount,
Bidder: evtNewBid.Bidder,
EthBlockNum: blockNum,
}
auctionData.bids = append(auctionData.bids, bid)
}
// Get Coordinators
for _, eNewCoordinator := range auctionEvents.SetCoordinator {
coordinator := &common.Coordinator{
Bidder: eNewCoordinator.BidderAddress,
Forger: eNewCoordinator.ForgerAddress,
URL: eNewCoordinator.CoordinatorURL,
for _, evtSetCoordinator := range auctionEvents.SetCoordinator {
coordinator := common.Coordinator{
Bidder: evtSetCoordinator.BidderAddress,
Forger: evtSetCoordinator.ForgerAddress,
URL: evtSetCoordinator.CoordinatorURL,
}
auctionData.coordinators = append(auctionData.coordinators, coordinator)
}
@ -537,7 +555,7 @@ func (s *Synchronizer) auctionSync(blockNum int64) (*auctionData, error) {
}
// wdelayerSync gets information from the Withdrawal Delayer Contract
func (s *Synchronizer) wdelayerSync(blockNum int64) (*common.WithdrawalDelayerVars, error) {
func (s *Synchronizer) wdelayerSync(blockNum int64) (*wdelayerData, error) {
// TODO: VARS
// TODO: CONSTANTS
@ -559,24 +577,23 @@ func (s *Synchronizer) wdelayerSync(blockNum int64) (*common.WithdrawalDelayerVa
// return forgeBatchArgs.NewLastIdx + 1, nil
// }
func getL1UserTx(l1UserTxEvents []eth.RollupEventL1UserTx, blockNum int64) ([]*common.L1Tx, error) {
l1Txs := make([]*common.L1Tx, 0)
func getL1UserTx(eventsL1UserTx []eth.RollupEventL1UserTx, blockNum int64) ([]common.L1Tx, error) {
l1Txs := make([]common.L1Tx, 0)
for _, eL1UserTx := range l1UserTxEvents {
for _, evtL1UserTx := range eventsL1UserTx {
// Fill additional Tx fields
toForge := new(int64)
*toForge = eL1UserTx.ToForgeL1TxsNum
eL1UserTx.L1Tx.ToForgeL1TxsNum = toForge
eL1UserTx.L1Tx.Position = eL1UserTx.Position
eL1UserTx.L1Tx.UserOrigin = true
eL1UserTx.L1Tx.EthBlockNum = blockNum
nL1Tx, err := common.NewL1Tx(&eL1UserTx.L1Tx)
toForge := evtL1UserTx.ToForgeL1TxsNum
evtL1UserTx.L1Tx.ToForgeL1TxsNum = &toForge
evtL1UserTx.L1Tx.Position = evtL1UserTx.Position
evtL1UserTx.L1Tx.UserOrigin = true
evtL1UserTx.L1Tx.EthBlockNum = blockNum
nL1Tx, err := common.NewL1Tx(&evtL1UserTx.L1Tx)
if err != nil {
return nil, err
}
eL1UserTx.L1Tx = *nL1Tx
evtL1UserTx.L1Tx = *nL1Tx
l1Txs = append(l1Txs, &eL1UserTx.L1Tx)
l1Txs = append(l1Txs, evtL1UserTx.L1Tx)
}
return l1Txs, nil
}
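Tying this together with the node.go and synchronizer_test.go changes, the updated wiring looks roughly like the compile-only sketch below (the synchronizer import path is assumed from the repository layout): the constructor can now fail because it fetches the auction constants up front, and Sync takes a context.

package example

import (
	"context"

	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/hermez-node/db/statedb"
	"github.com/hermeznetwork/hermez-node/eth"
	"github.com/hermeznetwork/hermez-node/synchronizer"
)

// syncOnce builds a Synchronizer and runs a single sync iteration.
func syncOnce(ctx context.Context, client eth.ClientInterface, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) error {
	sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB)
	if err != nil {
		return err
	}
	return sync.Sync(ctx)
}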

+59 -14 synchronizer/synchronizer_test.go

@ -1,25 +1,37 @@
package synchronizer
import (
"context"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/ethereum/go-ethereum/ethclient"
ethCommon "github.com/ethereum/go-ethereum/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test(t *testing.T) {
type timer struct {
time int64
}
func (t *timer) Time() int64 {
currentTime := t.time
t.time++
return currentTime
}
func TestSync(t *testing.T) {
// Int State DB
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
sdb, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32)
stateDB, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32)
assert.Nil(t, err)
// Init History DB
@ -27,23 +39,56 @@ func Test(t *testing.T) {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.Nil(t, err)
historyDB := historydb.NewHistoryDB(db)
err = historyDB.Reorg(0)
// Clear DB
err = historyDB.Reorg(-1)
assert.Nil(t, err)
// Init eth client
ehtClientDialURL := os.Getenv("ETHCLIENT_DIAL_URL")
ethClient, err := ethclient.Dial(ehtClientDialURL)
var timer timer
clientSetup := test.NewClientSetupExample()
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer
s, err := NewSynchronizer(client, historyDB, stateDB)
require.Nil(t, err)
client := eth.NewClient(ethClient, nil, nil, nil)
// Test Sync for ethereum genesis block
err = s.Sync(context.Background())
require.Nil(t, err)
// Create Synchronizer
s := NewSynchronizer(client, historyDB, sdb)
require.NotNil(t, s)
blocks, err := s.historyDB.GetBlocks(0, 9999)
require.Nil(t, err)
assert.Equal(t, int64(0), blocks[0].EthBlockNum)
// Test Sync for a block with new Tokens and L1UserTxs
// accounts := test.GenerateKeys(t, []string{"A", "B", "C", "D"})
l1UserTxs, _, _, _ := test.GenerateTestTxsFromSet(t, `
A (1): 10
A (2): 20
B (1): 5
C (1): 8
D (3): 15
> advance batch
`)
require.Greater(t, len(l1UserTxs[0]), 0)
// require.Greater(t, len(tokens), 0)
for i := 1; i <= 3; i++ {
_, err := client.RollupAddToken(ethCommon.BigToAddress(big.NewInt(int64(i * 10000))))
require.Nil(t, err)
}
// Test Sync
// err = s.Sync()
// require.Nil(t, err)
for i := range l1UserTxs[0] {
client.CtlAddL1TxUser(&l1UserTxs[0][i])
}
client.CtlMineBlock()
err = s.Sync(context.Background())
require.Nil(t, err)
getTokens, err := s.historyDB.GetTokens()
require.Nil(t, err)
assert.Equal(t, 3, len(getTokens))
// TODO: Reorg will be properly tested once we have the mock ethClient implemented
/*

+10 -5 test/ethclient.go

@ -329,10 +329,11 @@ func NewClient(l bool, timer Timer, addr *ethCommon.Address, setup *ClientSetup)
blockCurrent := Block{
Rollup: &RollupBlock{
State: eth.RollupState{
StateRoot: big.NewInt(0),
ExitRoots: make([]*big.Int, 0),
ExitNullifierMap: make(map[[256 / 8]byte]bool),
TokenList: make([]ethCommon.Address, 0),
StateRoot: big.NewInt(0),
ExitRoots: make([]*big.Int, 0),
ExitNullifierMap: make(map[[256 / 8]byte]bool),
// TokenID = 0 is ETH. Set first entry in TokenList with 0x0 address for ETH.
TokenList: []ethCommon.Address{{}},
TokenMap: make(map[ethCommon.Address]bool),
MapL1TxQueue: mapL1TxQueue,
LastL1L2Batch: 0,
@ -597,7 +598,11 @@ func (c *Client) CtlAddL1TxUser(l1Tx *common.L1Tx) {
panic("l1Tx.TokenID + 1 > len(r.State.TokenList)")
}
queue.L1TxQueue = append(queue.L1TxQueue, *l1Tx)
r.Events.L1UserTx = append(r.Events.L1UserTx, eth.RollupEventL1UserTx{L1Tx: *l1Tx})
r.Events.L1UserTx = append(r.Events.L1UserTx, eth.RollupEventL1UserTx{
L1Tx: *l1Tx,
ToForgeL1TxsNum: r.State.LastToForgeL1TxsNum,
Position: len(queue.L1TxQueue) - 1,
})
}
type transactionData struct {

+11 -4 test/historydb.go

@ -152,6 +152,12 @@ func GenL1Txs(
LoadAmountUSD: lUSD,
EthBlockNum: blocks[i%len(blocks)].EthBlockNum,
}
if tx.UserOrigin {
n := nextTxsNum
tx.ToForgeL1TxsNum = &n
} else {
tx.BatchNum = &batches[i%len(batches)].BatchNum
}
nTx, err := common.NewL1Tx(&tx)
if err != nil {
panic(err)
@ -163,7 +169,8 @@ func GenL1Txs(
setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs)
} else {
// Add unforged txs
tx.ToForgeL1TxsNum = nextTxsNum
n := nextTxsNum
tx.ToForgeL1TxsNum = &n
tx.UserOrigin = true
setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs)
}
@ -172,13 +179,13 @@ func GenL1Txs(
}
// GetNextToForgeNumAndBatch returns the next BatchNum and ForgeL1TxsNum to be added
func GetNextToForgeNumAndBatch(batches []common.Batch) (common.BatchNum, *int64) {
func GetNextToForgeNumAndBatch(batches []common.Batch) (common.BatchNum, int64) {
batchNum := batches[len(batches)-1].BatchNum + 1
toForgeL1TxsNum := new(int64)
var toForgeL1TxsNum int64
found := false
for i := len(batches) - 1; i >= 0; i-- {
if batches[i].ForgeL1TxsNum != nil {
*toForgeL1TxsNum = *batches[i].ForgeL1TxsNum + 1
toForgeL1TxsNum = *batches[i].ForgeL1TxsNum + 1
found = true
break
}

+16 -15 test/txs.go

@ -52,16 +52,16 @@ func GenerateKeys(t *testing.T, accNames []string) map[string]*Account {
// GenerateTestTxs generates L1Tx & PoolL2Tx in a deterministic way for the
// given Instructions.
func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx, []common.Token) {
func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]common.L1Tx, [][]common.L1Tx, [][]common.PoolL2Tx, []common.Token) {
accounts := GenerateKeys(t, instructions.Accounts)
l1CreatedAccounts := make(map[string]*Account)
var batchL1Txs []*common.L1Tx
var batchCoordinatorL1Txs []*common.L1Tx
var batchPoolL2Txs []*common.PoolL2Tx
var l1Txs [][]*common.L1Tx
var coordinatorL1Txs [][]*common.L1Tx
var poolL2Txs [][]*common.PoolL2Tx
var batchL1Txs []common.L1Tx
var batchCoordinatorL1Txs []common.L1Tx
var batchPoolL2Txs []common.PoolL2Tx
var l1Txs [][]common.L1Tx
var coordinatorL1Txs [][]common.L1Tx
var poolL2Txs [][]common.PoolL2Tx
idx := 256
for _, inst := range instructions.Instructions {
switch inst.Type {
@ -71,10 +71,11 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
FromEthAddr: accounts[idxTokenIDToString(inst.From, inst.TokenID)].Addr,
FromBJJ: accounts[idxTokenIDToString(inst.From, inst.TokenID)].BJJ.Public(),
TokenID: inst.TokenID,
Amount: big.NewInt(0),
LoadAmount: big.NewInt(int64(inst.Amount)),
Type: common.TxTypeCreateAccountDeposit,
}
batchL1Txs = append(batchL1Txs, &tx)
batchL1Txs = append(batchL1Txs, tx)
if accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx == common.Idx(0) { // if account.Idx is not set yet, set it and increment idx
accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx = common.Idx(idx)
@ -93,7 +94,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
}
accounts[idxTokenIDToString(inst.To, inst.TokenID)].Idx = common.Idx(idx)
l1CreatedAccounts[idxTokenIDToString(inst.To, inst.TokenID)] = accounts[idxTokenIDToString(inst.To, inst.TokenID)]
batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, &tx)
batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, tx)
idx++
}
toIdx := new(common.Idx)
@ -132,7 +133,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
tx.Signature = sig
accounts[idxTokenIDToString(inst.From, inst.TokenID)].Nonce++
batchPoolL2Txs = append(batchPoolL2Txs, &tx)
batchPoolL2Txs = append(batchPoolL2Txs, tx)
case common.TxTypeExit, common.TxTypeForceExit:
fromIdx := new(common.Idx)
@ -144,14 +145,14 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
Amount: big.NewInt(int64(inst.Amount)),
Type: common.TxTypeExit,
}
batchL1Txs = append(batchL1Txs, &tx)
batchL1Txs = append(batchL1Txs, tx)
case TypeNewBatch:
l1Txs = append(l1Txs, batchL1Txs)
coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
batchL1Txs = []*common.L1Tx{}
batchCoordinatorL1Txs = []*common.L1Tx{}
batchPoolL2Txs = []*common.PoolL2Tx{}
batchL1Txs = []common.L1Tx{}
batchCoordinatorL1Txs = []common.L1Tx{}
batchPoolL2Txs = []common.PoolL2Tx{}
default:
continue
}
@ -184,7 +185,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
// GenerateTestTxsFromSet returns the L1 & L2 transactions for a given Set of
// Instructions code
func GenerateTestTxsFromSet(t *testing.T, set string) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx, []common.Token) {
func GenerateTestTxsFromSet(t *testing.T, set string) ([][]common.L1Tx, [][]common.L1Tx, [][]common.PoolL2Tx, []common.Token) {
parser := NewParser(strings.NewReader(set))
instructions, err := parser.Parse()
require.Nil(t, err)

+7 -7 txselector/txselector.go

@ -16,7 +16,7 @@ import (
)
// txs implements the interface Sort for an array of Tx
type txs []*common.PoolL2Tx
type txs []common.PoolL2Tx
func (t txs) Len() int {
return len(t)
@ -68,7 +68,7 @@ func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
}
// GetL2TxSelection returns a selection of the L2Txs for the next batch, from the L2DB pool
func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.PoolL2Tx, error) {
func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]common.PoolL2Tx, error) {
// get pending l2-tx from tx-pool
l2TxsRaw, err := txsel.l2db.GetPendingTxs() // once l2db ready, maybe use parameter 'batchNum'
if err != nil {
@ -98,7 +98,7 @@ func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.P
}
// GetL1L2TxSelection returns the selection of L1 + L2 txs
func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []common.L1Tx) ([]common.L1Tx, []common.L1Tx, []common.PoolL2Tx, error) {
// apply l1-user-tx to localAccountDB
// create new leaves
// update balances
@ -111,7 +111,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
}
var validTxs txs
var l1CoordinatorTxs []*common.L1Tx
var l1CoordinatorTxs []common.L1Tx
positionL1 := len(l1Txs)
// if tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB, if so,
@ -181,7 +181,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
validTxs = append(validTxs, l2TxsRaw[i])
}
// create L1CoordinatorTx for the accountCreation
l1CoordinatorTx := &common.L1Tx{
l1CoordinatorTx := common.L1Tx{
Position: positionL1,
UserOrigin: false,
FromEthAddr: accAuth.EthAddr,
@ -210,7 +210,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
log.Warn("l2TxsRaw[i].ToEthAddr should not be nil")
continue
}
l1CoordinatorTx := &common.L1Tx{
l1CoordinatorTx := common.L1Tx{
Position: positionL1,
UserOrigin: false,
FromEthAddr: *l2TxsRaw[i].ToEthAddr,
@ -254,7 +254,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1Txs []*c
return l1Txs, l1CoordinatorTxs, l2Txs, nil
}
func checkAlreadyPendingToCreate(l1CoordinatorTxs []*common.L1Tx, addr *ethCommon.Address, bjj *babyjub.PublicKey) bool {
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, addr *ethCommon.Address, bjj *babyjub.PublicKey) bool {
if addr == nil {
log.Warn("The provided addr is nil")
return false

+2 -2 txselector/txselector_test.go

@ -35,9 +35,9 @@ func initTest(t *testing.T, testSet string) *TxSelector {
return txsel
}
func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []*common.PoolL2Tx) {
func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []common.PoolL2Tx) {
for i := 0; i < len(poolL2Txs); i++ {
err := txsel.l2db.AddTxTest(poolL2Txs[i])
err := txsel.l2db.AddTxTest(&poolL2Txs[i])
require.Nil(t, err)
}
}
