mirror of https://github.com/arnaucube/hermez-node.git
Merge pull request #135 from hermeznetwork/feature/synchronizer-sc
Smart Contracts Data Synchronization
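For orientation, a minimal sketch of how a caller might drive the Synchronizer added by this PR. NewSynchronizer, Sync, Status and the common.SyncStatus fields are taken from the diff below; the synchronizer/eth import paths, the polling interval and the logging are illustrative assumptions, not part of this commit.

    // Hypothetical driver loop for the Synchronizer introduced in this PR.
    // The 10s ticker and import paths for eth/synchronizer are assumptions.
    package main

    import (
        "log"
        "time"

        "github.com/hermeznetwork/hermez-node/db/historydb"
        "github.com/hermeznetwork/hermez-node/db/statedb"
        "github.com/hermeznetwork/hermez-node/eth"
        "github.com/hermeznetwork/hermez-node/synchronizer"
    )

    func runSync(client *eth.Client, hdb *historydb.HistoryDB, sdb *statedb.StateDB) {
        s := synchronizer.NewSynchronizer(client, hdb, sdb)
        for range time.Tick(10 * time.Second) { // assumed polling interval
            if err := s.Sync(); err != nil {
                log.Printf("sync error: %v", err)
                continue
            }
            status, err := s.Status()
            if err != nil {
                log.Printf("status error: %v", err)
                continue
            }
            log.Printf("block %d, batch %d, synchronized=%v",
                status.CurrentBlock, status.CurrentBatch, status.Synchronized)
        }
    }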
@@ -19,7 +19,7 @@ type Batch struct {
 	StateRoot     Hash    `meddler:"state_root"`
 	NumAccounts   int     `meddler:"num_accounts"`
 	ExitRoot      Hash    `meddler:"exit_root"`
-	ForgeL1TxsNum uint32  `meddler:"forge_l1_txs_num"` // optional, Only when the batch forges L1 txs. Identifier that corresponds to the group of L1 txs forged in the current batch.
+	ForgeL1TxsNum int64   `meddler:"forge_l1_txs_num"` // optional, Only when the batch forges L1 txs. Identifier that corresponds to the group of L1 txs forged in the current batch.
 	SlotNum       SlotNum `meddler:"slot_num"` // Slot in which the batch is forged
 }
 
@@ -19,7 +19,7 @@ const (
 type L1Tx struct {
 	// Stored in DB: mandatory fileds
 	TxID            TxID
-	ToForgeL1TxsNum uint32 // toForgeL1TxsNum in which the tx was forged / will be forged
+	ToForgeL1TxsNum int64  // toForgeL1TxsNum in which the tx was forged / will be forged
 	Position        int
 	UserOrigin      bool // true if the tx was originated by a user, false if it was aoriginated by a coordinator. Note that this differ from the spec for implementation simplification purpposes
 	FromIdx         Idx  // FromIdx is used by L1Tx/Deposit to indicate the Idx receiver of the L1Tx.LoadAmount (deposit)
@@ -2,6 +2,7 @@ package common
 
 import (
 	"math/big"
+	"time"
 
 	eth "github.com/ethereum/go-ethereum/common"
 )
@@ -30,6 +31,16 @@ type AuctionVars struct {
 	AllocationRatio AllocationRatio
 }
 
+// WithdrawalDelayerVars contains the Withdrawal Delayer smart contract variables
+type WithdrawalDelayerVars struct {
+	HermezRollupAddress        eth.Address
+	HermezGovernanceDAOAddress eth.Address
+	WhiteHackGroupAddress      eth.Address
+	WithdrawalDelay            uint
+	EmergencyModeStartingTime  time.Time
+	EmergencyModeEnabled       bool
+}
+
 // MinBidSlots TODO
 type MinBidSlots [6]uint
 
common/syncstatus.go (new file, +12)
@@ -0,0 +1,12 @@
+package common
+
+import ethCommon "github.com/ethereum/go-ethereum/common"
+
+// SyncStatus is returned by the Status method of the Synchronizer
+type SyncStatus struct {
+	CurrentBlock      int64
+	CurrentBatch      BatchNum
+	CurrentForgerAddr ethCommon.Address
+	NextForgerAddr    ethCommon.Address
+	Synchronized      bool
+}
@@ -54,7 +54,7 @@ type Tx struct {
 	BatchNum        BatchNum           `meddler:"batch_num,zeroisnull"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0
 	EthBlockNum     int64              `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
 	// L1
-	ToForgeL1TxsNum uint32             `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
+	ToForgeL1TxsNum int64              `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
 	UserOrigin      bool               `meddler:"user_origin"` // true if the tx was originated by a user, false if it was aoriginated by a coordinator. Note that this differ from the spec for implementation simplification purpposes
 	FromEthAddr     ethCommon.Address  `meddler:"from_eth_addr"`
 	FromBJJ         *babyjub.PublicKey `meddler:"from_bjj"`
@@ -14,6 +14,8 @@ import (
 	"github.com/russross/meddler"
 )
 
+// TODO(Edu): Document here how HistoryDB is kept consistent
+
 // HistoryDB persist the historic of the rollup
 type HistoryDB struct {
 	db *sqlx.DB
@@ -127,10 +129,11 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
 	return batchNum, row.Scan(&batchNum)
 }
 
-// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB
-func (hdb *HistoryDB) GetLastL1TxsNum() (uint32, error) {
+// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB. If there's no
+// batch in the DB (nil, nil) is returned.
+func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
 	row := hdb.db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
-	var lastL1TxsNum uint32
+	lastL1TxsNum := new(int64)
 	return lastL1TxsNum, row.Scan(&lastL1TxsNum)
 }
 
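A note on the GetLastL1TxsNum change above: returning *int64 works for an empty batch table because database/sql's Scan sets a pointer destination to nil when the scanned value is NULL, which is what MAX() yields over zero rows. A minimal standalone sketch of the same pattern, using plain database/sql instead of the sqlx/meddler setup of HistoryDB (requires `import "database/sql"`):

    // lastL1TxsNum mirrors the pattern used by GetLastL1TxsNum: scanning a
    // possibly-NULL MAX() into a **int64 leaves the pointer nil when the
    // table is empty, so callers can distinguish "no batches" from 0.
    func lastL1TxsNum(db *sql.DB) (*int64, error) {
        n := new(int64)
        row := db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
        return n, row.Scan(&n)
    }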
@@ -89,8 +89,12 @@ func TestBatches(t *testing.T) {
 	// Generate fake batches
 	const nBatches = 9
 	batches := test.GenBatches(nBatches, blocks)
+	// Test GetLastL1TxsNum with no batches
+	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
+	assert.NoError(t, err)
+	assert.Nil(t, fetchedLastL1TxsNum)
 	// Add batches to the DB
-	err := historyDB.AddBatches(batches)
+	err = historyDB.AddBatches(batches)
 	assert.NoError(t, err)
 	// Get batches from the DB
 	fetchedBatches, err := historyDB.GetBatches(0, common.BatchNum(nBatches))
@@ -103,9 +107,9 @@ func TestBatches(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
 	// Test GetLastL1TxsNum
-	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
+	fetchedLastL1TxsNum, err = historyDB.GetLastL1TxsNum()
 	assert.NoError(t, err)
-	assert.Equal(t, batches[nBatches-1].ForgeL1TxsNum, fetchedLastL1TxsNum)
+	assert.Equal(t, batches[nBatches-1].ForgeL1TxsNum, *fetchedLastL1TxsNum)
 }
 
 func TestBids(t *testing.T) {
@@ -16,6 +16,8 @@ import (
 	"github.com/russross/meddler"
 )
 
+// TODO(Edu): Check DB consistency while there's concurrent use from Coordinator/TxSelector & API
+
 // L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant
 // due to them being forged or invalid after a safety period
 type L2DB struct {
@@ -13,6 +13,8 @@ import (
 	"github.com/iden3/go-merkletree/db/pebble"
 )
 
+// TODO(Edu): Document here how StateDB is kept consistent
+
 // ErrStateDBWithoutMT is used when a method that requires a MerkleTree is
 // called in a StateDB that does not have a MerkleTree defined
 var ErrStateDBWithoutMT = errors.New("Can not call method to use MerkleTree in a StateDB without MerkleTree")
@@ -92,8 +92,8 @@ type RollupState struct {
 // RollupEventL1UserTx is an event of the Rollup Smart Contract
 type RollupEventL1UserTx struct {
 	L1Tx common.L1Tx
-	QueueIndex       *big.Int
-	TransactionIndex *big.Int
+	ToForgeL1TxsNum int64 // QueueIndex *big.Int
+	Position        int   // TransactionIndex *big.Int
 }
 
 // RollupEventAddToken is an event of the Rollup Smart Contract
@@ -4,9 +4,9 @@ import (
 	"context"
 	"database/sql"
 	"errors"
+	"strconv"
 	"sync"
 
-	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
@@ -14,46 +14,81 @@ import (
 	"github.com/hermeznetwork/hermez-node/log"
 )
 
-const (
-	blocksToSync = 20 // TODO: This will be deleted once we can get the firstSavedBlock from the ethClient
-)
-
 var (
 	// ErrNotAbleToSync is used when there is not possible to find a valid block to sync
 	ErrNotAbleToSync = errors.New("it has not been possible to synchronize any block")
 )
 
-// BatchData contains information about Batches from the contracts
-//nolint:structcheck,unused
-type BatchData struct {
-	l1txs              []common.L1Tx
-	l2txs              []common.L2Tx
-	registeredAccounts []common.Account
-	exitTree           []common.ExitInfo
+// rollupData contains information returned by the Rollup SC
+type rollupData struct {
+	l1Txs   []*common.L1Tx
+	batches []*BatchData
+	// withdrawals []*common.ExitInfo
+	registeredTokens []*common.Token
+	rollupVars       *common.RollupVars
 }
 
-// BlockData contains information about Blocks from the contracts
-//nolint:structcheck,unused
-type BlockData struct {
-	block *common.Block
-	// Rollup
-	batches          []BatchData
-	withdrawals      []common.ExitInfo
-	registeredTokens []common.Token
-	rollupVars       *common.RollupVars
-	// Auction
-	bids         []common.Bid
-	coordinators []common.Coordinator
+// NewRollupData creates an empty rollupData with the slices initialized.
+func newRollupData() rollupData {
+	return rollupData{
+		l1Txs:   make([]*common.L1Tx, 0),
+		batches: make([]*BatchData, 0),
+		// withdrawals: make([]*common.ExitInfo, 0),
+		registeredTokens: make([]*common.Token, 0),
+	}
+}
+
+// auctionData contains information returned by the Action SC
+type auctionData struct {
+	bids         []*common.Bid
+	coordinators []*common.Coordinator
 	auctionVars  *common.AuctionVars
 }
 
-// Status is returned by the Status method
-type Status struct {
-	CurrentBlock      int64
-	CurrentBatch      common.BatchNum
-	CurrentForgerAddr ethCommon.Address
-	NextForgerAddr    ethCommon.Address
-	Synchronized      bool
+// newAuctionData creates an empty auctionData with the slices initialized.
+func newAuctionData() *auctionData {
+	return &auctionData{
+		bids:         make([]*common.Bid, 0),
+		coordinators: make([]*common.Coordinator, 0),
+	}
+}
+
+// BatchData contains information about Batches from the contracts
+type BatchData struct {
+	l1UserTxs        []*common.L1Tx
+	l1CoordinatorTxs []*common.L1Tx
+	l2Txs            []*common.L2Tx
+	createdAccounts  []*common.Account
+	exitTree         []*common.ExitInfo
+	batch            *common.Batch
+}
+
+// NewBatchData creates an empty BatchData with the slices initialized.
+func NewBatchData() *BatchData {
+	return &BatchData{
+		l1UserTxs:        make([]*common.L1Tx, 0),
+		l1CoordinatorTxs: make([]*common.L1Tx, 0),
+		l2Txs:            make([]*common.L2Tx, 0),
+		createdAccounts:  make([]*common.Account, 0),
+		exitTree:         make([]*common.ExitInfo, 0),
+	}
+}
+
+// BlockData contains information about Blocks from the contracts
+type BlockData struct {
+	block *common.Block
+	// Rollup
+	l1Txs   []*common.L1Tx // TODO: Answer: User? Coordinator? Both?
+	batches []*BatchData   // TODO: Also contains L1Txs!
+	// withdrawals []*common.ExitInfo // TODO
+	registeredTokens []*common.Token
+	rollupVars       *common.RollupVars
+	// Auction
+	bids         []*common.Bid
+	coordinators []*common.Coordinator
+	auctionVars  *common.AuctionVars
+	// WithdrawalDelayer
+	withdrawalDelayerVars *common.WithdrawalDelayerVars
 }
 
 // Synchronizer implements the Synchronizer type
@@ -75,35 +110,29 @@ func NewSynchronizer(ethClient *eth.Client, historyDB *historydb.HistoryDB, stat
 	return s
 }
 
+// TODO: Be smart about locking: only lock during the read/write operations
+
 // Sync updates History and State DB with information from the blockchain
+// TODO: Return true if a new block was processed
+// TODO: Add argument: maximum number of blocks to process
+// TODO: Check reorgs in the middle of syncing a block. Probably make
+// rollupSync, auctionSync and withdrawalSync return the block hash.
 func (s *Synchronizer) Sync() error {
 	// Avoid new sync while performing one
 	s.mux.Lock()
 	defer s.mux.Unlock()
 
-	var lastStoredForgeL1TxsNum int64
+	var nextBlockNum int64 // next block number to sync
 
-	// TODO: Get this information from ethClient once it's implemented
-	// for the moment we will get the latestblock - 20 as firstSavedBlock
-	latestBlock, err := s.ethClient.EthBlockByNumber(context.Background(), 0)
-	if err != nil {
-		return err
-	}
-	s.firstSavedBlock, err = s.ethClient.EthBlockByNumber(context.Background(), latestBlock.EthBlockNum-blocksToSync)
-	if err != nil {
-		return err
-	}
-
 	// Get lastSavedBlock from History DB
 	lastSavedBlock, err := s.historyDB.GetLastBlock()
 	if err != nil && err != sql.ErrNoRows {
 		return err
 	}
-	// Check if we got a block or nil
-	// In case of nil we must do a full sync
-	if lastSavedBlock == nil || lastSavedBlock.EthBlockNum == 0 {
-		lastSavedBlock = s.firstSavedBlock
+	// If we don't have any stored block, we must do a full sync starting from the rollup genesis block
+	if err == sql.ErrNoRows {
+		// TODO: Query rollup constants and genesis information, store them
+		nextBlockNum = 1234 // TODO: Replace this with genesisBlockNum
 	} else {
 		// Get the latest block we have in History DB from blockchain to detect a reorg
 		ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), lastSavedBlock.EthBlockNum)
@@ -114,7 +143,7 @@ func (s *Synchronizer) Sync() error {
 		if ethBlock.Hash != lastSavedBlock.Hash {
 			// Reorg detected
 			log.Debugf("Reorg Detected...")
-			err := s.reorg(lastSavedBlock)
+			_, err := s.reorg(lastSavedBlock)
 			if err != nil {
 				return err
 			}
@@ -124,6 +153,7 @@ func (s *Synchronizer) Sync() error {
 				return err
 			}
 		}
+		nextBlockNum = lastSavedBlock.EthBlockNum + 1
 	}
 
 	log.Debugf("Syncing...")
@@ -134,34 +164,63 @@ func (s *Synchronizer) Sync() error {
 		return err
 	}
 
-	log.Debugf("Blocks to sync: %v (lastSavedBlock: %v, latestBlock: %v)", latestBlockNum-lastSavedBlock.EthBlockNum, lastSavedBlock.EthBlockNum, latestBlockNum)
+	log.Debugf("Blocks to sync: %v (firstBlockToSync: %v, latestBlock: %v)", latestBlockNum-nextBlockNum+1, nextBlockNum, latestBlockNum)
 
-	for lastSavedBlock.EthBlockNum < latestBlockNum {
-		ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), lastSavedBlock.EthBlockNum+1)
+	for nextBlockNum < latestBlockNum {
+		ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), nextBlockNum)
 		if err != nil {
 			return err
 		}
+		// TODO: Check that the obtianed ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
+
+		// TODO: Send the ethHash in rollupSync(), auctionSync() and
+		// wdelayerSync() and make sure they all use the same block
+		// hash.
+
 		// Get data from the rollup contract
-		blockData, batchData, err := s.rollupSync(ethBlock, lastStoredForgeL1TxsNum)
+		rollupData, err := s.rollupSync(nextBlockNum)
 		if err != nil {
 			return err
 		}
 
 		// Get data from the auction contract
-		err = s.auctionSync(blockData, batchData)
+		auctionData, err := s.auctionSync(nextBlockNum)
 		if err != nil {
 			return err
 		}
 
+		// Get data from the WithdrawalDelayer contract
+		wdelayerData, err := s.wdelayerSync(nextBlockNum)
+		if err != nil {
+			return err
+		}
+
+		// Group all the block data into the structs to save into HistoryDB
+		var blockData BlockData
+
+		blockData.block = ethBlock
+
+		if rollupData != nil {
+			blockData.l1Txs = rollupData.l1Txs
+			blockData.batches = rollupData.batches
+			// blockData.withdrawals = rollupData.withdrawals // TODO
+			blockData.registeredTokens = rollupData.registeredTokens
+			blockData.rollupVars = rollupData.rollupVars
+		}
+
+		if auctionData != nil {
+			blockData.bids = auctionData.bids
+			blockData.coordinators = auctionData.coordinators
+			blockData.auctionVars = auctionData.auctionVars
+		}
+
+		if wdelayerData != nil {
+			blockData.withdrawalDelayerVars = wdelayerData
+		}
+
 		// Add rollupData and auctionData once the method is updated
-		err = s.historyDB.AddBlock(ethBlock)
-		if err != nil {
-			return err
-		}
-
-		// We get the block on every iteration
-		lastSavedBlock, err = s.historyDB.GetLastBlock()
+		// TODO: Save Whole Struct -> AddBlockSCData(blockData)
+		err = s.historyDB.AddBlock(blockData.block)
 		if err != nil {
 			return err
 		}
@@ -170,8 +229,12 @@ func (s *Synchronizer) Sync() error {
 	return nil
 }
 
-// reorg manages a reorg, updating History and State DB as needed
-func (s *Synchronizer) reorg(uncleBlock *common.Block) error {
+// reorg manages a reorg, updating History and State DB as needed. Keeps
+// checking previous blocks from the HistoryDB against the blockchain until a
+// block hash match is found. All future blocks in the HistoryDB and
+// corresponding batches in StateBD are discarded. Returns the last valid
+// blockNum from the HistoryDB.
+func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
 	var block *common.Block
 	blockNum := uncleBlock.EthBlockNum
 	found := false
@@ -182,12 +245,12 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) error {
 	for !found && blockNum > s.firstSavedBlock.EthBlockNum {
 		ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), blockNum)
 		if err != nil {
-			return err
+			return 0, err
 		}
 
 		block, err = s.historyDB.GetBlock(blockNum)
 		if err != nil {
-			return err
+			return 0, err
 		}
 		if block.Hash == ethBlock.Hash {
 			found = true
@@ -203,33 +266,40 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) error {
 			// Set History DB and State DB to the correct state
 			err := s.historyDB.Reorg(block.EthBlockNum)
 			if err != nil {
-				return err
+				return 0, err
 			}
 
 			batchNum, err := s.historyDB.GetLastBatchNum()
 			if err != nil && err != sql.ErrNoRows {
-				return err
+				return 0, err
 			}
 			if batchNum != 0 {
 				err = s.stateDB.Reset(batchNum)
 				if err != nil {
-					return err
+					return 0, err
 				}
 			}
 
-			return nil
+			return block.EthBlockNum, nil
 		}
 	}
 
-	return ErrNotAbleToSync
+	return 0, ErrNotAbleToSync
 }
 
 // Status returns current status values from the Synchronizer
-func (s *Synchronizer) Status() (*Status, error) {
+func (s *Synchronizer) Status() (*common.SyncStatus, error) {
 	// Avoid possible inconsistencies
 	s.mux.Lock()
 	defer s.mux.Unlock()
 
-	var status *Status
+	var status *common.SyncStatus
 
+	// TODO: Join all queries to the DB into a single transaction so that
+	// we can remove the mutex locking here:
+	// - HistoryDB.GetLastBlock
+	// - HistoryDB.GetLastBatchNum
+	// - HistoryDB.GetCurrentForgerAddr
+	// - HistoryDB.GetNextForgerAddr
+
 	// Get latest block in History DB
 	lastSavedBlock, err := s.historyDB.GetLastBlock()
@@ -251,7 +321,7 @@ func (s *Synchronizer) Status() (*Status, error) {
 		return nil, err
 	}
 
-	// TODO: Get CurrentForgerAddr & NextForgerAddr
+	// TODO: Get CurrentForgerAddr & NextForgerAddr from the Auction SC / Or from the HistoryDB
 
 	// Check if Synchronizer is synchronized
 	status.Synchronized = status.CurrentBlock == latestBlockNum
@@ -259,13 +329,251 @@ func (s *Synchronizer) Status() (*Status, error) {
 }
 
 // rollupSync gets information from the Rollup Contract
-func (s *Synchronizer) rollupSync(block *common.Block, lastStoredForgeL1TxsNum int64) (*BlockData, []*BatchData, error) {
-	// To be implemented
-	return nil, nil, nil
+func (s *Synchronizer) rollupSync(blockNum int64) (*rollupData, error) {
+	var rollupData = newRollupData()
+	// var forgeL1TxsNum int64
+	var numAccounts int
+
+	// Get rollup events in the block
+	rollupEvents, _, err := s.ethClient.RollupEventsByBlock(blockNum)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: Replace GetLastL1TxsNum by GetNextL1TxsNum
+	nextForgeL1TxsNum := int64(0)
+	nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum()
+	if err != nil {
+		return nil, err
+	}
+	if nextForgeL1TxsNumPtr != nil {
+		nextForgeL1TxsNum = *nextForgeL1TxsNumPtr + 1
+	}
+
+	// Get newLastIdx that will be used to complete the accounts
+	// idx, err := s.getIdx(rollupEvents)
+	// if err != nil {
+	// 	return nil, err
+	// }
+
+	// Get L1UserTX
+	rollupData.l1Txs = getL1UserTx(rollupEvents.L1UserTx, blockNum)
+
+	// Get ForgeBatch events to get the L1CoordinatorTxs
+	for _, fbEvent := range rollupEvents.ForgeBatch {
+		batchData := NewBatchData()
+		position := 0
+
+		// Get the input for each Tx
+		forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(fbEvent.EthTxHash)
+		if err != nil {
+			return nil, err
+		}
+		forgeL1TxsNum := int64(0)
+		// Check if this is a L1Batch to get L1 Tx from it
+		if forgeBatchArgs.L1Batch {
+			forgeL1TxsNum = nextForgeL1TxsNum
+
+			// Get L1 User Txs from History DB
+			// TODO: Get L1TX from HistoryDB filtered by toforgeL1txNum & fromidx = 0 and
+			// update batch number and add accounts to createdAccounts updating idx
+
+			// l1UserTxs, err := s.historyDB.GetL1UserTxs(nextForgeL1TxsNum)
+			// If HistoryDB doesn't have L1UserTxs at
+			// nextForgeL1TxsNum, check if they exist in
+			// rollupData.l1Txs. This could happen because in a
+			// block there could be multiple batches with L1Batch =
+			// true (although it's a very rare case). If the
+			// L1UserTxs are not in rollupData.l1Txs, use an empty
+			// array (this happens when the L1UserTxs queue is
+			// frozen but didn't store any tx).
+			l1UserTxs := []common.L1Tx{}
+			position = len(l1UserTxs)
+
+			// Get L1 Coordinator Txs
+			for _, l1CoordinatorTx := range forgeBatchArgs.L1CoordinatorTxs {
+				l1CoordinatorTx.Position = position
+				l1CoordinatorTx.ToForgeL1TxsNum = nextForgeL1TxsNum
+				l1CoordinatorTx.TxID = common.TxID(common.Hash([]byte("0x01" + strconv.FormatInt(int64(nextForgeL1TxsNum), 10) + strconv.FormatInt(int64(l1CoordinatorTx.Position), 10) + "00")))
+				l1CoordinatorTx.UserOrigin = false
+				l1CoordinatorTx.EthBlockNum = blockNum
+				l1CoordinatorTx.BatchNum = common.BatchNum(fbEvent.BatchNum)
+
+				batchData.l1CoordinatorTxs = append(batchData.l1CoordinatorTxs, l1CoordinatorTx)
+
+				// Check if we have to register an account
+				// if l1CoordinatorTx.FromIdx == 0 {
+				// 	account := common.Account{
+				// 		// TODO: Uncommnent when common.account has IDx
+				// 		// IDx: common.Idx(idx),
+				// 		TokenID:   l1CoordinatorTx.TokenID,
+				// 		Nonce:     0,
+				// 		Balance:   l1CoordinatorTx.LoadAmount,
+				// 		PublicKey: l1CoordinatorTx.FromBJJ,
+				// 		EthAddr:   l1CoordinatorTx.FromEthAddr,
+				// 	}
+				// 	idx++
+				// 	batchData.createdAccounts = append(batchData.createdAccounts, &account)
+				// 	numAccounts++
+				// }
+				position++
+			}
+			nextForgeL1TxsNum++
+		}
+
+		// Get L2Txs
+		poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2Txs) // TODO: This is a big uggly, find a better way
+
+		// Get exitTree
+		// TODO: Get createdAccounts from ProcessTxs()
+		// TODO: Get CollectedFees from ProcessTxs()
+		// TODO: Pass forgeBatchArgs.FeeIdxCoordinator to ProcessTxs()
+		_, exitInfo, err := s.stateDB.ProcessTxs(true, false, batchData.l1UserTxs, batchData.l1CoordinatorTxs, poolL2Txs)
+		if err != nil {
+			return nil, err
+		}
+
+		l2Txs := common.PoolL2TxsToL2Txs(poolL2Txs) // TODO: This is a big uggly, find a better way
+		batchData.l2Txs = append(batchData.l2Txs, l2Txs...)
+
+		batchData.exitTree = exitInfo
+
+		// Get Batch information
+		batch := &common.Batch{
+			BatchNum:    common.BatchNum(fbEvent.BatchNum),
+			EthBlockNum: blockNum,
+			// ForgerAddr: , TODO: Get it from ethClient -> Add ForgerAddr to RollupEventForgeBatch
+			// CollectedFees: , TODO: Clarify where to get them if they are still needed
+			StateRoot:     common.Hash(forgeBatchArgs.NewStRoot.Bytes()),
+			NumAccounts:   numAccounts,
+			ExitRoot:      common.Hash(forgeBatchArgs.NewExitRoot.Bytes()),
+			ForgeL1TxsNum: forgeL1TxsNum,
+			// SlotNum: TODO: Calculate once ethClient provides the info // calculate from blockNum + ethClient Constants
+		}
+		batchData.batch = batch
+		rollupData.batches = append(rollupData.batches, batchData)
+	}
+
+	// Get Registered Tokens
+	for _, eAddToken := range rollupEvents.AddToken {
+		var token *common.Token
+
+		token.TokenID = common.TokenID(eAddToken.TokenID)
+		token.EthAddr = eAddToken.Address
+		token.EthBlockNum = blockNum
+
+		// TODO: Add external information consulting SC about it using Address
+		rollupData.registeredTokens = append(rollupData.registeredTokens, token)
+	}
+
+	// TODO: rollupEvents.UpdateForgeL1L2BatchTimeout
+	// TODO: rollupEvents.UpdateFeeAddToken
+	// TODO: rollupEvents.WithdrawEvent
+
+	// TODO: Emergency Mechanism
+	// TODO: Variables
+	// TODO: Constants
+
+	return &rollupData, nil
 }
 
 // auctionSync gets information from the Auction Contract
-func (s *Synchronizer) auctionSync(blockData *BlockData, batchData []*BatchData) error {
-	// To be implemented
-	return nil
+func (s *Synchronizer) auctionSync(blockNum int64) (*auctionData, error) {
+	var auctionData = newAuctionData()
+
+	// Get auction events in the block
+	auctionEvents, _, err := s.ethClient.AuctionEventsByBlock(blockNum)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get bids
+	for _, eNewBid := range auctionEvents.NewBid {
+		bid := &common.Bid{
+			SlotNum:     common.SlotNum(eNewBid.Slot),
+			BidValue:    eNewBid.BidAmount,
+			ForgerAddr:  eNewBid.CoordinatorForger,
+			EthBlockNum: blockNum,
+		}
+		auctionData.bids = append(auctionData.bids, bid)
+	}
+
+	// Get Coordinators
+	for _, eNewCoordinator := range auctionEvents.NewCoordinator {
+		coordinator := &common.Coordinator{
+			Forger:   eNewCoordinator.ForgerAddress,
+			Withdraw: eNewCoordinator.WithdrawalAddress,
+			URL:      eNewCoordinator.URL,
+		}
+		auctionData.coordinators = append(auctionData.coordinators, coordinator)
+	}
+
+	// TODO: NewSlotDeadline
+	// TODO: NewClosedAuctionSlots
+	// TODO: NewOutbidding
+	// TODO: NewDonationAddress
+	// TODO: NewBootCoordinator
+	// TODO: NewOpenAuctionSlots
+	// TODO: NewAllocationRatio
+	// TODO: NewForgeAllocated
+	// TODO: NewDefaultSlotSetBid
+	// TODO: NewForge
+	// TODO: HEZClaimed
+
+	// TODO: Think about separating new coordinaors from coordinator updated
+
+	// Get Coordinators from updates
+	for _, eCoordinatorUpdated := range auctionEvents.CoordinatorUpdated {
+		coordinator := &common.Coordinator{
+			Forger:   eCoordinatorUpdated.ForgerAddress,
+			Withdraw: eCoordinatorUpdated.WithdrawalAddress,
+			URL:      eCoordinatorUpdated.URL,
+		}
+		auctionData.coordinators = append(auctionData.coordinators, coordinator)
+	}
+
+	// TODO: VARS
+	// TODO: CONSTANTS
+
+	return auctionData, nil
+}
+
+// wdelayerSync gets information from the Withdrawal Delayer Contract
+func (s *Synchronizer) wdelayerSync(blockNum int64) (*common.WithdrawalDelayerVars, error) {
+	// TODO: VARS
+	// TODO: CONSTANTS
+
+	return nil, nil
+}
+
+// func (s *Synchronizer) getIdx(rollupEvents *eth.RollupEvents) (int64, error) {
+// 	// TODO: FIXME: There will be an error here when `len(rollupEvents.ForgeBatch) == 0`
+// 	lastForgeBatch := rollupEvents.ForgeBatch[len(rollupEvents.ForgeBatch)-1]
+//
+// 	// TODO: RollupForgeBatchArgs is already called in `rollupSync`.
+// 	// Ideally it should not need to be called twice for the same batch.
+// 	// Get the input for forgeBatch
+// 	forgeBatchArgs, err := s.ethClient.RollupForgeBatchArgs(lastForgeBatch.EthTxHash)
+// 	if err != nil {
+// 		return 0, err
+// 	}
+//
+// 	return forgeBatchArgs.NewLastIdx + 1, nil
+// }
+
+func getL1UserTx(l1UserTxEvents []eth.RollupEventL1UserTx, blockNum int64) []*common.L1Tx {
+	l1Txs := make([]*common.L1Tx, 0)
+
+	for _, eL1UserTx := range l1UserTxEvents {
+		// Fill aditional Tx fields
+		eL1UserTx.L1Tx.TxID = common.TxID(common.Hash([]byte("0x00" + strconv.FormatInt(int64(eL1UserTx.ToForgeL1TxsNum), 10) + strconv.FormatInt(int64(eL1UserTx.Position), 10) + "00")))
+		eL1UserTx.L1Tx.ToForgeL1TxsNum = eL1UserTx.ToForgeL1TxsNum
+		eL1UserTx.L1Tx.Position = eL1UserTx.Position
+		eL1UserTx.L1Tx.UserOrigin = true
+		eL1UserTx.L1Tx.EthBlockNum = blockNum
+		eL1UserTx.L1Tx.BatchNum = 0
+
+		l1Txs = append(l1Txs, &eL1UserTx.L1Tx)
+	}
+	return l1Txs
 }
@@ -38,10 +38,11 @@ func Test(t *testing.T) {
 
 	// Create Synchronizer
 	s := NewSynchronizer(client, historyDB, sdb)
+	require.NotNil(t, s)
 
 	// Test Sync
-	err = s.Sync()
-	require.Nil(t, err)
+	// err = s.Sync()
+	// require.Nil(t, err)
 
 	// TODO: Reorg will be properly tested once we have the mock ethClient implemented
 	/*
@@ -71,7 +71,7 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
 			SlotNum: common.SlotNum(i),
 		}
 		if i%2 == 0 {
-			batch.ForgeL1TxsNum = uint32(i)
+			batch.ForgeL1TxsNum = int64(i)
 		}
 		batches = append(batches, batch)
 	}