
WIP3

feature/serveapicli
Eduard S 3 years ago
parent
commit
a5ef822c64
12 changed files with 788 additions and 800 deletions
  1. +3 -3 api/api_test.go
  2. +109 -2 api/state.go
  3. +7 -7 api/state_test.go
  4. +31 -0 common/eth.go
  5. +2 -2 coordinator/coordinator.go
  6. +226 -0 db/historydb/apiqueries.go
  7. +48 -15 db/historydb/historydb.go
  8. +177 -172 db/historydb/historydb_test.go
  9. +70 -496 db/historydb/nodeinfo.go
  10. +2 -2 db/historydb/views.go
  11. +82 -70 node/node.go
  12. +31 -31 synchronizer/synchronizer.go

+3 -3 api/api_test.go

@ -180,7 +180,7 @@ type testCommon struct {
auctionVars common.AuctionVariables
rollupVars common.RollupVariables
wdelayerVars common.WDelayerVariables
nextForgers []historydb.NextForger
nextForgers []historydb.NextForgerAPI
}
var tc testCommon
@ -455,14 +455,14 @@ func TestMain(m *testing.M) {
if err = api.h.AddBids(bids); err != nil {
panic(err)
}
bootForger := historydb.NextForger{
bootForger := historydb.NextForgerAPI{
Coordinator: historydb.CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
},
}
// Set next forgers: set all as boot coordinator then replace the non boot coordinators
nextForgers := []historydb.NextForger{}
nextForgers := []historydb.NextForgerAPI{}
var initBlock int64 = 140
var deltaBlocks int64 = 40
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {

+109 -2 api/state.go

@ -1,16 +1,123 @@
package api
import (
"database/sql"
"net/http"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr"
)
func (a *API) getState(c *gin.Context) {
ni, err := a.h.GetNodeInfoAPI()
stateAPI, err := a.h.GetStateAPI()
if err != nil {
retBadReq(err, c)
return
}
c.JSON(http.StatusOK, ni.APIState)
c.JSON(http.StatusOK, stateAPI)
}
// APIStateUpdater keeps an in-memory StateAPI and the data needed to keep it updated
type APIStateUpdater struct {
hdb *historydb.HistoryDB
state historydb.StateAPI
config historydb.NodeConfig
vars common.SCVariablesPtr
consts historydb.Constants
}
// NewAPIStateUpdater creates a new APIStateUpdater
func NewAPIStateUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *APIStateUpdater {
u := APIStateUpdater{
hdb: hdb,
config: *config,
consts: *consts,
}
u.SetSCVars(&common.SCVariablesPtr{Rollup: &vars.Rollup, Auction: &vars.Auction, WDelayer: &vars.WDelayer})
return &u
}
// Store persists the current StateAPI into the HistoryDB
func (u *APIStateUpdater) Store() error {
return tracerr.Wrap(u.hdb.SetAPIState(&u.state))
}
// SetSCVars sets the smart contract variables; only the non-nil ones are updated
func (u *APIStateUpdater) SetSCVars(vars *common.SCVariablesPtr) {
if vars.Rollup != nil {
u.vars.Rollup = vars.Rollup
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
u.state.Rollup = *rollupVars
}
if vars.Auction != nil {
u.vars.Auction = vars.Auction
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
u.state.Auction = *auctionVars
}
if vars.WDelayer != nil {
u.vars.WDelayer = vars.WDelayer
u.state.WithdrawalDelayer = *u.vars.WDelayer
}
}
// UpdateMetrics updates the StateAPI metrics from the last 24h of batches
func (u *APIStateUpdater) UpdateMetrics() error {
if u.state.Network.LastBatch == nil {
return nil
}
lastBatchNum := u.state.Network.LastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.state.Metrics = *metrics
return nil
}
// UpdateNetworkInfoBlock updates the StateAPI network info that only depends on blocks
func (u *APIStateUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
}
// UpdateNetworkInfo updates the StateAPI network info (last batch, next forgers, buckets, current slot)
func (u *APIStateUpdater) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
// Get last batch in API format
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Get next forgers
lastClosedSlot := currentSlot + int64(u.state.Auction.ClosedAuctionSlots)
nextForgers, err := u.hdb.GetNextForgersInternalAPI(u.vars.Auction, &u.consts.Auction,
lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
if err == sql.ErrNoRows {
bucketUpdates = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Update the StateAPI struct
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
u.state.Rollup.Buckets[i] = bucketParams
break
}
}
}
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers
return nil
}
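For context, a minimal sketch of how this updater is meant to be driven; it mirrors the wiring that node/node.go does later in this diff, and the historyDB, nodeCfg, consts, initSCVars, vars and stats identifiers are placeholders for illustration only:

updater := NewAPIStateUpdater(historyDB, &nodeCfg, initSCVars, &consts)

// On every synced block: refresh the SC variables and network info,
// then persist the whole StateAPI into the HistoryDB.
updater.SetSCVars(vars)
if err := updater.UpdateNetworkInfo(
	stats.Eth.LastBlock, stats.Sync.LastBlock,
	common.BatchNum(stats.Eth.LastBatchNum),
	stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
	return tracerr.Wrap(err)
}
if err := updater.Store(); err != nil {
	return tracerr.Wrap(err)
}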

+7 -7 api/state_test.go

@ -13,7 +13,7 @@ import (
type testStatus struct {
Network testNetwork `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Metrics historydb.MetricsAPI `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@ -21,11 +21,11 @@ type testStatus struct {
}
type testNetwork struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []historydb.NextForger `json:"nextForgers"`
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
}
func TestSetRollupVariables(t *testing.T) {
@ -211,7 +211,7 @@ func TestGetState(t *testing.T) {
// status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForger) {
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
assert.Equal(t, len(expected), len(actual))
for i := range expected {
// ignore timestamps and other metadata

+31 -0 common/eth.go

@ -0,0 +1,31 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}
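As a usage note, AsPtr is what lets callers holding a full SCVariables value feed the pointer-based APIs; this fragment mirrors node/node.go later in this diff (n is the node, sync the synchronizer):

vars := n.sync.SCVars()                   // *common.SCVariables, full copy
n.apiStateUpdater.SetSCVars(vars.AsPtr()) // all three variable sets passed as non-nil pointers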

+2 -2 coordinator/coordinator.go

@ -185,8 +185,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client,
ethClient eth.ClientInterface,
scConsts *synchronizer.SCConsts,
initSCVars *synchronizer.SCVariables,
scConsts *common.SCConsts,
initSCVars *common.SCVariables,
) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd

+226 -0 db/historydb/apiqueries.go

@ -1,8 +1,11 @@
package historydb
import (
"database/sql"
"errors"
"fmt"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
@ -34,6 +37,12 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
defer hdb.apiConnCon.Release()
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
// GetBatchInternalAPI returns the batch with the given batchNum, for internal node use (without going through the API connection limiter)
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow(
@ -954,3 +963,220 @@ func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
defer hdb.apiConnCon.Release()
return hdb.GetNodeInfo()
}
// GetBucketUpdatesInternalAPI returns the latest update of each bucket
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
// var bucketUpdates []*common.BucketUpdate
err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetNextForgersInternalAPI returns the next forgers
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
auctionConsts *common.AuctionConstants,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForgerAPI{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum - 1
nextForger := NextForgerAPI{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// GetMetricsInternalAPI returns the metrics for the last 24 hours of activity, up to the given batch
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
var metrics MetricsAPI
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: lastBatchNum,
}
if err := meddler.QueryRow(
hdb.dbRead, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++
}
metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch
nBatches := p.ToBatchNum - p.FromBatchNum + 1
if nBatches == 0 { // Avoid dividing by 0
nBatches++
}
if (p.ToBatchNum - p.FromBatchNum) > 0 {
fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else {
metrics.TransactionsPerBatch = 0
}
// Get total fee of that period
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err)
}
// Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches)
if nTxs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nTxs)
} else {
metrics.AvgTransactionFee = 0
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"`
TotalBJJ int64 `meddler:"total_bjj"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.TotalAccounts = ra.TotalIdx
metrics.TotalBJJs = ra.TotalBJJ
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil
}
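To make the arithmetic above easier to follow, here is a self-contained sketch with made-up numbers (none of them come from real data) that reproduces the txs/s, txs/batch and batch-frequency formulas of GetMetricsInternalAPI:

package main

import "fmt"

func main() {
	// Made-up 24h window: batches 100..105, 90 txs, 600 s between the first and last batch.
	fromBatchNum, toBatchNum := 100, 105
	nTxs := 90
	seconds := 600.0

	nBatches := toBatchNum - fromBatchNum + 1 // 6, same "+1" as in GetMetricsInternalAPI
	fmt.Println("txs/s:", float64(nTxs)/seconds)                   // 0.15
	fmt.Println("txs/batch:", float64(nTxs)/float64(nBatches))     // 15
	fmt.Println("batch frequency (s):", seconds/float64(nBatches)) // 100
}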
// GetStateAPI returns the StateAPI stored in the DB
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getStateAPI(hdb.dbRead)
}

+48 -15 db/historydb/historydb.go

@ -841,20 +841,7 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}
func (hdb *HistoryDB) getBucketUpdatesAPI(txn *sqlx.Tx) ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
// var bucketUpdates []*common.BucketUpdate
err := meddler.QueryAll(
txn, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
func (hdb *HistoryDB) getMinBidInfo(txn *sqlx.Tx,
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
minBidInfo := []*MinBidInfo{}
query := `
@ -862,7 +849,7 @@ func (hdb *HistoryDB) getMinBidInfo(txn *sqlx.Tx,
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;`
err := meddler.QueryAll(txn, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}
@ -1183,3 +1170,49 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
}
// GetRecommendedFee returns the recommended fee, based on the average transaction fee of the last hour and the given minimum fee in USD
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the number of batches and accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Compute the average transaction fee and the recommended fees
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
return &recommendedFee, nil
}
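A small, self-contained illustration of the recommended-fee formula above; the input numbers and the two multipliers are invented for the example (the real createAccountExtraFeePercentage and createAccountInternalExtraFeePercentage constants are defined elsewhere in the historydb package):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Made-up last-hour figures: 36 USD of fees over 300 txs, minimum fee 0.10 USD.
	totalFees, totalTxs, minFeeUSD := 36.0, 300.0, 0.10
	// Assumed multipliers, for the example only.
	createAccountExtra, createAccountInternalExtra := 2.5, 2.0

	avg := totalFees / totalTxs // 0.12 USD per tx
	fmt.Println("existingAccount:", math.Max(avg, minFeeUSD))                                      // 0.12
	fmt.Println("createsAccount:", math.Max(createAccountExtra*avg, minFeeUSD))                    // 0.30
	fmt.Println("createsAccountAndRegister:", math.Max(createAccountInternalExtra*avg, minFeeUSD)) // 0.24
}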

+177 -172 db/historydb/historydb_test.go

@ -1114,166 +1114,169 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals)
}
// func TestGetMetricsAPI(t *testing.T) {
// test.WipeDB(historyDB.DB())
// set := `
// Type: Blockchain
//
// AddToken(1)
//
// CreateAccountDeposit(1) A: 1000 // numTx=1
// CreateAccountDeposit(1) B: 2000 // numTx=2
// CreateAccountDeposit(1) C: 3000 //numTx=3
//
// // block 0 is stored as default in the DB
// // block 1 does not exist
// > batchL1 // numBatches=1
// > batchL1 // numBatches=2
// > block // blockNum=2
//
// Transfer(1) C-A : 10 (1) // numTx=4
// > batch // numBatches=3
// > block // blockNum=3
// Transfer(1) B-C : 10 (1) // numTx=5
// > batch // numBatches=5
// > block // blockNum=4
// Transfer(1) A-B : 10 (1) // numTx=6
// > batch // numBatches=5
// > block // blockNum=5
// Transfer(1) A-B : 10 (1) // numTx=7
// > batch // numBatches=6
// > block // blockNum=6
// `
//
// const numBatches int = 6
// const numTx int = 7
// const blockNum = 6 - 1
//
// tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
// tilCfgExtra := til.ConfigExtra{
// BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
// CoordUser: "A",
// }
// blocks, err := tc.GenerateBlocks(set)
// require.NoError(t, err)
// err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
// require.NoError(t, err)
//
// // Sanity check
// require.Equal(t, blockNum, len(blocks))
//
// // Adding one batch per block
// // batch frequency can be chosen
// const frequency int = 15
//
// for i := range blocks {
// blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
// err = historyDB.AddBlockSCData(&blocks[i])
// assert.NoError(t, err)
// }
//
// res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
// assert.NoError(t, err)
//
// assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
//
// // Frequency is not exactly the desired one, some decimals may appear
// // There is a -2 as time for first and last batch is not taken into account
// assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
// assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
// assert.Equal(t, int64(3), res.TotalAccounts)
// assert.Equal(t, int64(3), res.TotalBJJs)
// // Til does not set fees
// assert.Equal(t, float64(0), res.AvgTransactionFee)
// }
//
// func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
// test.WipeDB(historyDB.DB())
//
// testUsersLen := 3
// var set []til.Instruction
// for user := 0; user < testUsersLen; user++ {
// set = append(set, til.Instruction{
// Typ: common.TxTypeCreateAccountDeposit,
// TokenID: common.TokenID(0),
// DepositAmount: big.NewInt(1000000),
// Amount: big.NewInt(0),
// From: fmt.Sprintf("User%02d", user),
// })
// set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// }
// set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
// set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
// set = append(set, til.Instruction{Typ: til.TypeNewBlock})
//
// // Transfers
// const numBlocks int = 30
// for x := 0; x < numBlocks; x++ {
// set = append(set, til.Instruction{
// Typ: common.TxTypeTransfer,
// TokenID: common.TokenID(0),
// DepositAmount: big.NewInt(1),
// Amount: big.NewInt(0),
// From: "User00",
// To: "User01",
// })
// set = append(set, til.Instruction{Typ: til.TypeNewBatch})
// set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// }
//
// var chainID uint16 = 0
// tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
// blocks, err := tc.GenerateBlocksFromInstructions(set)
// assert.NoError(t, err)
//
// tilCfgExtra := til.ConfigExtra{
// CoordUser: "A",
// }
// err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
// require.NoError(t, err)
//
// const numBatches int = 2 + numBlocks
// const blockNum = 4 + numBlocks
//
// // Sanity check
// require.Equal(t, blockNum, len(blocks))
//
// // Adding one batch per block
// // batch frequency can be chosen
// const blockTime time.Duration = 3600 * time.Second
// now := time.Now()
// require.NoError(t, err)
//
// for i := range blocks {
// blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime)
// err = historyDB.AddBlockSCData(&blocks[i])
// assert.NoError(t, err)
// }
//
// res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
// assert.NoError(t, err)
//
// assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
//
// assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
// assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
// assert.Equal(t, int64(3), res.TotalAccounts)
// assert.Equal(t, int64(3), res.TotalBJJs)
// // Til does not set fees
// assert.Equal(t, float64(0), res.AvgTransactionFee)
// }
//
// func TestGetMetricsAPIEmpty(t *testing.T) {
// test.WipeDB(historyDB.DB())
// _, err := historyDBWithACC.GetMetricsAPI(0)
// assert.NoError(t, err)
// }
//
// func TestGetAvgTxFeeEmpty(t *testing.T) {
// test.WipeDB(historyDB.DB())
// _, err := historyDBWithACC.GetAvgTxFeeAPI()
// assert.NoError(t, err)
// }
func TestGetMetricsAPI(t *testing.T) {
test.WipeDB(historyDB.DB())
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(1) A: 1000 // numTx=1
CreateAccountDeposit(1) B: 2000 // numTx=2
CreateAccountDeposit(1) C: 3000 //numTx=3
// block 0 is stored as default in the DB
// block 1 does not exist
> batchL1 // numBatches=1
> batchL1 // numBatches=2
> block // blockNum=2
Transfer(1) C-A : 10 (1) // numTx=4
> batch // numBatches=3
> block // blockNum=3
Transfer(1) B-C : 10 (1) // numTx=5
> batch // numBatches=5
> block // blockNum=4
Transfer(1) A-B : 10 (1) // numTx=6
> batch // numBatches=5
> block // blockNum=5
Transfer(1) A-B : 10 (1) // numTx=7
> batch // numBatches=6
> block // blockNum=6
`
const numBatches int = 6
const numTx int = 7
const blockNum = 6 - 1
tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "A",
}
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
// Sanity check
require.Equal(t, blockNum, len(blocks))
// Adding one batch per block
// batch frequency can be chosen
const frequency int = 15
for i := range blocks {
blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err)
}
// clientSetupExample := test.NewClientSetupExample()
// apiStateUpdater := NewAPIStateUpdater(historyDB, &NodeConfig{1000, 0.5},
// &Constants{
// RollupConstants: *clientSetupExample.RollupConstants,
// AuctionConstants: *clientSetupExample.AuctionConstants,
// WDelayerConstants: *clientSetupExample.WDelayerConstants,
// ChainID: uint16(clientSetupExample.ChainID.Int64()),
// HermezAddress: clientSetupExample.AuctionConstants.HermezRollup,
// })
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
// Frequency is not exactly the desired one, some decimals may appear
// There is a -2 as time for first and last batch is not taken into account
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
test.WipeDB(historyDB.DB())
testUsersLen := 3
var set []til.Instruction
for user := 0; user < testUsersLen; user++ {
set = append(set, til.Instruction{
Typ: common.TxTypeCreateAccountDeposit,
TokenID: common.TokenID(0),
DepositAmount: big.NewInt(1000000),
Amount: big.NewInt(0),
From: fmt.Sprintf("User%02d", user),
})
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
}
set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// Transfers
const numBlocks int = 30
for x := 0; x < numBlocks; x++ {
set = append(set, til.Instruction{
Typ: common.TxTypeTransfer,
TokenID: common.TokenID(0),
DepositAmount: big.NewInt(1),
Amount: big.NewInt(0),
From: "User00",
To: "User01",
})
set = append(set, til.Instruction{Typ: til.TypeNewBatch})
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
}
var chainID uint16 = 0
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocksFromInstructions(set)
assert.NoError(t, err)
tilCfgExtra := til.ConfigExtra{
CoordUser: "A",
}
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
const numBatches int = 2 + numBlocks
const blockNum = 4 + numBlocks
// Sanity check
require.Equal(t, blockNum, len(blocks))
// Adding one batch per block
// batch frequency can be chosen
const blockTime time.Duration = 3600 * time.Second
now := time.Now()
require.NoError(t, err)
for i := range blocks {
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime)
err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err)
}
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err)
}
func TestGetLastL1TxsNum(t *testing.T) {
test.WipeDB(historyDB.DB())
@ -1464,30 +1467,32 @@ func setTestBlocks(from, to int64) []common.Block {
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetAPIState(&APIState{})
err := historyDB.SetAPIState(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
RollupConstants: *clientSetup.RollupConstants,
AuctionConstants: *clientSetup.AuctionConstants,
WDelayerConstants: *clientSetup.WDelayerConstants,
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
apiState := &APIState{
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
ForgeDelay: 3.1,
},
Network: Network{
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
},
Metrics: Metrics{
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
},
@ -1498,7 +1503,7 @@ func TestNodeInfo(t *testing.T) {
ExistingAccount: 0.15,
},
}
err = historyDB.SetAPIState(apiState)
err = historyDB.SetAPIState(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
@ -1516,7 +1521,7 @@ func TestNodeInfo(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbAPIState, err := historyDB.GetAPIState()
dbStateAPI, err := historyDB.GetStateAPI()
require.NoError(t, err)
assert.Equal(t, apiState, dbAPIState)
assert.Equal(t, stateAPI, dbStateAPI)
}

+70 -496 db/historydb/nodeinfo.go

@ -1,18 +1,11 @@
package historydb
import (
"database/sql"
"fmt"
"math"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
"github.com/russross/meddler"
)
@ -29,17 +22,17 @@ type Period struct {
ToTimestamp time.Time `json:"toTimestamp"`
}
type NextForger struct {
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
type Network struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
@ -48,11 +41,11 @@ type NodePublicConfig struct {
ForgeDelay float64 `json:"forgeDelay"`
}
type APIState struct {
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network Network `json:"network"`
Metrics Metrics `json:"metrics"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@ -60,11 +53,12 @@ type APIState struct {
}
type Constants struct {
RollupConstants common.RollupConstants
AuctionConstants common.AuctionConstants
WDelayerConstants common.WDelayerConstants
ChainID uint16
HermezAddress ethCommon.Address
// RollupConstants common.RollupConstants
// AuctionConstants common.AuctionConstants
// WDelayerConstants common.WDelayerConstants
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
type NodeConfig struct {
@ -74,7 +68,7 @@ type NodeConfig struct {
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
APIState *APIState `meddler:"state,json"`
APIState *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
@ -111,18 +105,22 @@ func (hdb *HistoryDB) SetConstants(constants *Constants) error {
return tracerr.Wrap(err)
}
func (hdb *HistoryDB) GetAPIState() (*APIState, error) {
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.APIState, tracerr.Wrap(err)
}
func (hdb *HistoryDB) SetAPIState(apiState *APIState) error {
func (hdb *HistoryDB) SetAPIState(apiState *StateAPI) error {
_apiState := struct {
APIState *APIState `meddler:"state,json"`
APIState *StateAPI `meddler:"state,json"`
}{apiState}
values, err := meddler.Default.Values(&_apiState, false)
if err != nil {
@ -168,474 +166,50 @@ func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
// return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "node_info", ni))
// }
type APIStateUpdater struct {
hdb *HistoryDB
state APIState
config NodeConfig
constants Constants
}
func (u *APIStateUpdater) SetSCVars(rollupVariables *common.RollupVariables,
auctionVariables *common.AuctionVariables,
wDelayerVariables *common.WDelayerVariables) {
if rollupVariables != nil {
rollupVars := NewRollupVariablesAPI(rollupVariables)
u.state.Rollup = *rollupVars
}
if auctionVariables != nil {
auctionVars := NewAuctionVariablesAPI(auctionVariables)
u.state.Auction = *auctionVars
}
if wDelayerVariables != nil {
u.state.WithdrawalDelayer = *wDelayerVariables
}
}
func (u *APIStateUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
}
func (u *APIStateUpdater) UpdateNetworkInfo(
txn *sqlx.Tx,
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
// Get last batch in API format
lastBatch, err := u.hdb.getBatchAPI(txn, lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Get next forgers
lastClosedSlot := currentSlot + int64(u.state.Auction.ClosedAuctionSlots)
nextForgers, err := u.getNextForgers(txn, lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
bucketUpdates, err := u.hdb.getBucketUpdatesAPI(txn)
if err == sql.ErrNoRows {
bucketUpdates = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Update NodeInfo struct
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
u.state.Rollup.Buckets[i] = bucketParams
break
}
}
}
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers
return nil
}
// TODO: Remove
// SetRollupVariables set Status.Rollup variables
func (hdb *HistoryDB) SetRollupVariables(rollupVariables *common.RollupVariables) error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
rollupVars := NewRollupVariablesAPI(rollupVariables)
ni.APIState.Rollup = *rollupVars
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// TODO: Remove
// SetWDelayerVariables set Status.WithdrawalDelayer variables
func (hdb *HistoryDB) SetWDelayerVariables(wDelayerVariables *common.WDelayerVariables) error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
ni.APIState.WithdrawalDelayer = *wDelayerVariables
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// TODO: Remove
// SetAuctionVariables set Status.Auction variables
func (hdb *HistoryDB) SetAuctionVariables(auctionVariables *common.AuctionVariables) error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
auctionVars := NewAuctionVariablesAPI(auctionVariables)
ni.APIState.Auction = *auctionVars
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// TODO: Remove
// UpdateNetworkInfoBlock update Status.Network block related information
func (hdb *HistoryDB) UpdateNetworkInfoBlock(
lastEthBlock, lastSyncBlock common.Block,
) error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
ni.APIState.Network.LastSyncBlock = lastSyncBlock.Num
ni.APIState.Network.LastEthBlock = lastEthBlock.Num
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// UpdateNetworkInfo update Status.Network information
func (hdb *HistoryDB) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
// Get last batch in API format
lastBatch, err := hdb.getBatchAPI(txn, lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Get next forgers
lastClosedSlot := currentSlot + int64(ni.APIState.Auction.ClosedAuctionSlots)
nextForgers, err := hdb.getNextForgers(txn, ni, lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
// Get buckets withdrawals
var bucketUpdatesPtrs []*BucketUpdateAPI
var bucketUpdates []BucketUpdateAPI
err = meddler.QueryAll(
txn, &bucketUpdatesPtrs,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
if err == sql.ErrNoRows {
bucketUpdates = nil
} else if err != nil {
return tracerr.Wrap(err)
} else {
bucketUpdates = db.SlicePtrsToSlice(bucketUpdatesPtrs).([]BucketUpdateAPI)
}
// Update NodeInfo struct
for i, bucketParams := range ni.APIState.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
ni.APIState.Rollup.Buckets[i] = bucketParams
break
}
}
}
ni.APIState.Network.LastSyncBlock = lastSyncBlock.Num
ni.APIState.Network.LastEthBlock = lastEthBlock.Num
ni.APIState.Network.LastBatch = lastBatch
ni.APIState.Network.CurrentSlot = currentSlot
ni.APIState.Network.NextForgers = nextForgers
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
var slots [6]*big.Int
for i, slot := range defaultSlotSetBid {
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
if !ok {
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
}
slots[i] = bigInt
}
return slots, nil
}
// getNextForgers returns next forgers
func (u *APIStateUpdater) getNextForgers(txn *sqlx.Tx,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := u.hdb.getBestBidsAPI(txn, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForger{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= u.state.Auction.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
bigIntSlots, err := apiSlotToBigInts(u.state.Auction.DefaultSlotSetBid)
if err != nil {
return nil, tracerr.Wrap(err)
}
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: bigIntSlots,
DefaultSlotSetBidSlotNum: u.state.Auction.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = u.hdb.getMinBidInfo(txn, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(u.constants.AuctionConstants.BlocksPerSlot) +
u.constants.AuctionConstants.GenesisBlockNum
toBlock := (i+1)*int64(u.constants.AuctionConstants.BlocksPerSlot) +
u.constants.AuctionConstants.GenesisBlockNum - 1
nextForger := NextForger{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(u.state.Auction.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := u.hdb.GetCoordinatorAPI(bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: u.state.Auction.BootCoordinator,
URL: u.state.Auction.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// TODO: Rename to getMetrics and don't write anything
// UpdateMetrics update Status.Metrics information
func (hdb *HistoryDB) UpdateMetrics() error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
// Get the first and last batch of the last 24h and their timestamps
if ni.APIState.Network.LastBatch == nil {
return nil
}
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: ni.APIState.Network.LastBatch.BatchNum,
}
if err := meddler.QueryRow(
txn, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := txn.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++
}
ni.APIState.Metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch
nBatches := p.ToBatchNum - p.FromBatchNum
if nBatches == 0 { // Avoid dividing by 0
nBatches++
}
if (p.ToBatchNum - p.FromBatchNum) > 0 {
ni.APIState.Metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else {
ni.APIState.Metrics.TransactionsPerBatch = 0
}
// Get total fee of that period
row = txn.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return tracerr.Wrap(err)
}
// Set batch frequency
ni.APIState.Metrics.BatchFrequency = seconds / float64(nBatches)
if nTxs > 0 {
ni.APIState.Metrics.AvgTransactionFee = totalFee / float64(nTxs)
} else {
ni.APIState.Metrics.AvgTransactionFee = 0
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"`
TotalBJJ int64 `meddler:"total_bjj"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
txn, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
); err != nil {
return tracerr.Wrap(err)
}
ni.APIState.Metrics.TotalAccounts = ra.TotalIdx
ni.APIState.Metrics.TotalBJJs = ra.TotalBJJ
// Get and set estimated time to forge L1 tx
row = txn.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return tracerr.Wrap(err)
}
ni.APIState.Metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// UpdateRecommendedFee update Status.RecommendedFee information
func (hdb *HistoryDB) UpdateRecommendedFee() error {
setUpdatedNodeInfo := func(txn *sqlx.Tx, ni *NodeInfo) error {
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
txn, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return tracerr.Wrap(err)
}
// Get the number of batches and accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
txn, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return tracerr.Wrap(err)
}
// Update NodeInfo struct
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
ni.APIState.RecommendedFee.ExistingAccount =
math.Max(avgTransactionFee, *ni.MinFeeUSD)
ni.APIState.RecommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, *ni.MinFeeUSD)
ni.APIState.RecommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, *ni.MinFeeUSD)
return nil
}
return hdb.updateNodeInfo(setUpdatedNodeInfo)
}
// func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
// var slots [6]*big.Int
//
// for i, slot := range defaultSlotSetBid {
// bigInt, ok := new(big.Int).SetString(string(*slot), 10)
// if !ok {
// return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
// }
// slots[i] = bigInt
// }
//
// return slots, nil
// }
func (hdb *HistoryDB) updateNodeInfo(setUpdatedNodeInfo func(*sqlx.Tx, *NodeInfo) error) error {
// Create a SQL transaction to read and update atomically
txn, err := hdb.dbWrite.Beginx()
if err != nil {
return tracerr.Wrap(err)
}
defer func() {
if err != nil {
db.Rollback(txn)
}
}()
// Read current node info
ni := &NodeInfo{}
if err := meddler.QueryRow(
txn, ni, "SELECT * FROM node_info;",
); err != nil {
return tracerr.Wrap(err)
}
// Update NodeInfo struct
if err := setUpdatedNodeInfo(txn, ni); err != nil {
return tracerr.Wrap(err)
}
// Update NodeInfo at DB
if _, err := txn.Exec("DELETE FROM node_info;"); err != nil {
return tracerr.Wrap(err)
}
if err := meddler.Insert(txn, "node_info", ni); err != nil {
return tracerr.Wrap(err)
}
// Commit NodeInfo update
return tracerr.Wrap(txn.Commit())
}
// func (hdb *HistoryDB) updateNodeInfo(setUpdatedNodeInfo func(*sqlx.Tx, *NodeInfo) error) error {
// // Create a SQL transaction to read and update atomically
// txn, err := hdb.dbWrite.Beginx()
// if err != nil {
// return tracerr.Wrap(err)
// }
// defer func() {
// if err != nil {
// db.Rollback(txn)
// }
// }()
// // Read current node info
// ni := &NodeInfo{}
// if err := meddler.QueryRow(
// txn, ni, "SELECT * FROM node_info;",
// ); err != nil {
// return tracerr.Wrap(err)
// }
// // Update NodeInfo struct
// if err := setUpdatedNodeInfo(txn, ni); err != nil {
// return tracerr.Wrap(err)
// }
// // Update NodeInfo at DB
// if _, err := txn.Exec("DELETE FROM node_info;"); err != nil {
// return tracerr.Wrap(err)
// }
// if err := meddler.Insert(txn, "node_info", ni); err != nil {
// return tracerr.Wrap(err)
// }
// // Commit NodeInfo update
// return tracerr.Wrap(txn.Commit())
// }

+2 -2 db/historydb/views.go

@ -302,8 +302,8 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"`
}
// Metrics define metrics of the network
type Metrics struct {
// MetricsAPI defines the metrics of the network
type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"`

+82 -70 node/node.go

@ -53,9 +53,10 @@ const (
// Node is the Hermez Node
type Node struct {
nodeAPI *NodeAPI
debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
nodeAPI *NodeAPI
apiStateUpdater *api.APIStateUpdater
debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
// Coordinator
coord *coordinator.Coordinator
@ -230,26 +231,34 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
initSCVars := sync.SCVars()
scConsts := synchronizer.SCConsts{
scConsts := common.SCConsts{
Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(),
}
if err := historyDB.SetInitialNodeInfo(
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
&historydb.Constants{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
); err != nil {
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
apiStateUpdater := api.NewAPIStateUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
@ -379,11 +388,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs,
client,
&scConsts,
&synchronizer.SCVariables{
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
initSCVars,
)
if err != nil {
return nil, tracerr.Wrap(err)
@ -432,18 +437,19 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
ctx, cancel := context.WithCancel(context.Background())
return &Node{
nodeAPI: nodeAPI,
debugAPI: debugAPI,
priceUpdater: priceUpdater,
coord: coord,
sync: sync,
cfg: cfg,
mode: mode,
sqlConnRead: dbRead,
sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx,
cancel: cancel,
apiStateUpdater: apiStateUpdater,
nodeAPI: nodeAPI,
debugAPI: debugAPI,
priceUpdater: priceUpdater,
coord: coord,
sync: sync,
cfg: cfg,
mode: mode,
sqlConnRead: dbRead,
sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx,
cancel: cancel,
}, nil
}
@ -616,8 +622,8 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil
}
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
batches []common.BatchData) {
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr,
batches []common.BatchData) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats,
@ -625,47 +631,42 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
Batches: batches,
})
}
if n.nodeAPI != nil {
if vars.Rollup != nil {
n.historyDB.SetRollupVariables(vars.Rollup)
}
if vars.Auction != nil {
n.historyDB.SetAuctionVariables(vars.Auction)
}
if vars.WDelayer != nil {
n.historyDB.SetWDelayerVariables(vars.WDelayer)
}
if stats.Synced() {
if err := n.historyDB.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
} else {
n.historyDB.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
n.apiStateUpdater.SetSCVars(vars)
if stats.Synced() {
if err := n.apiStateUpdater.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
}
} else {
n.apiStateUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
if err := n.apiStateUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariables) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: vars,
})
}
vars = n.sync.SCVars()
n.historyDB.SetRollupVariables(vars.Rollup)
n.historyDB.SetAuctionVariables(vars.Auction)
n.historyDB.SetWDelayerVariables(vars.WDelayer)
n.historyDB.UpdateNetworkInfoBlock(
n.apiStateUpdater.SetSCVars(vars.AsPtr())
n.apiStateUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
if err := n.apiStateUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
@ -680,16 +681,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars()
n.handleReorg(ctx, stats, vars)
if err := n.handleReorg(ctx, stats, vars); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return nil, time.Duration(0), nil
} else if blockData != nil {
// case: new block
vars := synchronizer.SCVariablesPtr{
vars := common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,
}
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return &blockData.Block, time.Duration(0), nil
} else {
// case: no block
@ -708,7 +713,7 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized
stats := n.sync.Stats()
vars := n.sync.SCVars()
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{})
n.wg.Add(1)
go func() {
@ -795,8 +800,11 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1)
go func() {
// Do an initial update on startup
if err := n.historyDB.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
if err := n.apiStateUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.apiStateUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for {
select {
@ -805,8 +813,12 @@ func (n *Node) StartNodeAPI() {
n.wg.Done()
return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.historyDB.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
if err := n.apiStateUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
continue
}
if err := n.apiStateUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
}
}

+31 -31 synchronizer/synchronizer.go

@ -183,26 +183,26 @@ type StartBlockNums struct {
}
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup common.RollupVariables `validate:"required"`
Auction common.AuctionVariables `validate:"required"`
WDelayer common.WDelayerVariables `validate:"required"`
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *common.RollupVariables `validate:"required"`
Auction *common.AuctionVariables `validate:"required"`
WDelayer *common.WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup common.RollupConstants
Auction common.AuctionConstants
WDelayer common.WDelayerConstants
}
// type SCVariables struct {
// Rollup common.RollupVariables `validate:"required"`
// Auction common.AuctionVariables `validate:"required"`
// WDelayer common.WDelayerVariables `validate:"required"`
// }
//
// // SCVariablesPtr joins all the smart contract variables as pointers in a single
// // struct
// type SCVariablesPtr struct {
// Rollup *common.RollupVariables `validate:"required"`
// Auction *common.AuctionVariables `validate:"required"`
// WDelayer *common.WDelayerVariables `validate:"required"`
// }
//
// // SCConsts joins all the smart contract constants in a single struct
// type SCConsts struct {
// Rollup common.RollupConstants
// Auction common.AuctionConstants
// WDelayer common.WDelayerConstants
// }
// Config is the Synchronizer configuration
type Config struct {
@ -213,13 +213,13 @@ type Config struct {
// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient eth.ClientInterface
consts SCConsts
consts common.SCConsts
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
cfg Config
initVars SCVariables
initVars common.SCVariables
startBlockNum int64
vars SCVariables
vars common.SCVariables
stats *StatsHolder
resetStateFailed bool
}
@ -242,7 +242,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err))
}
consts := SCConsts{
consts := common.SCConsts{
Rollup: *rollupConstants,
Auction: *auctionConstants,
WDelayer: *wDelayerConstants,
@ -307,11 +307,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
}
// SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() SCVariablesPtr {
return SCVariablesPtr{
Rollup: s.vars.Rollup.Copy(),
Auction: s.vars.Auction.Copy(),
WDelayer: s.vars.WDelayer.Copy(),
func (s *Synchronizer) SCVars() *common.SCVariables {
return &common.SCVariables{
Rollup: *s.vars.Rollup.Copy(),
Auction: *s.vars.Auction.Copy(),
WDelayer: *s.vars.WDelayer.Copy(),
}
}
@ -724,7 +724,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
}
func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
@ -740,7 +740,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables()
return &SCVariables{
return &common.SCVariables{
Rollup: *rollupVars,
Auction: *auctionVars,
WDelayer: *wDelayerVars,
