Compare commits

...

40 Commits

Author SHA1 Message Date
Eduard S
b330889570 WIP 2021-03-09 14:32:23 +01:00
Eduard S
a5ef822c64 WIP3 2021-03-08 18:17:15 +01:00
Eduard S
5501f30062 WIP2 2021-03-04 14:05:35 +01:00
Eduard S
d4f6926311 WIP 2021-03-03 14:37:41 +01:00
Eduard S
bfba1ba2d2 Clean 2021-03-03 12:50:21 +01:00
arnaubennassar
eed635539f pull 2021-03-02 18:49:34 +01:00
arnaubennassar
87610f6188 wip 2021-03-02 18:46:56 +01:00
arnaubennassar
4b596072d2 Add table to decouple API from node 2021-03-02 15:22:02 +01:00
Eduard S
95c4019cb2 WIP 2021-03-01 10:51:30 +01:00
Eduard S
c4d5e8a7ab WIP 2021-03-01 10:51:30 +01:00
Eduard S
c1375d9c5f Serve API only via cli 2021-03-01 10:51:30 +01:00
Eduard S
39b7882ef2 Merge pull request #594 from hermeznetwork/fix/post-tx-no-usd
Avoid SQL error when checking value of token without usd on tx insert
2021-03-01 10:34:35 +01:00
Eduard S
c7d0422c16 Merge pull request #596 from hermeznetwork/fix/l1CoordinatorFromBytes-parser
Update L1CoordinatorTxFromBytes parser to EIP712
2021-03-01 10:19:21 +01:00
arnaucube
56ffea2190 Update L1CoordinatorTxFromBytes to EIP712 2021-02-28 10:56:51 +01:00
arnaubennassar
cf70111de5 Avoid SQL error when checking value of token without usd on tx insert 2021-02-26 18:22:07 +01:00
a_bennassar
f664a3a382 Merge pull request #588 from hermeznetwork/feature/check-forge-balance
check the forge address balance before starting the node
2021-02-26 17:32:53 +01:00
Danilo Pantani
cd1df6ea8c check the forge address balance before starting the node 2021-02-26 12:51:31 -03:00
Eduard S
26e2bbc262 WIP 2021-02-26 16:17:06 +01:00
Eduard S
6da827c751 Merge pull request #591 from hermeznetwork/feature/accCreationAuth-EIP712
Update AccCreationAuth signature hash to EIP-712
2021-02-26 13:50:52 +01:00
arnaucube
60023e4574 Update AccCreationAuth signature hash to EIP-712
Compatible with `hermeznetwork/contracts` commit: `67726208723a40f2251953aaabf4d2b6221f8b13`
Compatible with `hermeznetwork/commonjs` commit: `dee266fb036a64bebc65756ebd5f0361929c110d`
2021-02-26 13:46:35 +01:00
arnau
9de3a4ec6a Merge pull request #590 from hermeznetwork/fix/zki-imExitRoot
Set the intermediary signal when exit TX is not inserted
2021-02-26 13:39:06 +01:00
Eduard S
bb4c464200 WIP 2021-02-26 13:09:24 +01:00
Eduard S
982899efed Serve API only via cli 2021-02-26 13:09:24 +01:00
Alberto Elias
91c96eb429 Merge pull request #589 from hermeznetwork/feature/api-batch-timeout
Add forge delay to api get state
2021-02-26 11:44:20 +00:00
Jordi Baylina
70f874aaf1 Set the intermediary signal when exit TX is not inserted 2021-02-26 12:19:29 +01:00
arnaubennassar
54508b0ba6 Add forge delay to api get state 2021-02-26 12:15:39 +01:00
Eduard S
0adcf1a2bc Merge pull request #587 from hermeznetwork/feature/legacy-sync2
Update synchronizer.Sync2 to Sync from legacy impl
2021-02-25 17:02:15 +01:00
a_bennassar
527cd9a2cc Merge pull request #584 from hermeznetwork/fix/fgingAfterReboot
Reorg l2db before starting pipeline
2021-02-25 16:59:57 +01:00
arnau
b269117a32 Merge pull request #586 from hermeznetwork/feature/gindebugviaconfig
Set gin debug mode via config
2021-02-25 16:57:55 +01:00
arnau
e14705c13b Merge pull request #585 from hermeznetwork/feature/gascalc
Calculate ForgeBatch gasLimit with parametrized formula
2021-02-25 16:56:59 +01:00
arnau
5ff0350f51 Merge pull request #583 from hermeznetwork/fix/checkinterval
Fix missing timer reset in TxManager
2021-02-25 16:53:48 +01:00
arnaucube
15fd3945dd Update synchronizer.Sync2 to Sync from legacy impl 2021-02-25 16:52:09 +01:00
Eduard S
83c256deda Merge pull request #581 from hermeznetwork/feature/api-L1-forge-time
Add API metric for time to forge L1 tx
2021-02-25 16:37:05 +01:00
Eduard S
d50ae71710 Merge pull request #580 from hermeznetwork/fix/zki-multiple-exits-balance2
Fix ZKI Exit Balance2 accumulate Amounts
2021-02-25 16:22:52 +01:00
Eduard S
ba108b1146 Set gin debug mode via config
Add new config setting `Debug.GinDebugMode`.  When set to true, gin will run in
debug mode.  If not set, gin will run in release mode.  Before this change, gin
always ran in debug mode, so to keep the same behaviour as before, set this
parameter to true
2021-02-25 15:51:59 +01:00
Eduard S
c771bdf94e Calculate ForgeBatch gasLimit with parametrized formula
Add the following config parameters at
`Coordinator.EthClient.ForgeBatchGasCost`:
- `Fixed`
- `L1UserTx`
- `L1CoordTx`
- `L2Tx`
Which are the costs associated to a ForgeBatch transaction, split
into different parts to be used in the formula to compute the gasLimit.
2021-02-25 15:26:04 +01:00
Eduard S
3c68aa5014 Reorg l2db before starting pipeline 2021-02-25 14:12:15 +01:00
Eduard S
4856251f01 Fix missing timer reset in TxManager
Also, replace usage of time.Duration by time.NewTimer, because the later allows
replacing timers by stopping them before so that we never leak resources.
2021-02-25 14:04:22 +01:00
arnaubennassar
4acfeb0ad9 Add API metric for time to forge L1 tx 2021-02-25 13:06:26 +01:00
arnaucube
400ab14f04 Fix ZKI Exit Balance2 accumulated Amounts 2021-02-25 11:37:34 +01:00
35 changed files with 1833 additions and 936 deletions

View File

@@ -2,39 +2,19 @@ package api
import ( import (
"errors" "errors"
"sync"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
// TODO: Add correct values to constants
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Status define status of the network
type Status struct {
sync.RWMutex
Network Network `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// API serves HTTP requests to allow external interaction with the Hermez node // API serves HTTP requests to allow external interaction with the Hermez node
type API struct { type API struct {
h *historydb.HistoryDB h *historydb.HistoryDB
cg *configAPI cg *configAPI
l2 *l2db.L2DB l2 *l2db.L2DB
status Status
chainID uint16 chainID uint16
hermezAddress ethCommon.Address hermezAddress ethCommon.Address
} }
@@ -45,7 +25,6 @@ func NewAPI(
server *gin.Engine, server *gin.Engine,
hdb *historydb.HistoryDB, hdb *historydb.HistoryDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *Config,
) (*API, error) { ) (*API, error) {
// Check input // Check input
// TODO: is stateDB only needed for explorer endpoints or for both? // TODO: is stateDB only needed for explorer endpoints or for both?
@@ -55,18 +34,20 @@ func NewAPI(
if explorerEndpoints && hdb == nil { if explorerEndpoints && hdb == nil {
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB")) return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
} }
consts, err := hdb.GetConstants()
if err != nil {
return nil, err
}
a := &API{ a := &API{
h: hdb, h: hdb,
cg: &configAPI{ cg: &configAPI{
RollupConstants: *newRollupConstants(config.RollupConstants), RollupConstants: *newRollupConstants(consts.Rollup),
AuctionConstants: config.AuctionConstants, AuctionConstants: consts.Auction,
WDelayerConstants: config.WDelayerConstants, WDelayerConstants: consts.WDelayer,
}, },
l2: l2db, l2: l2db,
status: Status{}, chainID: consts.ChainID,
chainID: config.ChainID, hermezAddress: consts.HermezAddress,
hermezAddress: config.HermezAddress,
} }
// Add coordinator endpoints // Add coordinator endpoints

View File

@@ -180,12 +180,13 @@ type testCommon struct {
auctionVars common.AuctionVariables auctionVars common.AuctionVariables
rollupVars common.RollupVariables rollupVars common.RollupVariables
wdelayerVars common.WDelayerVariables wdelayerVars common.WDelayerVariables
nextForgers []NextForger nextForgers []historydb.NextForgerAPI
} }
var tc testCommon var tc testCommon
var config configAPI var config configAPI
var api *API var api *API
var stateAPIUpdater *StateAPIUpdater
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data, // TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
// emulating the task of the synchronizer in order to have data to be returned // emulating the task of the synchronizer in order to have data to be returned
@@ -206,16 +207,6 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
// StateDB
dir, err := ioutil.TempDir("", "tmpdb")
if err != nil {
panic(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
}()
// L2DB // L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
@@ -230,15 +221,38 @@ func TestMain(m *testing.M) {
// API // API
apiGin := gin.Default() apiGin := gin.Default()
// Reset DB
test.WipeDB(hdb.DB())
constants := &historydb.Constants{
SCConsts: common.SCConsts{
Rollup: _config.RollupConstants,
Auction: _config.AuctionConstants,
WDelayer: _config.WDelayerConstants,
},
ChainID: chainID,
HermezAddress: _config.HermezAddress,
}
if err := hdb.SetConstants(constants); err != nil {
panic(err)
}
nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10,
MinFeeUSD: 0,
}
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err)
}
api, err = NewAPI( api, err = NewAPI(
true, true,
true, true,
apiGin, apiGin,
hdb, hdb,
l2DB, l2DB,
&_config,
) )
if err != nil { if err != nil {
log.Error(err)
panic(err) panic(err)
} }
// Start server // Start server
@@ -254,9 +268,6 @@ func TestMain(m *testing.M) {
} }
}() }()
// Reset DB
test.WipeDB(api.h.DB())
// Genratre blockchain data with til // Genratre blockchain data with til
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{ tilCfgExtra := til.ConfigExtra{
@@ -457,19 +468,19 @@ func TestMain(m *testing.M) {
if err = api.h.AddBids(bids); err != nil { if err = api.h.AddBids(bids); err != nil {
panic(err) panic(err)
} }
bootForger := NextForger{ bootForger := historydb.NextForgerAPI{
Coordinator: historydb.CoordinatorAPI{ Coordinator: historydb.CoordinatorAPI{
Forger: auctionVars.BootCoordinator, Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL, URL: auctionVars.BootCoordinatorURL,
}, },
} }
// Set next forgers: set all as boot coordinator then replace the non boot coordinators // Set next forgers: set all as boot coordinator then replace the non boot coordinators
nextForgers := []NextForger{} nextForgers := []historydb.NextForgerAPI{}
var initBlock int64 = 140 var initBlock int64 = 140
var deltaBlocks int64 = 40 var deltaBlocks int64 = 40
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ { for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
fromBlock := initBlock + deltaBlocks*int64(i-1) fromBlock := initBlock + deltaBlocks*int64(i-1)
bootForger.Period = Period{ bootForger.Period = historydb.Period{
SlotNum: int64(i), SlotNum: int64(i),
FromBlock: fromBlock, FromBlock: fromBlock,
ToBlock: fromBlock + deltaBlocks - 1, ToBlock: fromBlock + deltaBlocks - 1,
@@ -509,6 +520,12 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000), WithdrawalDelay: uint64(3000),
} }
stateAPIUpdater = NewStateAPIUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
// Generate test data, as expected to be received/sended from/to the API // Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords) testCoords := genTestCoordinators(commonCoords)
testBids := genTestBids(commonBlocks, testCoords, bids) testBids := genTestBids(commonBlocks, testCoords, bids)
@@ -586,15 +603,12 @@ func TestMain(m *testing.M) {
if err := database.Close(); err != nil { if err := database.Close(); err != nil {
panic(err) panic(err)
} }
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
os.Exit(result) os.Exit(result)
} }
func TestTimeout(t *testing.T) { func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond) apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO) hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
@@ -624,14 +638,12 @@ func TestTimeout(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
}() }()
_config := getConfigTest(0)
_, err = NewAPI( _, err = NewAPI(
true, true,
true, true,
apiGinTO, apiGinTO,
hdbTO, hdbTO,
l2DBTO, l2DBTO,
&_config,
) )
require.NoError(t, err) require.NoError(t, err)

View File

@@ -99,7 +99,9 @@ func TestGetSlot(t *testing.T) {
nil, &fetchedSlot, nil, &fetchedSlot,
), ),
) )
emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars) // ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
assertSlot(t, emptySlot, fetchedSlot) assertSlot(t, emptySlot, fetchedSlot)
// Invalid slotNum // Invalid slotNum
@@ -127,8 +129,10 @@ func TestGetSlots(t *testing.T) {
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter) err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
assert.NoError(t, err) assert.NoError(t, err)
allSlots := tc.slots allSlots := tc.slots
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ { for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars) emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
allSlots = append(allSlots, emptySlot) allSlots = append(allSlots, emptySlot)
} }
assertSlots(t, allSlots, fetchedSlots) assertSlots(t, allSlots, fetchedSlots)

View File

@@ -2,313 +2,160 @@ package api
import ( import (
"database/sql" "database/sql"
"fmt"
"math"
"math/big"
"net/http" "net/http"
"time" "sync"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
// Network define status of the network
type Network struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *historydb.BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
}
// NextForger is a representation of the information of a coordinator and the period will forge
type NextForger struct {
Coordinator historydb.CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// Period is a representation of a period
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
func (a *API) getState(c *gin.Context) { func (a *API) getState(c *gin.Context) {
// TODO: There are no events for the buckets information, so now this information will be 0 stateAPI, err := a.h.GetStateAPI()
a.status.RLock() if err != nil {
status := a.status //nolint retBadReq(err, c)
a.status.RUnlock() return
c.JSON(http.StatusOK, status) //nolint
}
// SC Vars
// SetRollupVariables set Status.Rollup variables
func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
a.status.Lock()
var rollupVAPI historydb.RollupVariablesAPI
rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
for i, bucket := range rollupVariables.Buckets {
var apiBucket historydb.BucketParamsAPI
apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
rollupVAPI.Buckets[i] = apiBucket
} }
c.JSON(http.StatusOK, stateAPI)
rollupVAPI.SafeMode = rollupVariables.SafeMode
a.status.Rollup = rollupVAPI
a.status.Unlock()
} }
// SetWDelayerVariables set Status.WithdrawalDelayer variables // StateAPIUpdater is an utility object to facilitate updating the StateAPI
func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) { type StateAPIUpdater struct {
a.status.Lock() hdb *historydb.HistoryDB
a.status.WithdrawalDelayer = wDelayerVariables state historydb.StateAPI
a.status.Unlock() config historydb.NodeConfig
vars common.SCVariablesPtr
consts historydb.Constants
rw sync.RWMutex
} }
// SetAuctionVariables set Status.Auction variables // NewStateAPIUpdater creates a new StateAPIUpdater
func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) { func NewStateAPIUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
a.status.Lock() consts *historydb.Constants) *StateAPIUpdater {
var auctionAPI historydb.AuctionVariablesAPI u := StateAPIUpdater{
hdb: hdb,
auctionAPI.EthBlockNum = auctionVariables.EthBlockNum config: *config,
auctionAPI.DonationAddress = auctionVariables.DonationAddress consts: *consts,
auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
auctionAPI.Outbidding = auctionVariables.Outbidding
auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
} }
u.SetSCVars(vars.AsPtr())
for i, ratio := range auctionVariables.AllocationRatio { return &u
auctionAPI.AllocationRatio[i] = ratio
}
a.status.Auction = auctionAPI
a.status.Unlock()
} }
// Network // Store the State in the HistoryDB
func (u *StateAPIUpdater) Store() error {
u.rw.RLock()
defer u.rw.RUnlock()
return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
}
// SetSCVars sets the smart contract vars (ony updates those that are not nil)
func (u *StateAPIUpdater) SetSCVars(vars *common.SCVariablesPtr) {
u.rw.Lock()
defer u.rw.Unlock()
if vars.Rollup != nil {
u.vars.Rollup = vars.Rollup
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
u.state.Rollup = *rollupVars
}
if vars.Auction != nil {
u.vars.Auction = vars.Auction
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
u.state.Auction = *auctionVars
}
if vars.WDelayer != nil {
u.vars.WDelayer = vars.WDelayer
u.state.WithdrawalDelayer = *u.vars.WDelayer
}
}
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *StateAPIUpdater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
}
// UpdateMetrics update Status.Metrics information
func (u *StateAPIUpdater) UpdateMetrics() error {
u.rw.RLock()
lastBatch := u.state.Network.LastBatch
u.rw.RUnlock()
if lastBatch == nil {
return nil
}
lastBatchNum := lastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.Metrics = *metrics
u.rw.Unlock()
return nil
}
// UpdateNetworkInfoBlock update Status.Network block related information // UpdateNetworkInfoBlock update Status.Network block related information
func (a *API) UpdateNetworkInfoBlock( func (u *StateAPIUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
lastEthBlock, lastSyncBlock common.Block, u.rw.Lock()
) { u.state.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastSyncBlock = lastSyncBlock.Num u.state.Network.LastEthBlock = lastEthBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num u.rw.Unlock()
} }
// UpdateNetworkInfo update Status.Network information // UpdateNetworkInfo update Status.Network information
func (a *API) UpdateNetworkInfo( func (u *StateAPIUpdater) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block, lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64, lastBatchNum common.BatchNum, currentSlot int64,
) error { ) error {
lastBatch, err := a.h.GetBatchAPI(lastBatchNum) // Get last batch in API format
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil lastBatch = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots) u.rw.RLock()
nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot) auctionVars := u.vars.Auction
u.rw.RUnlock()
// Get next forgers
lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil nextForgers = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
a.status.Lock()
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
a.status.Network.LastBatch = lastBatch
a.status.Network.CurrentSlot = currentSlot
a.status.Network.NextForgers = nextForgers
// Update buckets withdrawals bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
bucketsUpdate, err := a.h.GetBucketUpdatesAPI() if err == sql.ErrNoRows {
if tracerr.Unwrap(err) == sql.ErrNoRows { bucketUpdates = nil
bucketsUpdate = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for i, bucketParams := range a.status.Rollup.Buckets { u.rw.Lock()
for _, bucketUpdate := range bucketsUpdate { // Update NodeInfo struct
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i { if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals bucketParams.Withdrawals = bucketUpdate.Withdrawals
a.status.Rollup.Buckets[i] = bucketParams u.state.Rollup.Buckets[i] = bucketParams
break break
} }
} }
} }
a.status.Unlock() u.state.Network.LastSyncBlock = lastSyncBlock.Num
return nil u.state.Network.LastEthBlock = lastEthBlock.Num
} u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int u.state.Network.NextForgers = nextForgers
func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) { u.rw.Unlock()
var slots [6]*big.Int
for i, slot := range defaultSlotSetBid {
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
if !ok {
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
}
slots[i] = bigInt
}
return slots, nil
}
// getNextForgers returns next forgers
func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForger{}
// Get min bid info
var minBidInfo []historydb.MinBidInfo
if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
if err != nil {
return nil, tracerr.Wrap(err)
}
minBidInfo = []historydb.MinBidInfo{{
DefaultSlotSetBid: bigIntSlots,
DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
nextForger := NextForger{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = historydb.CoordinatorAPI{
Forger: a.status.Auction.BootCoordinator,
URL: a.status.Auction.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// Metrics
// UpdateMetrics update Status.Metrics information
func (a *API) UpdateMetrics() error {
a.status.RLock()
if a.status.Network.LastBatch == nil {
a.status.RUnlock()
return nil
}
batchNum := a.status.Network.LastBatch.BatchNum
a.status.RUnlock()
metrics, err := a.h.GetMetricsAPI(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Metrics = *metrics
a.status.Unlock()
return nil
}
// Recommended fee
// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
if err != nil {
return tracerr.Wrap(err)
}
var minFeeUSD float64
if a.l2 != nil {
minFeeUSD = a.l2.MinFeeUSD()
}
a.status.Lock()
a.status.RecommendedFee.ExistingAccount =
math.Max(feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.Unlock()
return nil return nil
} }

View File

@@ -13,7 +13,7 @@ import (
type testStatus struct { type testStatus struct {
Network testNetwork `json:"network"` Network testNetwork `json:"network"`
Metrics historydb.Metrics `json:"metrics"` Metrics historydb.MetricsAPI `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"` Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"` Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"` WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@@ -21,18 +21,19 @@ type testStatus struct {
} }
type testNetwork struct { type testNetwork struct {
LastEthBlock int64 `json:"lastEthereumBlock"` LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"` LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"` LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"` CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"` NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
} }
func TestSetRollupVariables(t *testing.T) { func TestSetRollupVariables(t *testing.T) {
rollupVars := &common.RollupVariables{} stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true) require.NoError(t, stateAPIUpdater.Store())
api.SetRollupVariables(tc.rollupVars) ni, err := api.h.GetNodeInfoAPI()
assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true) require.NoError(t, err)
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
} }
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) { func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -51,17 +52,19 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
} }
func TestSetWDelayerVariables(t *testing.T) { func TestSetWDelayerVariables(t *testing.T) {
wdelayerVars := &common.WDelayerVariables{} stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer) require.NoError(t, stateAPIUpdater.Store())
api.SetWDelayerVariables(tc.wdelayerVars) ni, err := api.h.GetNodeInfoAPI()
assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer) require.NoError(t, err)
assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
} }
func TestSetAuctionVariables(t *testing.T) { func TestSetAuctionVariables(t *testing.T) {
auctionVars := &common.AuctionVariables{} stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
assertEqualAuctionVariables(t, *auctionVars, api.status.Auction) require.NoError(t, stateAPIUpdater.Store())
api.SetAuctionVariables(tc.auctionVars) ni, err := api.h.GetNodeInfoAPI()
assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction) require.NoError(t, err)
assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
} }
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) { func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -85,11 +88,6 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
} }
func TestUpdateNetworkInfo(t *testing.T) { func TestUpdateNetworkInfo(t *testing.T) {
status := &Network{}
assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(3) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
@@ -118,14 +116,18 @@ func TestUpdateNetworkInfo(t *testing.T) {
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates) err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
require.NoError(t, err) require.NoError(t, err)
err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) // stateAPIUpdater := NewStateAPIUpdater(hdb)
assert.NoError(t, err) err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock) require.NoError(t, err)
assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum) require.NoError(t, stateAPIUpdater.Store())
assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot) ni, err := api.h.GetNodeInfoAPI()
assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers)) require.NoError(t, err)
assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123))) assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43))) assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
} }
func TestUpdateMetrics(t *testing.T) { func TestUpdateMetrics(t *testing.T) {
@@ -133,51 +135,60 @@ func TestUpdateMetrics(t *testing.T) {
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1) currentSlotNum := int64(1)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err) require.NoError(t, err)
err = api.UpdateMetrics() err = stateAPIUpdater.UpdateMetrics()
assert.NoError(t, err) require.NoError(t, err)
assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0)) require.NoError(t, stateAPIUpdater.Store())
assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0)) ni, err := api.h.GetNodeInfoAPI()
assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0)) require.NoError(t, err)
assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0)) assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0)) assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0)) assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
} }
func TestUpdateRecommendedFee(t *testing.T) { func TestUpdateRecommendedFee(t *testing.T) {
err := api.UpdateRecommendedFee() err := stateAPIUpdater.UpdateRecommendedFee()
assert.NoError(t, err) require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
var minFeeUSD float64 var minFeeUSD float64
if api.l2 != nil { if api.l2 != nil {
minFeeUSD = api.l2.MinFeeUSD() minFeeUSD = api.l2.MinFeeUSD()
} }
assert.Greater(t, api.status.RecommendedFee.ExistingAccount, minFeeUSD) ni, err := api.h.GetNodeInfoAPI()
assert.Equal(t, api.status.RecommendedFee.CreatesAccount, require.NoError(t, err)
api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage) assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister, // assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage) // ni.StateAPI.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountAndRegister,
// ni.StateAPI.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
} }
func TestGetState(t *testing.T) { func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1) currentSlotNum := int64(1)
api.SetRollupVariables(tc.rollupVars) stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
api.SetWDelayerVariables(tc.wdelayerVars) Rollup: &tc.rollupVars,
api.SetAuctionVariables(tc.auctionVars) Auction: &tc.auctionVars,
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) WDelayer: &tc.wdelayerVars,
assert.NoError(t, err) })
err = api.UpdateMetrics() err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err) require.NoError(t, err)
err = api.UpdateRecommendedFee() err = stateAPIUpdater.UpdateMetrics()
assert.NoError(t, err) require.NoError(t, err)
err = stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
endpoint := apiURL + "state" endpoint := apiURL + "state"
var status testStatus var status testStatus
assert.NoError(t, doGoodReq("GET", endpoint, nil, &status)) require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
// SC vars // SC vars
// UpdateNetworkInfo will overwrite buckets withdrawal values // UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -204,13 +215,13 @@ func TestGetState(t *testing.T) {
// Recommended fee // Recommended fee
// TODO: perform real asserts (not just greater than 0) // TODO: perform real asserts (not just greater than 0)
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0)) assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
assert.Equal(t, status.RecommendedFee.CreatesAccount, // assert.Equal(t, status.RecommendedFee.CreatesAccount,
status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage) // status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister, // assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage) // status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
} }
func assertNextForgers(t *testing.T, expected, actual []NextForger) { func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
assert.Equal(t, len(expected), len(actual)) assert.Equal(t, len(expected), len(actual))
for i := range expected { for i := range expected {
// ignore timestamps and other metadata // ignore timestamps and other metadata

View File

@@ -1329,13 +1329,6 @@ components:
type: string type: string
description: Moment in which the transaction was added to the pool. description: Moment in which the transaction was added to the pool.
format: date-time format: date-time
batchNum:
type: integer
description: Identifier of a batch. Every new forged batch increases by one the batchNum, starting at 0.
minimum: 0
maximum: 4294967295
nullable: true
example: null
requestFromAccountIndex: requestFromAccountIndex:
type: string type: string
description: >- description: >-
@@ -1390,7 +1383,6 @@ components:
$ref: '#/components/schemas/Token' $ref: '#/components/schemas/Token'
example: example:
amount: '100000000000000' amount: '100000000000000'
batchNum:
fee: 0 fee: 0
fromAccountIndex: hez:SCC:256 fromAccountIndex: hez:SCC:256
fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn
@@ -1438,7 +1430,6 @@ components:
- info - info
- signature - signature
- timestamp - timestamp
- batchNum
- requestFromAccountIndex - requestFromAccountIndex
- requestToAccountIndex - requestToAccountIndex
- requestToHezEthereumAddress - requestToHezEthereumAddress
@@ -2578,6 +2569,21 @@ components:
description: List of next coordinators to forge. description: List of next coordinators to forge.
items: items:
$ref: '#/components/schemas/NextForger' $ref: '#/components/schemas/NextForger'
NodeConfig:
type: object
description: Configuration of the coordinator node. Note that this is specific for each coordinator.
properties:
forgeDelay:
type: number
description: |
Delay in seconds after which a batch is forged if the slot is
already committed. If set to 0s, the coordinator will continuously
forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value.
example: 193.4
additionalProperties: false
required:
- forgeDelay
State: State:
type: object type: object
description: Gobal variables of the network description: Gobal variables of the network
@@ -2594,6 +2600,8 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer' $ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee: recommendedFee:
$ref: '#/components/schemas/RecommendedFee' $ref: '#/components/schemas/RecommendedFee'
nodeConfig:
$ref: '#/components/schemas/NodeConfig'
additionalProperties: false additionalProperties: false
required: required:
- network - network
@@ -2602,6 +2610,7 @@ components:
- auction - auction
- withdrawalDelayer - withdrawalDelayer
- recommendedFee - recommendedFee
- nodeConfig
StateNetwork: StateNetwork:
type: object type: object
description: Gobal statistics of the network description: Gobal statistics of the network
@@ -2812,6 +2821,10 @@ components:
type: number type: number
description: Average fee percentage paid for L2 transactions in the last 24 hours. description: Average fee percentage paid for L2 transactions in the last 24 hours.
example: 1.54 example: 1.54
estimatedTimeToForgeL1:
type: number
description: Estimated time needed to forge a L1 transaction, from the time it's added on the smart contract, until it's actualy forged. In seconds.
example: 193.4
additionalProperties: false additionalProperties: false
required: required:
- transactionsPerBatch - transactionsPerBatch
@@ -2820,6 +2833,7 @@ components:
- totalAccounts - totalAccounts
- totalBJJs - totalBJJs
- avgTransactionFee - avgTransactionFee
- estimatedTimeToForgeL1
PendingItems: PendingItems:
type: integer type: integer
description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent. description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent.

View File

@@ -14,6 +14,7 @@ Type = "bitfinexV2"
[Debug] [Debug]
APIAddress = "localhost:12345" APIAddress = "localhost:12345"
MeddlerLogs = true MeddlerLogs = true
GinDebugMode = true
[StateDB] [StateDB]
Path = "/tmp/iden3-test/hermez/statedb" Path = "/tmp/iden3-test/hermez/statedb"
@@ -50,6 +51,7 @@ ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordina
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3" # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator # ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563" # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
MinimumForgeAddressBalance = 0
ConfirmBlocks = 10 ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6 L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2 StartSlotBlocksDelay = 2
@@ -104,6 +106,12 @@ GasPriceIncPerc = 10
Path = "/tmp/iden3-test/hermez/ethkeystore" Path = "/tmp/iden3-test/hermez/ethkeystore"
Password = "yourpasswordhere" Password = "yourpasswordhere"
[Coordinator.EthClient.ForgeBatchGasCost]
Fixed = 500000
L1UserTx = 8000
L1CoordTx = 9000
L2Tx = 1
[Coordinator.API] [Coordinator.API]
Coordinator = true Coordinator = true

View File

@@ -107,17 +107,7 @@ func cmdWipeSQL(c *cli.Context) error {
return nil return nil
} }
func cmdRun(c *cli.Context) error { func waitSigInt() {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
stopCh := make(chan interface{}) stopCh := make(chan interface{})
// catch ^C to send the stop signal // catch ^C to send the stop signal
@@ -138,11 +128,40 @@ func cmdRun(c *cli.Context) error {
} }
}() }()
<-stopCh <-stopCh
}
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
waitSigInt()
node.Stop() node.Stop()
return nil return nil
} }
func cmdServeAPI(c *cli.Context) error {
cfg, err := parseCliAPIServer(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
}
srv.Start()
waitSigInt()
srv.Stop()
return nil
}
func cmdDiscard(c *cli.Context) error { func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c) _cfg, err := parseCli(c)
if err != nil { if err != nil {
@@ -225,20 +244,59 @@ func getConfig(c *cli.Context) (*Config, error) {
var cfg Config var cfg Config
mode := c.String(flagMode) mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg) nodeCfgPath := c.String(flagCfg)
if nodeCfgPath == "" {
return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
}
var err error var err error
switch mode { switch mode {
case modeSync: case modeSync:
cfg.mode = node.ModeSynchronizer cfg.mode = node.ModeSynchronizer
cfg.node, err = config.LoadNode(nodeCfgPath) cfg.node, err = config.LoadNode(nodeCfgPath, false)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
case modeCoord: case modeCoord:
cfg.mode = node.ModeCoordinator cfg.mode = node.ModeCoordinator
cfg.node, err = config.LoadCoordinator(nodeCfgPath) cfg.node, err = config.LoadNode(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
default:
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
}
return &cfg, nil
}
// ConfigAPIServer is the configuration of the api server execution
type ConfigAPIServer struct {
mode node.Mode
server *config.APIServer
}
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
cfg, err := getConfigAPIServer(c)
if err != nil {
if err := cli.ShowAppHelp(c); err != nil {
panic(err)
}
return nil, tracerr.Wrap(err)
}
return cfg, nil
}
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
var cfg ConfigAPIServer
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -304,6 +362,12 @@ func main() {
Usage: "Run the hermez-node in the indicated mode", Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun, Action: cmdRun,
}, },
{
Name: "serveapi",
Aliases: []string{},
Usage: "Serve the API only",
Action: cmdServeAPI,
},
{ {
Name: "discard", Name: "discard",
Aliases: []string{}, Aliases: []string{},

View File

@@ -1,21 +1,25 @@
package common package common
import ( import (
"encoding/binary"
"strconv"
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
ethMath "github.com/ethereum/go-ethereum/common/math"
ethCrypto "github.com/ethereum/go-ethereum/crypto" ethCrypto "github.com/ethereum/go-ethereum/crypto"
ethSigner "github.com/ethereum/go-ethereum/signer/core"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez // AccountCreationAuthMsg is the message that is signed to authorize a Hermez
// account creation // account creation
const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollup account creation" const AccountCreationAuthMsg = "Account creation"
// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem // EIP712Version is the used version of the EIP-712
const EthMsgPrefix = "\x19Ethereum Signed Message:\n" const EIP712Version = "1"
// EIP712Provider defines the Provider for the EIP-712
const EIP712Provider = "Hermez Network"
var ( var (
// EmptyEthSignature is an ethereum signature of all zeroes // EmptyEthSignature is an ethereum signature of all zeroes
@@ -31,27 +35,64 @@ type AccountCreationAuth struct {
Timestamp time.Time `meddler:"timestamp,utctime"` Timestamp time.Time `meddler:"timestamp,utctime"`
} }
// toHash returns a byte array to be hashed from the AccountCreationAuth, which
// follows the EIP-712 encoding
func (a *AccountCreationAuth) toHash(chainID uint16, func (a *AccountCreationAuth) toHash(chainID uint16,
hermezContractAddr ethCommon.Address) []byte { hermezContractAddr ethCommon.Address) ([]byte, error) {
var chainIDBytes [2]byte chainIDFormatted := ethMath.NewHexOrDecimal256(int64(chainID))
binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
// [EthPrefix | AccountCreationAuthMsg | compressedBJJ | chainID | hermezContractAddr]
var b []byte
b = append(b, []byte(AccountCreationAuthMsg)...)
b = append(b, SwapEndianness(a.BJJ[:])...) // for js implementation compatibility
b = append(b, chainIDBytes[:]...)
b = append(b, hermezContractAddr[:]...)
ethPrefix := EthMsgPrefix + strconv.Itoa(len(b)) signerData := ethSigner.TypedData{
return append([]byte(ethPrefix), b...) Types: ethSigner.Types{
"EIP712Domain": []ethSigner.Type{
{Name: "name", Type: "string"},
{Name: "version", Type: "string"},
{Name: "chainId", Type: "uint256"},
{Name: "verifyingContract", Type: "address"},
},
"Authorise": []ethSigner.Type{
{Name: "Provider", Type: "string"},
{Name: "Authorisation", Type: "string"},
{Name: "BJJKey", Type: "bytes32"},
},
},
PrimaryType: "Authorise",
Domain: ethSigner.TypedDataDomain{
Name: EIP712Provider,
Version: EIP712Version,
ChainId: chainIDFormatted,
VerifyingContract: hermezContractAddr.Hex(),
},
Message: ethSigner.TypedDataMessage{
"Provider": EIP712Provider,
"Authorisation": AccountCreationAuthMsg,
"BJJKey": SwapEndianness(a.BJJ[:]),
},
}
domainSeparator, err := signerData.HashStruct("EIP712Domain", signerData.Domain.Map())
if err != nil {
return nil, tracerr.Wrap(err)
}
typedDataHash, err := signerData.HashStruct(signerData.PrimaryType, signerData.Message)
if err != nil {
return nil, tracerr.Wrap(err)
}
rawData := []byte{0x19, 0x01} // "\x19\x01"
rawData = append(rawData, domainSeparator...)
rawData = append(rawData, typedDataHash...)
return rawData, nil
} }
// HashToSign returns the hash to be signed by the Etherum address to authorize // HashToSign returns the hash to be signed by the Etherum address to authorize
// the account creation // the account creation, which follows the EIP-712 encoding
func (a *AccountCreationAuth) HashToSign(chainID uint16, func (a *AccountCreationAuth) HashToSign(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) { hermezContractAddr ethCommon.Address) ([]byte, error) {
b := a.toHash(chainID, hermezContractAddr) b, err := a.toHash(chainID, hermezContractAddr)
return ethCrypto.Keccak256Hash(b).Bytes(), nil if err != nil {
return nil, tracerr.Wrap(err)
}
return ethCrypto.Keccak256(b), nil
} }
// Sign signs the account creation authorization message using the provided // Sign signs the account creation authorization message using the provided
@@ -59,16 +100,17 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
// should do an ethereum signature using the account corresponding to // should do an ethereum signature using the account corresponding to
// `a.EthAddr`. The `signHash` function is used to make signig flexible: in // `a.EthAddr`. The `signHash` function is used to make signig flexible: in
// tests we sign directly using the private key, outside tests we sign using // tests we sign directly using the private key, outside tests we sign using
// the keystore (which never exposes the private key). // the keystore (which never exposes the private key). Sign follows the EIP-712
// encoding.
func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error), func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
chainID uint16, hermezContractAddr ethCommon.Address) error { chainID uint16, hermezContractAddr ethCommon.Address) error {
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
if err != nil { if err != nil {
return err return tracerr.Wrap(err)
} }
sig, err := signHash(hash) sig, err := signHash(hash)
if err != nil { if err != nil {
return err return tracerr.Wrap(err)
} }
sig[64] += 27 sig[64] += 27
a.Signature = sig a.Signature = sig
@@ -77,7 +119,8 @@ func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
} }
// VerifySignature ensures that the Signature is done with the EthAddr, for the // VerifySignature ensures that the Signature is done with the EthAddr, for the
// chainID and hermezContractAddress passed by parameter // chainID and hermezContractAddress passed by parameter. VerifySignature
// follows the EIP-712 encoding.
func (a *AccountCreationAuth) VerifySignature(chainID uint16, func (a *AccountCreationAuth) VerifySignature(chainID uint16,
hermezContractAddr ethCommon.Address) bool { hermezContractAddr ethCommon.Address) bool {
// Calculate hash to be signed // Calculate hash to be signed

View File

@@ -39,7 +39,7 @@ func TestAccountCreationAuthSignVerify(t *testing.T) {
// Hash and sign manually and compare the generated signature // Hash and sign manually and compare the generated signature
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e", assert.Equal(t, "9414667457e658dd31949b82996b75c65a055512244c3bbfd22ff56add02ba65",
hex.EncodeToString(hash)) hex.EncodeToString(hash))
sig, err := ethCrypto.Sign(hash, ethSk) sig, err := ethCrypto.Sign(hash, ethSk)
require.NoError(t, err) require.NoError(t, err)
@@ -75,9 +75,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7",
chainID: uint16(4), chainID: uint16(4),
hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf", hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700047e5f4552091a69125d5dfcb7b8c2659029395bdf", toHashExpected: "190189658bba487e11c7da602676ee32bc90b77d3f32a305b147e4f3c3b35f19672e5d84ccc38d0ab245c469b719549d837113465c2abf9972c49403ca6fd10ed3dc",
hashExpected: "39afea52d843a4de905b6b5ebb0ee8c678141f711d96d9b429c4aec10ef9911f", hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2",
sigExpected: "73d10d6ecf06ee8a5f60ac90f06b78bef9c650f414ba3ac73e176dc32e896159147457e9c86f0b4bd60fdaf2c0b2aec890a7df993d69a4805e242a6b845ebf231c", sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b",
} }
tv1 := testVector{ tv1 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000002", ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
@@ -85,9 +85,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d", pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d",
chainID: uint16(0), chainID: uint16(0),
hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf", hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00002b5ad5c4795c026514f8317c7a215e218dccd6cf", toHashExpected: "1901dafbc253dedf90d6421dc6e25d5d9efc6985133cb2a8d363d0a081a0e3eddddc65f603a88de36aaeabd3b4cf586538c7f3fd50c94780530a3707c8c14ad9fd11",
hashExpected: "89a3895993a4736232212e59566294feb3da227af44375daf3307dcad5451d5d", hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959",
sigExpected: "bb4156156c705494ad5f99030342c64657e51e2994750f92125717c40bf56ad632044aa6bd00979feea92c417b552401e65fe5f531f15010d9d1c278da8be1df1b", sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c",
} }
tv2 := testVector{ tv2 := testVector{
ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122", ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
@@ -95,9 +95,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52", pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52",
chainID: uint16(31337), // =0x7a69 chainID: uint16(31337), // =0x7a69
hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c", hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c",
toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b527a69f4e77e5da47ac3125140c470c71cbca77b5c638c", toHashExpected: "190167617949b934d7e01add4009cd3d47415a26727b7d6288e5dce33fb3721d5a1a9ce511b19b694c9aaf8183f4987ed752f24884c54c003d11daa2e98c7547a79e",
hashExpected: "4f6ead01278ba4597d4720e37482f585a713497cea994a95209f4c57a963b4a7", hashExpected: "157b570c597e615b8356ce008ac39f43bc9b6d50080bc07d968031b9378acbbb",
sigExpected: "43b5818802a137a72a190c1d8d767ca507f7a4804b1b69b5e055abf31f4f2b476c80bb1ba63260d95610f6f831420d32130e7f22fec5d76e16644ddfcedd0d441c", sigExpected: "a0766181102428b5672e523dc4b905c10ddf025c10dbd0b3534ef864632a14652737610041c670b302fc7dca28edd5d6eac42b72d69ce58da8ce21287b244e381b",
} }
tvs = append(tvs, tv0) tvs = append(tvs, tv0)
tvs = append(tvs, tv1) tvs = append(tvs, tv1)
@@ -122,10 +122,10 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
BJJ: pkComp, BJJ: pkComp,
} }
toHash := a.toHash(chainID, hermezContractAddr) toHash, err := a.toHash(chainID, hermezContractAddr)
require.NoError(t, err)
assert.Equal(t, tv.toHashExpected, assert.Equal(t, tv.toHashExpected,
hex.EncodeToString(toHash)) hex.EncodeToString(toHash))
assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))
msg, err := a.HashToSign(chainID, hermezContractAddr) msg, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)

33
common/eth.go Normal file
View File

@@ -0,0 +1,33 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
// original SCVariables
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}

View File

@@ -368,19 +368,12 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return tx, nil return tx, nil
} }
func signHash(data []byte) []byte {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
return ethCrypto.Keccak256([]byte(msg))
}
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) { func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != RollupConstL1CoordinatorTotalBytes { if len(b) != RollupConstL1CoordinatorTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
} }
bytesMessage := []byte("I authorize this babyjubjub key for hermez rollup account creation")
tx := &L1Tx{ tx := &L1Tx{
UserOrigin: false, UserOrigin: false,
} }
@@ -401,18 +394,20 @@ func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommo
// L1CoordinatorTX ETH // L1CoordinatorTX ETH
// Ethereum adds 27 to v // Ethereum adds 27 to v
v = b[0] - byte(27) //nolint:gomnd v = b[0] - byte(27) //nolint:gomnd
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
var data []byte
data = append(data, bytesMessage...)
data = append(data, pkCompB...)
data = append(data, chainIDBytes[:]...)
data = append(data, hermezAddress.Bytes()...)
var signature []byte var signature []byte
signature = append(signature, r[:]...) signature = append(signature, r[:]...)
signature = append(signature, s[:]...) signature = append(signature, s[:]...)
signature = append(signature, v) signature = append(signature, v)
hash := signHash(data)
pubKeyBytes, err := ethCrypto.Ecrecover(hash, signature) accCreationAuth := AccountCreationAuth{
BJJ: tx.FromBJJ,
}
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
if err != nil {
return nil, tracerr.Wrap(err)
}
pubKeyBytes, err := ethCrypto.Ecrecover(h, signature)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }

View File

@@ -227,7 +227,6 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
func TestL1CoordinatorTxByteParsers(t *testing.T) { func TestL1CoordinatorTxByteParsers(t *testing.T) {
hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe") hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
chainID := big.NewInt(1337) chainID := big.NewInt(1337)
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19") privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err) require.NoError(t, err)
@@ -245,18 +244,16 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c") pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
err = pkComp.UnmarshalText(pkCompL) err = pkComp.UnmarshalText(pkCompL)
require.NoError(t, err) require.NoError(t, err)
bytesMessage1 := []byte("\x19Ethereum Signed Message:\n120")
bytesMessage2 := []byte("I authorize this babyjubjub key for hermez rollup account creation")
babyjubB := SwapEndianness(pkComp[:]) accCreationAuth := AccountCreationAuth{
var data []byte EthAddr: fromEthAddr,
data = append(data, bytesMessage1...) BJJ: pkComp,
data = append(data, bytesMessage2...) }
data = append(data, babyjubB[:]...)
data = append(data, chainIDBytes...) h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
data = append(data, hermezAddress.Bytes()...) require.NoError(t, err)
hash := crypto.Keccak256Hash(data)
signature, err := crypto.Sign(hash.Bytes(), privateKey) signature, err := crypto.Sign(h, privateKey)
require.NoError(t, err) require.NoError(t, err)
// Ethereum adds 27 to v // Ethereum adds 27 to v
v := int(signature[64]) v := int(signature[64])

View File

@@ -73,7 +73,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original Type doesn't match the correct one, return error // If original Type doesn't match the correct one, return error
if txTypeOld != "" && txTypeOld != tx.Type { if txTypeOld != "" && txTypeOld != tx.Type {
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s", return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
tx.Type, txTypeOld)) txTypeOld, tx.Type))
} }
txIDOld := tx.TxID txIDOld := tx.TxID
@@ -83,7 +83,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original TxID doesn't match the correct one, return error // If original TxID doesn't match the correct one, return error
if txIDOld != (TxID{}) && txIDOld != tx.TxID { if txIDOld != (TxID{}) && txIDOld != tx.TxID {
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s", return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
tx.TxID.String(), txIDOld.String())) txIDOld.String(), tx.TxID.String()))
} }
return tx, nil return tx, nil

View File

@@ -35,10 +35,30 @@ type ServerProof struct {
URL string `validate:"required"` URL string `validate:"required"`
} }
// ForgeBatchGasCost is the costs associated to a ForgeBatch transaction, split
// into different parts to be used in a formula.
type ForgeBatchGasCost struct {
	// Fixed is the gas cost paid by every ForgeBatch regardless of its contents
	Fixed uint64 `validate:"required"`
	// L1UserTx is the additional gas cost per L1 user transaction in the batch
	L1UserTx uint64 `validate:"required"`
	// L1CoordTx is the additional gas cost per L1 coordinator transaction in the batch
	L1CoordTx uint64 `validate:"required"`
	// L2Tx is the additional gas cost per L2 transaction in the batch
	L2Tx uint64 `validate:"required"`
}
// CoordinatorAPI specifies the configuration parameters of the API when the
// node runs in coordinator mode
type CoordinatorAPI struct {
	// Coordinator enables the coordinator API endpoints
	Coordinator bool
}
// Coordinator is the coordinator specific configuration. // Coordinator is the coordinator specific configuration.
type Coordinator struct { type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging // ForgerAddress is the address under which this coordinator is forging
ForgerAddress ethCommon.Address `validate:"required"` ForgerAddress ethCommon.Address `validate:"required"`
// MinimumForgeAddressBalance is the minimum balance the forger address
// needs to start the coordinator in wei. If set to 0, the coordinator
// will not check the balance before starting.
MinimumForgeAddressBalance *big.Int
// FeeAccount is the Hermez account that the coordinator uses to receive fees // FeeAccount is the Hermez account that the coordinator uses to receive fees
FeeAccount struct { FeeAccount struct {
// Address is the ethereum address of the account to receive fees // Address is the ethereum address of the account to receive fees
@@ -180,11 +200,11 @@ type Coordinator struct {
// Password used to decrypt the keys in the keystore // Password used to decrypt the keys in the keystore
Password string `validate:"required"` Password string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"` } `validate:"required"`
API struct { API CoordinatorAPI `validate:"required"`
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
Debug struct { Debug struct {
// BatchPath if set, specifies the path where batchInfo is stored // BatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
@@ -199,6 +219,45 @@ type Coordinator struct {
} }
} }
// PostgreSQL is the postgreSQL configuration parameters. It's possible to use
// differentiated SQL connections for read/write. If the read configuration is
// not provided, the write one is going to be used for both reads and writes
type PostgreSQL struct {
	// Port of the PostgreSQL write server
	PortWrite int `validate:"required"`
	// Host of the PostgreSQL write server
	HostWrite string `validate:"required"`
	// User of the PostgreSQL write server
	UserWrite string `validate:"required"`
	// Password of the PostgreSQL write server
	PasswordWrite string `validate:"required"`
	// Name of the PostgreSQL write server database
	NameWrite string `validate:"required"`
	// Port of the PostgreSQL read server (optional; write server used if unset)
	PortRead int
	// Host of the PostgreSQL read server (optional; write server used if unset)
	HostRead string
	// User of the PostgreSQL read server (optional; write server used if unset)
	UserRead string
	// Password of the PostgreSQL read server (optional; write server used if unset)
	PasswordRead string
	// Name of the PostgreSQL read server database (optional; write server used if unset)
	NameRead string
}
// NodeDebug specifies debug configuration parameters
type NodeDebug struct {
	// APIAddress is the address where the debugAPI will listen if
	// set
	APIAddress string
	// MeddlerLogs enables meddler debug mode, where unused columns and struct
	// fields will be logged
	MeddlerLogs bool
	// GinDebugMode sets Gin-Gonic (the web framework) to run in
	// debug mode
	GinDebugMode bool
}
// Node is the hermez node configuration. // Node is the hermez node configuration.
type Node struct { type Node struct {
PriceUpdater struct { PriceUpdater struct {
@@ -215,32 +274,8 @@ type Node struct {
// Keep is the number of checkpoints to keep // Keep is the number of checkpoints to keep
Keep int `validate:"required"` Keep int `validate:"required"`
} `validate:"required"` } `validate:"required"`
// It's possible to use diferentiated SQL connections for read/write. PostgreSQL PostgreSQL `validate:"required"`
// If the read configuration is not provided, the write one it's going to be used Web3 struct {
// for both reads and writes
PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
HostWrite string `validate:"required"`
// User of the PostgreSQL write server
UserWrite string `validate:"required"`
// Password of the PostgreSQL write server
PasswordWrite string `validate:"required"`
// Name of the PostgreSQL write server database
NameWrite string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
} `validate:"required"`
Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server // URL is the URL of the web3 ethereum-node RPC server
URL string `validate:"required"` URL string `validate:"required"`
} `validate:"required"` } `validate:"required"`
@@ -270,6 +305,7 @@ type Node struct {
// TokenHEZ address // TokenHEZ address
TokenHEZName string `validate:"required"` TokenHEZName string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// API specifies the configuration parameters of the API
API struct { API struct {
// Address where the API will listen if set // Address where the API will listen if set
Address string Address string
@@ -287,17 +323,45 @@ type Node struct {
// can wait to stablish a SQL connection // can wait to stablish a SQL connection
SQLConnectionTimeout Duration SQLConnectionTimeout Duration
} `validate:"required"` } `validate:"required"`
Debug struct { Debug NodeDebug `validate:"required"`
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
}
Coordinator Coordinator `validate:"-"` Coordinator Coordinator `validate:"-"`
} }
// APIServer is the api server configuration parameters
type APIServer struct {
	// API specifies the configuration parameters of the API
	API struct {
		// Address where the API will listen if set
		Address string `validate:"required"`
		// Explorer enables the Explorer API endpoints
		Explorer bool
		// MaxSQLConnections is the maximum number of concurrent
		// connections allowed between API and SQL
		MaxSQLConnections int `validate:"required"`
		// SQLConnectionTimeout is the maximum amount of time that an API request
		// can wait to establish a SQL connection
		SQLConnectionTimeout Duration
	} `validate:"required"`
	// PostgreSQL is the SQL connection configuration
	PostgreSQL PostgreSQL `validate:"required"`
	// Coordinator holds the coordinator-related API configuration
	Coordinator struct {
		API struct {
			// Coordinator enables the coordinator API endpoints
			Coordinator bool
		} `validate:"required"`
		L2DB struct {
			// MaxTxs is the maximum number of pending L2Txs that can be
			// stored in the pool. Once this number of pending L2Txs is
			// reached, inserts to the pool will be denied until some of
			// the pending txs are forged.
			MaxTxs uint32 `validate:"required"`
			// MinFeeUSD is the minimum fee in USD that a tx must pay in
			// order to be accepted into the pool. Txs with lower than
			// minimum fee will be rejected at the API level.
			MinFeeUSD float64
		} `validate:"required"`
	}
	// Debug specifies debug configuration parameters
	Debug NodeDebug `validate:"required"`
}
// Load loads a generic config. // Load loads a generic config.
func Load(path string, cfg interface{}) error { func Load(path string, cfg interface{}) error {
bs, err := ioutil.ReadFile(path) //nolint:gosec bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -311,8 +375,8 @@ func Load(path string, cfg interface{}) error {
return nil return nil
} }
// LoadCoordinator loads the Coordinator configuration from path. // LoadNode loads the Node configuration from path.
func LoadCoordinator(path string) (*Node, error) { func LoadNode(path string, coordinator bool) (*Node, error) {
var cfg Node var cfg Node
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -321,21 +385,28 @@ func LoadCoordinator(path string) (*Node, error) {
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if err := validate.Struct(cfg.Coordinator); err != nil { if coordinator {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
} }
return &cfg, nil return &cfg, nil
} }
// LoadNode loads the Node configuration from path. // LoadAPIServer loads the APIServer configuration from path.
func LoadNode(path string) (*Node, error) { func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
var cfg Node var cfg APIServer
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
} }
validate := validator.New() validate := validator.New()
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil return &cfg, nil
} }

View File

@@ -11,6 +11,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
@@ -115,7 +116,10 @@ type Config struct {
Purger PurgerCfg Purger PurgerCfg
// VerifierIdx is the index of the verifier contract registered in the // VerifierIdx is the index of the verifier contract registered in the
// smart contract // smart contract
VerifierIdx uint8 VerifierIdx uint8
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost config.ForgeBatchGasCost
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
@@ -140,8 +144,8 @@ type Coordinator struct {
pipelineNum int // Pipeline sequential number. The first pipeline is 1 pipelineNum int // Pipeline sequential number. The first pipeline is 1
pipelineFromBatch fromBatch // batch from which we started the pipeline pipelineFromBatch fromBatch // batch from which we started the pipeline
provers []prover.Client provers []prover.Client
consts synchronizer.SCConsts consts common.SCConsts
vars synchronizer.SCVariables vars common.SCVariables
stats synchronizer.Stats stats synchronizer.Stats
started bool started bool
@@ -181,8 +185,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client, serverProofs []prover.Client,
ethClient eth.ClientInterface, ethClient eth.ClientInterface,
scConsts *synchronizer.SCConsts, scConsts *common.SCConsts,
initSCVars *synchronizer.SCVariables, initSCVars *common.SCVariables,
) (*Coordinator, error) { ) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100% // nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -271,13 +275,13 @@ type MsgSyncBlock struct {
Batches []common.BatchData Batches []common.BatchData
// Vars contains each Smart Contract variables if they are updated, or // Vars contains each Smart Contract variables if they are updated, or
// nil if they haven't changed. // nil if they haven't changed.
Vars synchronizer.SCVariablesPtr Vars common.SCVariablesPtr
} }
// MsgSyncReorg indicates a reorg // MsgSyncReorg indicates a reorg
type MsgSyncReorg struct { type MsgSyncReorg struct {
Stats synchronizer.Stats Stats synchronizer.Stats
Vars synchronizer.SCVariablesPtr Vars common.SCVariablesPtr
} }
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
@@ -296,7 +300,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) { func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
if update.Rollup != nil { if update.Rollup != nil {
vars.Rollup = *update.Rollup vars.Rollup = *update.Rollup
} }
@@ -308,7 +312,7 @@ func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariable
} }
} }
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) { func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&c.vars, vars) updateSCVars(&c.vars, vars)
} }
@@ -383,11 +387,23 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
fromBatch.ForgerAddr = c.cfg.ForgerAddress fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0) fromBatch.StateRoot = big.NewInt(0)
} }
// Before starting the pipeline make sure we reset any
// l2tx from the pool that was forged in a batch that
// didn't end up being mined. We are already doing
// this in handleStopPipeline, but we do it again as a
// failsafe in case the last synced batchnum is
// different than in the previous call to l2DB.Reorg,
// or in case the node was restarted when there was a
// started batch that included l2txs but was not mined.
if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
return tracerr.Wrap(err)
}
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineFromBatch = fromBatch c.pipelineFromBatch = fromBatch
// Start the pipeline
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil { if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -508,7 +524,7 @@ func (c *Coordinator) Start() {
c.wg.Add(1) c.wg.Add(1)
go func() { go func() {
waitCh := time.After(longWaitDuration) timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
@@ -520,24 +536,27 @@ func (c *Coordinator) Start() {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.handleMsg", "err", err) log.Errorw("Coordinator.handleMsg", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval) if !timer.Stop() {
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
waitCh = time.After(longWaitDuration) case <-timer.C:
case <-waitCh: timer.Reset(longWaitDuration)
if !c.stats.Synced() { if !c.stats.Synced() {
waitCh = time.After(longWaitDuration)
continue continue
} }
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil { if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
waitCh = time.After(longWaitDuration)
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.syncStats", "err", err) log.Errorw("Coordinator.syncStats", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval) if !timer.Stop() {
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
waitCh = time.After(longWaitDuration)
} }
} }
}() }()

View File

@@ -187,12 +187,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
&prover.MockClient{Delay: 400 * time.Millisecond}, &prover.MockClient{Delay: 400 * time.Millisecond},
} }
scConsts := &synchronizer.SCConsts{ scConsts := &common.SCConsts{
Rollup: *ethClientSetup.RollupConstants, Rollup: *ethClientSetup.RollupConstants,
Auction: *ethClientSetup.AuctionConstants, Auction: *ethClientSetup.AuctionConstants,
WDelayer: *ethClientSetup.WDelayerConstants, WDelayer: *ethClientSetup.WDelayerConstants,
} }
initSCVars := &synchronizer.SCVariables{ initSCVars := &common.SCVariables{
Rollup: *ethClientSetup.RollupVariables, Rollup: *ethClientSetup.RollupVariables,
Auction: *ethClientSetup.AuctionVariables, Auction: *ethClientSetup.AuctionVariables,
WDelayer: *ethClientSetup.WDelayerVariables, WDelayer: *ethClientSetup.WDelayerVariables,
@@ -517,7 +517,7 @@ func TestCoordinatorStress(t *testing.T) {
wg.Add(1) wg.Add(1)
go func() { go func() {
for { for {
blockData, _, err := syn.Sync2(ctx, nil) blockData, _, err := syn.Sync(ctx, nil)
if ctx.Err() != nil { if ctx.Err() != nil {
wg.Done() wg.Done()
return return
@@ -528,7 +528,7 @@ func TestCoordinatorStress(t *testing.T) {
coord.SendMsg(ctx, MsgSyncBlock{ coord.SendMsg(ctx, MsgSyncBlock{
Stats: *stats, Stats: *stats,
Batches: blockData.Rollup.Batches, Batches: blockData.Rollup.Batches,
Vars: synchronizer.SCVariablesPtr{ Vars: common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars, Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars, Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars, WDelayer: blockData.WDelayer.Vars,

View File

@@ -22,7 +22,7 @@ import (
type statsVars struct { type statsVars struct {
Stats synchronizer.Stats Stats synchronizer.Stats
Vars synchronizer.SCVariablesPtr Vars common.SCVariablesPtr
} }
type state struct { type state struct {
@@ -36,7 +36,7 @@ type state struct {
type Pipeline struct { type Pipeline struct {
num int num int
cfg Config cfg Config
consts synchronizer.SCConsts consts common.SCConsts
// state // state
state state state state
@@ -57,7 +57,7 @@ type Pipeline struct {
purger *Purger purger *Purger
stats synchronizer.Stats stats synchronizer.Stats
vars synchronizer.SCVariables vars common.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
ctx context.Context ctx context.Context
@@ -90,7 +90,7 @@ func NewPipeline(ctx context.Context,
coord *Coordinator, coord *Coordinator,
txManager *TxManager, txManager *TxManager,
provers []prover.Client, provers []prover.Client,
scConsts *synchronizer.SCConsts, scConsts *common.SCConsts,
) (*Pipeline, error) { ) (*Pipeline, error) {
proversPool := NewProversPool(len(provers)) proversPool := NewProversPool(len(provers))
proversPoolSize := 0 proversPoolSize := 0
@@ -124,7 +124,7 @@ func NewPipeline(ctx context.Context,
} }
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats // SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) { func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
select { select {
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
@@ -133,7 +133,7 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state // reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error { stats *synchronizer.Stats, vars *common.SCVariables) error {
p.state = state{ p.state = state{
batchNum: batchNum, batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum, lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
@@ -194,7 +194,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
return nil return nil
} }
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) { func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&p.vars, vars) updateSCVars(&p.vars, vars)
} }
@@ -255,7 +255,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// Start the forging pipeline // Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum, func (p *Pipeline) Start(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error { stats *synchronizer.Stats, vars *common.SCVariables) error {
if p.started { if p.started {
log.Fatal("Pipeline already started") log.Fatal("Pipeline already started")
} }
@@ -271,7 +271,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Add(1) p.wg.Add(1)
go func() { go func() {
waitCh := time.After(zeroDuration) timer := time.NewTimer(zeroDuration)
for { for {
select { select {
case <-p.ctx.Done(): case <-p.ctx.Done():
@@ -281,23 +281,21 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case statsVars := <-p.statsVarsCh: case statsVars := <-p.statsVarsCh:
p.stats = statsVars.Stats p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars) p.syncSCVars(statsVars.Vars)
case <-waitCh: case <-timer.C:
timer.Reset(p.cfg.ForgeRetryInterval)
// Once errAtBatchNum != 0, we stop forging // Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we // batches because there's been an error and we
// wait for the pipeline to be stopped. // wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 { if p.getErrAtBatchNum() != 0 {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} }
batchNum = p.state.batchNum + 1 batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum) batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced || } else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay || tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay { tracerr.Unwrap(err) == errForgeBeforeDelay {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} else if err != nil { } else if err != nil {
p.setErrAtBatchNum(batchNum) p.setErrAtBatchNum(batchNum)
@@ -306,7 +304,6 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
"Pipeline.handleForgBatch: %v", err), "Pipeline.handleForgBatch: %v", err),
FailedBatchNum: batchNum, FailedBatchNum: batchNum,
}) })
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} }
p.lastForgeTime = time.Now() p.lastForgeTime = time.Now()
@@ -316,7 +313,10 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case batchChSentServerProof <- batchInfo: case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done(): case <-p.ctx.Done():
} }
waitCh = time.After(zeroDuration) if !timer.Stop() {
<-timer.C
}
timer.Reset(zeroDuration)
} }
} }
}() }()

View File

@@ -148,7 +148,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
ctx := context.Background() ctx := context.Background()
for { for {
syncBlock, discards, err := sync.Sync2(ctx, nil) syncBlock, discards, err := sync.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -206,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
require.NoError(t, err) require.NoError(t, err)
} }
err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{ err = pipeline.reset(batchNum, syncStats, syncSCVars)
Rollup: *syncSCVars.Rollup,
Auction: *syncSCVars.Auction,
WDelayer: *syncSCVars.WDelayer,
})
require.NoError(t, err) require.NoError(t, err)
// Sanity check // Sanity check
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts() sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()

View File

@@ -31,10 +31,10 @@ type TxManager struct {
batchCh chan *BatchInfo batchCh chan *BatchInfo
chainID *big.Int chainID *big.Int
account accounts.Account account accounts.Account
consts synchronizer.SCConsts consts common.SCConsts
stats synchronizer.Stats stats synchronizer.Stats
vars synchronizer.SCVariables vars common.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum discardPipelineCh chan int // int refers to the pipelineNum
@@ -55,7 +55,7 @@ type TxManager struct {
// NewTxManager creates a new TxManager // NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB, func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (*TxManager, error) { coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (*TxManager, error) {
chainID, err := ethClient.EthChainID() chainID, err := ethClient.EthChainID()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -102,7 +102,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
} }
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats // SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) { func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
select { select {
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
@@ -118,12 +118,12 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
} }
} }
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) { func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&t.vars, vars) updateSCVars(&t.vars, vars)
} }
// NewAuth generates a new auth object for an ethereum transaction // NewAuth generates a new auth object for an ethereum transaction
func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) { func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) {
gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx) gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -143,15 +143,12 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
auth.Value = big.NewInt(0) // in wei auth.Value = big.NewInt(0) // in wei
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
// This requires a function that estimates the gas usage of the gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
// forgeBatch call based on the contents of the ForgeBatch args: uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
// - length of l2txs uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
// - length of l1Usertxs uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
// - length of l1CoordTxs with authorization signature auth.GasLimit = gasLimit
// - length of l1CoordTxs without authoriation signature
// - etc.
auth.GasLimit = 1000000
auth.GasPrice = gasPrice auth.GasPrice = gasPrice
auth.Nonce = nil auth.Nonce = nil
@@ -191,7 +188,7 @@ func addPerc(v *big.Int, p int64) *big.Int {
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error { func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
var ethTx *types.Transaction var ethTx *types.Transaction
var err error var err error
auth, err := t.NewAuth(ctx) auth, err := t.NewAuth(ctx, batchInfo)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -419,8 +416,6 @@ func (q *Queue) Push(batchInfo *BatchInfo) {
// Run the TxManager // Run the TxManager
func (t *TxManager) Run(ctx context.Context) { func (t *TxManager) Run(ctx context.Context) {
waitCh := time.After(longWaitDuration)
var statsVars statsVars var statsVars statsVars
select { select {
case statsVars = <-t.statsVarsCh: case statsVars = <-t.statsVarsCh:
@@ -431,6 +426,7 @@ func (t *TxManager) Run(ctx context.Context) {
log.Infow("TxManager: received initial statsVars", log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum) "block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -474,13 +470,17 @@ func (t *TxManager) Run(ctx context.Context) {
continue continue
} }
t.queue.Push(batchInfo) t.queue.Push(batchInfo)
waitCh = time.After(t.cfg.TxManagerCheckInterval) if !timer.Stop() {
case <-waitCh: <-timer.C
}
timer.Reset(t.cfg.TxManagerCheckInterval)
case <-timer.C:
queuePosition, batchInfo := t.queue.Next() queuePosition, batchInfo := t.queue.Next()
if batchInfo == nil { if batchInfo == nil {
waitCh = time.After(longWaitDuration) timer.Reset(longWaitDuration)
continue continue
} }
timer.Reset(t.cfg.TxManagerCheckInterval)
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil { if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck

View File

@@ -1,8 +1,11 @@
package historydb package historydb
import ( import (
"database/sql"
"errors" "errors"
"fmt" "fmt"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
@@ -32,9 +35,18 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
// GetBatchInternalAPI return the batch with the given batchNum
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{} batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow( return batch, tracerr.Wrap(meddler.QueryRow(
hdb.dbRead, batch, d, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num, `SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root, batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num, batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -180,6 +192,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
}
func (hdb *HistoryDB) getBestBidsAPI(
d meddler.DB,
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string var query string
var args []interface{} var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator // JOIN the best bid of each slot with the latest update of each coordinator
@@ -214,7 +234,7 @@ func (hdb *HistoryDB) GetBestBidsAPI(
} }
query = hdb.dbRead.Rebind(queryStr) query = hdb.dbRead.Rebind(queryStr)
bidPtrs := []*BidAPI{} bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil { if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// log.Debug(query) // log.Debug(query)
@@ -697,25 +717,6 @@ func (hdb *HistoryDB) GetExitsAPI(
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
} }
// GetBucketUpdatesAPI retrieves latest values for each bucket
func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
var bucketUpdates []*BucketUpdateAPI
err = meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info // GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI( func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address, bidderAddr, forgerAddr *ethCommon.Address,
@@ -800,29 +801,6 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
return auctionVars, tracerr.Wrap(err) return auctionVars, tracerr.Wrap(err)
} }
// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems)
if err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}
// GetAccountAPI returns an account by its index // GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) { func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
cancel, err := hdb.apiConnCon.Acquire() cancel, err := hdb.apiConnCon.Acquire()
@@ -941,112 +919,6 @@ func (hdb *HistoryDB) GetAccountsAPI(
accounts[0].TotalItems - uint64(len(accounts)), nil accounts[0].TotalItems - uint64(len(accounts)), nil
} }
// GetMetricsAPI returns metrics
func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT
COALESCE (MIN(batch.batch_num), 0) as batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
// Avoid dividing by 0
if seconds == 0 {
seconds++
}
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
} else {
metrics.TransactionsPerBatch = float64(0)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else {
metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
metrics.AvgTransactionFee = 0
}
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil {
return nil, tracerr.Wrap(err)
}
return metrics, nil
}
// GetAvgTxFeeAPI returns average transaction fee of the last 1h
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
if err != nil {
return 0, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return 0, tracerr.Wrap(err)
}
var avgTransactionFee float64
if metricsTotals.TotalTransactions > 0 {
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
avgTransactionFee = 0
}
return avgTransactionFee, nil
}
// GetCommonAccountAPI returns the account associated to an account idx // GetCommonAccountAPI returns the account associated to an account idx
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) { func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
cancel, err := hdb.apiConnCon.Acquire() cancel, err := hdb.apiConnCon.Acquire()
@@ -1061,3 +933,253 @@ func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, erro
) )
return account, tracerr.Wrap(err) return account, tracerr.Wrap(err)
} }
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
}
func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
d, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// GetNodeInfoAPI retusnt he NodeInfo
func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.GetNodeInfo()
}
// GetBucketUpdatesInternalAPI returns the latest bucket updates
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetNextForgersInternalAPI returns next forgers
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
auctionConsts *common.AuctionConstants,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForgerAPI{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum - 1
nextForger := NextForgerAPI{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
var metrics MetricsAPI
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: lastBatchNum,
}
if err := meddler.QueryRow(
hdb.dbRead, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++
}
metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch
nBatches := p.ToBatchNum - p.FromBatchNum + 1
if nBatches == 0 { // Avoid dividing by 0
nBatches++
}
if (p.ToBatchNum - p.FromBatchNum) > 0 {
fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else {
metrics.TransactionsPerBatch = 0
}
// Get total fee of that period
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err)
}
// Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches)
if nTxs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nTxs)
} else {
metrics.AvgTransactionFee = 0
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"`
TotalBJJ int64 `meddler:"total_bjj"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.TotalAccounts = ra.TotalIdx
metrics.TotalBJJs = ra.TotalBJJ
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil
}
// GetStateAPI returns the StateAPI
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getStateAPI(hdb.dbRead)
}

View File

@@ -841,6 +841,18 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err) return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
} }
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
minBidInfo := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;`
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error { func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 { if len(tokenExchanges) == 0 {
return nil return nil
@@ -1139,17 +1151,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(txn.Commit()) return tracerr.Wrap(txn.Commit())
} }
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
hdb.dbRead, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// AddAuctionVars insert auction vars into the DB // AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error { func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars)) return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
@@ -1169,3 +1170,49 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
} }
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
} }
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of batches and acumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Update NodeInfo struct
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
return &recommendedFee, nil
}

View File

@@ -1172,7 +1172,7 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
@@ -1250,7 +1250,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1) assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
@@ -1265,13 +1265,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) { func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsAPI(0) _, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1460,3 +1454,65 @@ func setTestBlocks(from, to int64) []common.Block {
} }
return blocks return blocks
} }
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetStateInternalAPI(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
WithdrawalDelayer: *clientSetup.WDelayerVariables,
RecommendedFee: common.RecommendedFee{
ExistingAccount: 0.15,
},
}
err = historyDB.SetStateInternalAPI(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
MaxPoolTxs: 123,
MinFeeUSD: 0.5,
}
err = historyDB.SetNodeConfig(nodeConfig)
require.NoError(t, err)
dbConstants, err := historyDB.GetConstants()
require.NoError(t, err)
assert.Equal(t, constants, dbConstants)
dbNodeConfig, err := historyDB.GetNodeConfig()
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
require.NoError(t, err)
assert.Equal(t, stateAPI, dbStateAPI)
}

169
db/historydb/nodeinfo.go Normal file
View File

@@ -0,0 +1,169 @@
package historydb
import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Period represents a time period in ethereum
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
// NextForgerAPI represents the next forger exposed via the API
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// NetworkAPI is the network state exposed via the API
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
type NodePublicConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// Constants contains network constants
type Constants struct {
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
// NodeConfig contains the node config exposed in the API
type NodeConfig struct {
MaxPoolTxs uint32 `meddler:"max_pool_txs"`
MinFeeUSD float64 `meddler:"min_fee"`
}
// NodeInfo contains information about he node used when serving the API
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
StateAPI *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
// GetNodeInfo returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
ni := &NodeInfo{}
err := meddler.QueryRow(
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
)
return ni, tracerr.Wrap(err)
}
// GetConstants returns the Constats
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT constants FROM node_info WHERE item_id = 1;",
)
return nodeInfo.Constants, tracerr.Wrap(err)
}
// SetConstants sets the Constants
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
_constants := struct {
Constants *Constants `meddler:"constants,json"`
}{constants}
values, err := meddler.Default.Values(&_constants, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetStateInternalAPI returns the StateAPI
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.StateAPI, tracerr.Wrap(err)
}
// SetStateInternalAPI sets the StateAPI
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
_stateAPI := struct {
StateAPI *StateAPI `meddler:"state,json"`
}{stateAPI}
values, err := meddler.Default.Values(&_stateAPI, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetNodeConfig returns the NodeConfig
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT config FROM node_info WHERE item_id = 1;",
)
return nodeInfo.NodeConfig, tracerr.Wrap(err)
}
// SetNodeConfig sets the NodeConfig
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
_nodeConfig := struct {
NodeConfig *NodeConfig `meddler:"config,json"`
}{nodeConfig}
values, err := meddler.Default.Values(&_nodeConfig, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}

View File

@@ -302,25 +302,15 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"` LastItem uint64 `json:"-" meddler:"last_item"`
} }
// Metrics define metrics of the network // MetricsAPI define metrics of the network
type Metrics struct { type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"` TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"` BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"` TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"` TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"` TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"` AvgTransactionFee float64 `json:"avgTransactionFee"`
} EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"`
// MetricsTotals is used to get temporal information from HistoryDB
// to calculate data to be stored into the Metrics struct
type MetricsTotals struct {
TotalTransactions uint64 `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
TotalBatches int64 `meddler:"total_batches"`
TotalFeesUSD float64 `meddler:"total_fees"`
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
} }
// BidAPI is a representation of a bid with additional information // BidAPI is a representation of a bid with additional information
@@ -373,6 +363,27 @@ type RollupVariablesAPI struct {
SafeMode bool `json:"safeMode" meddler:"safe_mode"` SafeMode bool `json:"safeMode" meddler:"safe_mode"`
} }
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
rollupVars := RollupVariablesAPI{
EthBlockNum: rollupVariables.EthBlockNum,
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
WithdrawalDelay: rollupVariables.WithdrawalDelay,
SafeMode: rollupVariables.SafeMode,
}
for i, bucket := range rollupVariables.Buckets {
rollupVars.Buckets[i] = BucketParamsAPI{
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
}
}
return &rollupVars
}
// AuctionVariablesAPI are the variables of the Auction Smart Contract // AuctionVariablesAPI are the variables of the Auction Smart Contract
type AuctionVariablesAPI struct { type AuctionVariablesAPI struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -397,3 +408,28 @@ type AuctionVariablesAPI struct {
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before // SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"` SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
} }
// NewAuctionVariablesAPI creates a AuctionVariablesAPI from common.AuctionVariables
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
auctionVars := AuctionVariablesAPI{
EthBlockNum: auctionVariables.EthBlockNum,
DonationAddress: auctionVariables.DonationAddress,
BootCoordinator: auctionVariables.BootCoordinator,
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
Outbidding: auctionVariables.Outbidding,
SlotDeadline: auctionVariables.SlotDeadline,
}
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionVars.AllocationRatio[i] = ratio
}
return &auctionVars
}

View File

@@ -50,7 +50,7 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
row := l2db.dbRead.QueryRow(`SELECT row := l2db.dbRead.QueryRow(`SELECT
($1::NUMERIC * token.usd * fee_percentage($2::NUMERIC)) / ($1::NUMERIC * COALESCE(token.usd, 0) * fee_percentage($2::NUMERIC)) /
(10.0 ^ token.decimals::NUMERIC) (10.0 ^ token.decimals::NUMERIC)
FROM token WHERE token.token_id = $3;`, FROM token WHERE token.token_id = $3;`,
tx.AmountFloat, tx.Fee, tx.TokenID) tx.AmountFloat, tx.Fee, tx.TokenID)

View File

@@ -95,7 +95,6 @@ func (tx PoolTxAPI) MarshalJSON() ([]byte, error) {
"info": tx.Info, "info": tx.Info,
"signature": tx.Signature, "signature": tx.Signature,
"timestamp": tx.Timestamp, "timestamp": tx.Timestamp,
"batchNum": tx.BatchNum,
"requestFromAccountIndex": tx.RqFromIdx, "requestFromAccountIndex": tx.RqFromIdx,
"requestToAccountIndex": tx.RqToIdx, "requestToAccountIndex": tx.RqToIdx,
"requestToHezEthereumAddress": tx.RqToEthAddr, "requestToHezEthereumAddress": tx.RqToEthAddr,

View File

@@ -661,6 +661,16 @@ CREATE TABLE account_creation_auth (
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now()) timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
); );
CREATE TABLE node_info (
item_id SERIAL PRIMARY KEY,
state BYTEA, -- object returned by GET /state
config BYTEA, -- Node config
-- max_pool_txs BIGINT, -- L2DB config
-- min_fee NUMERIC, -- L2DB config
constants BYTEA -- info of the network that is constant
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
-- +migrate Down -- +migrate Down
-- triggers -- triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token; DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
@@ -675,6 +685,7 @@ DROP FUNCTION IF EXISTS set_tx;
DROP FUNCTION IF EXISTS forge_l1_user_txs; DROP FUNCTION IF EXISTS forge_l1_user_txs;
DROP FUNCTION IF EXISTS set_pool_tx; DROP FUNCTION IF EXISTS set_pool_tx;
-- drop tables IF EXISTS -- drop tables IF EXISTS
DROP TABLE IF EXISTS node_info;
DROP TABLE IF EXISTS account_creation_auth; DROP TABLE IF EXISTS account_creation_auth;
DROP TABLE IF EXISTS tx_pool; DROP TABLE IF EXISTS tx_pool;
DROP TABLE IF EXISTS auction_vars; DROP TABLE IF EXISTS auction_vars;

View File

@@ -53,9 +53,10 @@ const (
// Node is the Hermez Node // Node is the Hermez Node
type Node struct { type Node struct {
nodeAPI *NodeAPI nodeAPI *NodeAPI
debugAPI *debugapi.DebugAPI stateAPIUpdater *api.StateAPIUpdater
priceUpdater *priceupdater.PriceUpdater debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
// Coordinator // Coordinator
coord *coordinator.Coordinator coord *coordinator.Coordinator
@@ -67,6 +68,7 @@ type Node struct {
mode Mode mode Mode
sqlConnRead *sqlx.DB sqlConnRead *sqlx.DB
sqlConnWrite *sqlx.DB sqlConnWrite *sqlx.DB
historyDB *historydb.HistoryDB
ctx context.Context ctx context.Context
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
@@ -137,6 +139,23 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path, keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
scryptN, scryptP) scryptN, scryptP)
balance, err := ethClient.BalanceAt(context.TODO(), cfg.Coordinator.ForgerAddress, nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
minForgeBalance := cfg.Coordinator.MinimumForgeAddressBalance
if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
return nil, tracerr.Wrap(fmt.Errorf(
"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
balance.Int64(), minForgeBalance))
}
log.Infow("forger ethereum account balance",
"addr", cfg.Coordinator.ForgerAddress,
"balance", balance.Int64(),
"minForgeBalance", minForgeBalance.Int64(),
)
// Unlock Coordinator ForgerAddr in the keystore to make calls // Unlock Coordinator ForgerAddr in the keystore to make calls
// to ForgeBatch in the smart contract // to ForgeBatch in the smart contract
if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) { if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
@@ -212,12 +231,34 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
initSCVars := sync.SCVars() initSCVars := sync.SCVars()
scConsts := synchronizer.SCConsts{ scConsts := common.SCConsts{
Rollup: *sync.RollupConstants(), Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(), Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(), WDelayer: *sync.WDelayerConstants(),
} }
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := api.NewStateAPIUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
var l2DB *l2db.L2DB var l2DB *l2db.L2DB
if mode == ModeCoordinator { if mode == ModeCoordinator {
@@ -336,6 +377,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay, PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay, InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
}, },
ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
VerifierIdx: uint8(verifierIdx), VerifierIdx: uint8(verifierIdx),
TxProcessorConfig: txProcessorCfg, TxProcessorConfig: txProcessorCfg,
}, },
@@ -346,11 +388,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs, serverProofs,
client, client,
&scConsts, &scConsts,
&synchronizer.SCVariables{ initSCVars,
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -358,6 +396,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
var nodeAPI *NodeAPI var nodeAPI *NodeAPI
if cfg.API.Address != "" { if cfg.API.Address != "" {
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
if cfg.API.UpdateMetricsInterval.Duration == 0 { if cfg.API.UpdateMetricsInterval.Duration == 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v", return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
cfg.API.UpdateMetricsInterval.Duration)) cfg.API.UpdateMetricsInterval.Duration))
@@ -377,22 +420,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
coord, cfg.API.Explorer, coord, cfg.API.Explorer,
server, server,
historyDB, historyDB,
stateDB,
l2DB, l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
} }
var debugAPI *debugapi.DebugAPI var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" { if cfg.Debug.APIAddress != "" {
@@ -405,20 +437,138 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
return &Node{ return &Node{
nodeAPI: nodeAPI, stateAPIUpdater: stateAPIUpdater,
debugAPI: debugAPI, nodeAPI: nodeAPI,
priceUpdater: priceUpdater, debugAPI: debugAPI,
coord: coord, priceUpdater: priceUpdater,
sync: sync, coord: coord,
cfg: cfg, sync: sync,
mode: mode, cfg: cfg,
sqlConnRead: dbRead, mode: mode,
sqlConnWrite: dbWrite, sqlConnRead: dbRead,
ctx: ctx, sqlConnWrite: dbWrite,
cancel: cancel, historyDB: historyDB,
ctx: ctx,
cancel: cancel,
}, nil }, nil
} }
// APIServer is a server that only runs the API
type APIServer struct {
nodeAPI *NodeAPI
mode Mode
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
}
// NewAPIServer creates a new APIServer
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
meddler.Debug = cfg.Debug.MeddlerLogs
// Stablish DB connection
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
apiConnCon := dbUtils.NewAPICnnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
0,
apiConnCon,
)
}
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = cfg.Coordinator.API.Coordinator
}
nodeAPI, err := NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
l2DB,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &APIServer{
nodeAPI: nodeAPI,
mode: mode,
ctx: ctx,
cancel: cancel,
}, nil
}
// Start the APIServer
func (s *APIServer) Start() {
log.Infow("Starting api server...", "mode", s.mode)
log.Info("Starting NodeAPI...")
s.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
s.wg.Done()
}()
if err := s.nodeAPI.Run(s.ctx); err != nil {
if s.ctx.Err() != nil {
return
}
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Stop the APIServer
func (s *APIServer) Stop() {
log.Infow("Stopping NodeAPI...")
s.cancel()
s.wg.Wait()
}
// NodeAPI holds the node http API // NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint type NodeAPI struct { //nolint:golint
api *api.API api *api.API
@@ -438,9 +588,7 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints bool, coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine, server *gin.Engine,
hdb *historydb.HistoryDB, hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *api.Config,
) (*NodeAPI, error) { ) (*NodeAPI, error) {
engine := gin.Default() engine := gin.Default()
engine.NoRoute(handleNoRoute) engine.NoRoute(handleNoRoute)
@@ -450,7 +598,6 @@ func NewNodeAPI(
engine, engine,
hdb, hdb,
l2db, l2db,
config,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -495,64 +642,57 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil return nil
} }
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr, func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr,
batches []common.BatchData) { batches []common.BatchData) error {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{ n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats, Stats: *stats,
Vars: vars, Vars: *vars,
Batches: batches, Batches: batches,
}) })
} }
if n.nodeAPI != nil { n.stateAPIUpdater.SetSCVars(vars)
if vars.Rollup != nil { if stats.Synced() {
n.nodeAPI.api.SetRollupVariables(*vars.Rollup) if err := n.stateAPIUpdater.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
} }
if vars.Auction != nil { } else {
n.nodeAPI.api.SetAuctionVariables(*vars.Auction) n.stateAPIUpdater.UpdateNetworkInfoBlock(
}
if vars.WDelayer != nil {
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
}
if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
} else {
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
}
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: vars,
})
}
if n.nodeAPI != nil {
vars := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
) )
} }
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariables) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: *vars.AsPtr(),
})
}
n.stateAPIUpdater.SetSCVars(vars.AsPtr())
n.stateAPIUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
} }
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we // TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around. // don't have to pass it around.
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) { func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
blockData, discarded, err := n.sync.Sync2(ctx, lastBlock) blockData, discarded, err := n.sync.Sync(ctx, lastBlock)
stats := n.sync.Stats() stats := n.sync.Stats()
if err != nil { if err != nil {
// case: error // case: error
@@ -561,16 +701,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg // case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded) log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars() vars := n.sync.SCVars()
n.handleReorg(ctx, stats, vars) if err := n.handleReorg(ctx, stats, vars); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return nil, time.Duration(0), nil return nil, time.Duration(0), nil
} else if blockData != nil { } else if blockData != nil {
// case: new block // case: new block
vars := synchronizer.SCVariablesPtr{ vars := common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars, Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars, Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars, WDelayer: blockData.WDelayer.Vars,
} }
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches) if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return &blockData.Block, time.Duration(0), nil return &blockData.Block, time.Duration(0), nil
} else { } else {
// case: no block // case: no block
@@ -589,7 +733,9 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized // the last synced one) is synchronized
stats := n.sync.Stats() stats := n.sync.Stats()
vars := n.sync.SCVars() vars := n.sync.SCVars()
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{}) if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
log.Fatalw("Node.handleNewBlock", "err", err)
}
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
@@ -676,18 +822,25 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup // Do an initial update on startup
if err := n.nodeAPI.api.UpdateMetrics(); err != nil { if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err) log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("API.UpdateMetrics loop done") log.Info("ApiStateUpdater.UpdateMetrics loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration): case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.nodeAPI.api.UpdateMetrics(); err != nil { if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err) log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }
@@ -696,18 +849,25 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup // Do an initial update on startup
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil { if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err) log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("API.UpdateRecommendedFee loop done") log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration): case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil { if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err) log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }

View File

@@ -183,26 +183,26 @@ type StartBlockNums struct {
} }
// SCVariables joins all the smart contract variables in a single struct // SCVariables joins all the smart contract variables in a single struct
type SCVariables struct { // type SCVariables struct {
Rollup common.RollupVariables `validate:"required"` // Rollup common.RollupVariables `validate:"required"`
Auction common.AuctionVariables `validate:"required"` // Auction common.AuctionVariables `validate:"required"`
WDelayer common.WDelayerVariables `validate:"required"` // WDelayer common.WDelayerVariables `validate:"required"`
} // }
//
// SCVariablesPtr joins all the smart contract variables as pointers in a single // // SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct // // struct
type SCVariablesPtr struct { // type SCVariablesPtr struct {
Rollup *common.RollupVariables `validate:"required"` // Rollup *common.RollupVariables `validate:"required"`
Auction *common.AuctionVariables `validate:"required"` // Auction *common.AuctionVariables `validate:"required"`
WDelayer *common.WDelayerVariables `validate:"required"` // WDelayer *common.WDelayerVariables `validate:"required"`
} // }
//
// SCConsts joins all the smart contract constants in a single struct // // SCConsts joins all the smart contract constants in a single struct
type SCConsts struct { // type SCConsts struct {
Rollup common.RollupConstants // Rollup common.RollupConstants
Auction common.AuctionConstants // Auction common.AuctionConstants
WDelayer common.WDelayerConstants // WDelayer common.WDelayerConstants
} // }
// Config is the Synchronizer configuration // Config is the Synchronizer configuration
type Config struct { type Config struct {
@@ -213,13 +213,13 @@ type Config struct {
// Synchronizer implements the Synchronizer type // Synchronizer implements the Synchronizer type
type Synchronizer struct { type Synchronizer struct {
ethClient eth.ClientInterface ethClient eth.ClientInterface
consts SCConsts consts common.SCConsts
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
stateDB *statedb.StateDB stateDB *statedb.StateDB
cfg Config cfg Config
initVars SCVariables initVars common.SCVariables
startBlockNum int64 startBlockNum int64
vars SCVariables vars common.SCVariables
stats *StatsHolder stats *StatsHolder
resetStateFailed bool resetStateFailed bool
} }
@@ -242,7 +242,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w", return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err)) err))
} }
consts := SCConsts{ consts := common.SCConsts{
Rollup: *rollupConstants, Rollup: *rollupConstants,
Auction: *auctionConstants, Auction: *auctionConstants,
WDelayer: *wDelayerConstants, WDelayer: *wDelayerConstants,
@@ -307,11 +307,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
} }
// SCVars returns a copy of the Smart Contract Variables // SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() SCVariablesPtr { func (s *Synchronizer) SCVars() *common.SCVariables {
return SCVariablesPtr{ return &common.SCVariables{
Rollup: s.vars.Rollup.Copy(), Rollup: *s.vars.Rollup.Copy(),
Auction: s.vars.Auction.Copy(), Auction: *s.vars.Auction.Copy(),
WDelayer: s.vars.WDelayer.Copy(), WDelayer: *s.vars.WDelayer.Copy(),
} }
} }
@@ -503,13 +503,13 @@ func (s *Synchronizer) resetIntermediateState() error {
return nil return nil
} }
// Sync2 attems to synchronize an ethereum block starting from lastSavedBlock. // Sync attems to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB. // If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB.
// If a block is synched, it will be returned and also stored in the DB. If a // If a block is synched, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no // reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made. // synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations // TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync2(ctx context.Context, func (s *Synchronizer) Sync(ctx context.Context,
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) { lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed { if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil { if err := s.resetIntermediateState(); err != nil {
@@ -724,7 +724,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
} }
func getInitialVariables(ethClient eth.ClientInterface, func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) { consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err)) return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
@@ -740,7 +740,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
rollupVars := rollupInit.RollupVariables() rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding) auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables() wDelayerVars := wDelayerInit.WDelayerVariables()
return &SCVariables{ return &common.SCVariables{
Rollup: *rollupVars, Rollup: *rollupVars,
Auction: *auctionVars, Auction: *auctionVars,
WDelayer: *wDelayerVars, WDelayer: *wDelayerVars,

View File

@@ -359,7 +359,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
// Test Sync for rollup genesis block // Test Sync for rollup genesis block
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -372,9 +372,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num) assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num) assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
vars := s.SCVars() vars := s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbBlocks, err := s.historyDB.GetAllBlocks() dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err) require.NoError(t, err)
@@ -382,7 +382,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), dbBlocks[1].Num) assert.Equal(t, int64(1), dbBlocks[1].Num)
// Sync again and expect no new blocks // Sync again and expect no new blocks
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.Nil(t, syncBlock) require.Nil(t, syncBlock)
@@ -479,7 +479,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 2 // Block 2
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -496,7 +496,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 3 // Block 3
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
@@ -520,7 +520,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -533,9 +533,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num) assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num) assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbExits, err := s.historyDB.GetAllExits() dbExits, err := s.historyDB.GetAllExits()
require.NoError(t, err) require.NoError(t, err)
@@ -571,7 +571,7 @@ func TestSyncGeneral(t *testing.T) {
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -656,7 +656,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// First sync detects the reorg and discards 4 blocks // First sync detects the reorg and discards 4 blocks
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
expetedDiscards := int64(4) expetedDiscards := int64(4)
require.Equal(t, &expetedDiscards, discards) require.Equal(t, &expetedDiscards, discards)
@@ -665,9 +665,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num) assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
// At this point, the DB only has data up to block 1 // At this point, the DB only has data up to block 1
dbBlock, err := s.historyDB.GetLastBlock() dbBlock, err := s.historyDB.GetLastBlock()
@@ -684,7 +684,7 @@ func TestSyncGeneral(t *testing.T) {
// Sync blocks 2-6 // Sync blocks 2-6
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -704,9 +704,9 @@ func TestSyncGeneral(t *testing.T) {
} }
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
} }
dbBlock, err = s.historyDB.GetLastBlock() dbBlock, err = s.historyDB.GetLastBlock()
@@ -807,7 +807,7 @@ func TestSyncForgerCommitment(t *testing.T) {
// be in sync // be in sync
for { for {
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -826,7 +826,7 @@ func TestSyncForgerCommitment(t *testing.T) {
err = client.CtlAddBlocks([]common.BlockData{block}) err = client.CtlAddBlocks([]common.BlockData{block})
require.NoError(t, err) require.NoError(t, err)
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {

View File

@@ -146,7 +146,7 @@ const longWaitDuration = 999 * time.Hour
// const provingDuration = 2 * time.Second // const provingDuration = 2 * time.Second
func (s *Mock) runProver(ctx context.Context) { func (s *Mock) runProver(ctx context.Context) {
waitCh := time.After(longWaitDuration) timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -154,21 +154,27 @@ func (s *Mock) runProver(ctx context.Context) {
case msg := <-s.msgCh: case msg := <-s.msgCh:
switch msg.value { switch msg.value {
case "cancel": case "cancel":
waitCh = time.After(longWaitDuration) if !timer.Stop() {
<-timer.C
}
timer.Reset(longWaitDuration)
s.Lock() s.Lock()
if !s.status.IsReady() { if !s.status.IsReady() {
s.status = prover.StatusCodeAborted s.status = prover.StatusCodeAborted
} }
s.Unlock() s.Unlock()
case "prove": case "prove":
waitCh = time.After(s.provingDuration) if !timer.Stop() {
<-timer.C
}
timer.Reset(s.provingDuration)
s.Lock() s.Lock()
s.status = prover.StatusCodeBusy s.status = prover.StatusCodeBusy
s.Unlock() s.Unlock()
} }
msg.ackCh <- true msg.ackCh <- true
case <-waitCh: case <-timer.C:
waitCh = time.After(longWaitDuration) timer.Reset(longWaitDuration)
s.Lock() s.Lock()
if s.status != prover.StatusCodeBusy { if s.status != prover.StatusCodeBusy {
s.Unlock() s.Unlock()

View File

@@ -1197,7 +1197,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
exitAccount := &common.Account{ exitAccount := &common.Account{
TokenID: acc.TokenID, TokenID: acc.TokenID,
Nonce: common.Nonce(0), Nonce: common.Nonce(0),
// as is a common.Tx, the Amount is already an // as is a common.Tx, the tx.Amount is already an
// EffectiveAmount // EffectiveAmount
Balance: tx.Amount, Balance: tx.Amount,
BJJ: acc.BJJ, BJJ: acc.BJJ,
@@ -1212,9 +1212,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
// as is a common.Tx, the Amount is already an // Balance2 contains the ExitLeaf Balance before the
// EffectiveAmount // leaf update, which is 0
tp.zki.Balance2[tp.i] = tx.Amount tp.zki.Balance2[tp.i] = big.NewInt(0)
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
// as Leaf didn't exist in the ExitTree, set NewExit[i]=1 // as Leaf didn't exist in the ExitTree, set NewExit[i]=1
tp.zki.NewExit[tp.i] = big.NewInt(1) tp.zki.NewExit[tp.i] = big.NewInt(1)
@@ -1248,7 +1248,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
tp.zki.Balance2[tp.i] = tx.Amount // Balance2 contains the ExitLeaf Balance before the leaf
// update
tp.zki.Balance2[tp.i] = exitAccount.Balance
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
} }
@@ -1266,6 +1268,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
tp.zki.OldKey2[tp.i] = p.OldKey.BigInt() tp.zki.OldKey2[tp.i] = p.OldKey.BigInt()
tp.zki.OldValue2[tp.i] = p.OldValue.BigInt() tp.zki.OldValue2[tp.i] = p.OldValue.BigInt()
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
} }
return exitAccount, false, nil return exitAccount, false, nil

View File

@@ -795,7 +795,8 @@ func TestMultipleCoordIdxForTokenID(t *testing.T) {
checkBalanceByIdx(t, tp.s, 259, "0") // Coord0 checkBalanceByIdx(t, tp.s, 259, "0") // Coord0
} }
func TestTwoExits(t *testing.T) { func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOutput,
[]*ProcessTxOutput, []*ProcessTxOutput) {
// In the first part we generate a batch with two force exits for the // In the first part we generate a batch with two force exits for the
// same account of 20 each. The txprocessor output should be a single // same account of 20 each. The txprocessor output should be a single
// exitInfo with balance of 40. // exitInfo with balance of 40.
@@ -803,8 +804,9 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir)) defer assert.NoError(t, os.RemoveAll(dir))
nLevels := 16
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32}) Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err) assert.NoError(t, err)
chainID := uint16(1) chainID := uint16(1)
@@ -840,10 +842,10 @@ func TestTwoExits(t *testing.T) {
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs)) require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
config := Config{ config := Config{
NLevels: 32, NLevels: uint32(nLevels),
MaxFeeTx: 64, MaxTx: 3,
MaxTx: 512, MaxL1Tx: 2,
MaxL1Tx: 16, MaxFeeTx: 2,
ChainID: chainID, ChainID: chainID,
} }
tp := NewTxProcessor(sdb, config) tp := NewTxProcessor(sdb, config)
@@ -856,8 +858,6 @@ func TestTwoExits(t *testing.T) {
} }
} }
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
acc, err := sdb.GetAccount(256) acc, err := sdb.GetAccount(256)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, big.NewInt(60), acc.Balance) assert.Equal(t, big.NewInt(60), acc.Balance)
@@ -872,7 +872,7 @@ func TestTwoExits(t *testing.T) {
defer assert.NoError(t, os.RemoveAll(dir2)) defer assert.NoError(t, os.RemoveAll(dir2))
sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128, sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32}) Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err) assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx) tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
@@ -910,7 +910,68 @@ func TestTwoExits(t *testing.T) {
} }
} }
// In the third part we start a fresh statedb and generate a batch with
// two force exit for the same account as before but where the 1st Exit
// is with all the amount, and the 2nd Exit is with more amount than
// the available balance. The txprocessor output should be a single
// exitInfo with balance of 40, and the exit merkle tree proof should
// be equal to the previous ones.
dir3, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir3))
sdb3, err := statedb.NewStateDB(statedb.Config{Path: dir3, Keep: 128,
Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
// Single exit with balance of both exits in previous set. The exit
// root should match.
set3 := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
ForceExit(0) A: 40
ForceExit(0) A: 100
> batchL1 // freeze L1User{2}
> batchL1 // forge L1User{2}
> block
`
blocks, err = tc.GenerateBlocks(set3)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
tp = NewTxProcessor(sdb3, config)
ptOuts3 := []*ProcessTxOutput{}
for _, block := range blocks {
for _, batch := range block.Rollup.Batches {
ptOut, err := tp.ProcessTxs(nil, batch.L1UserTxs, nil, nil)
require.NoError(t, err)
ptOuts3 = append(ptOuts3, ptOut)
}
}
return ptOuts, ptOuts2, ptOuts3
}
func TestTwoExitsSynchronizer(t *testing.T) {
ptOuts, ptOuts2, ptOuts3 := testTwoExits(t, statedb.TypeSynchronizer)
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof) assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts3[3].ExitInfos[0].MerkleProof)
} }
func TestExitOf0Amount(t *testing.T) { func TestExitOf0Amount(t *testing.T) {

File diff suppressed because one or more lines are too long