|
diff --git a/api/api.go b/api/api.go
|
|
index 631d76e..5afa8a8 100644
|
|
--- a/api/api.go
|
|
+++ b/api/api.go
|
|
@@ -2,40 +2,19 @@ package api
|
|
|
|
import (
|
|
"errors"
|
|
- "sync"
|
|
|
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
|
"github.com/gin-gonic/gin"
|
|
- "github.com/hermeznetwork/hermez-node/common"
|
|
"github.com/hermeznetwork/hermez-node/db/historydb"
|
|
"github.com/hermeznetwork/hermez-node/db/l2db"
|
|
"github.com/hermeznetwork/tracerr"
|
|
)
|
|
|
|
-// TODO: Add correct values to constants
|
|
-const (
|
|
- createAccountExtraFeePercentage float64 = 2
|
|
- createAccountInternalExtraFeePercentage float64 = 2.5
|
|
-)
|
|
-
|
|
-// Status define status of the network
|
|
-type Status struct {
|
|
- sync.RWMutex
|
|
- NodeConfig NodeConfig `json:"nodeConfig"`
|
|
- Network Network `json:"network"`
|
|
- Metrics historydb.Metrics `json:"metrics"`
|
|
- Rollup historydb.RollupVariablesAPI `json:"rollup"`
|
|
- Auction historydb.AuctionVariablesAPI `json:"auction"`
|
|
- WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
|
|
- RecommendedFee common.RecommendedFee `json:"recommendedFee"`
|
|
-}
|
|
-
|
|
// API serves HTTP requests to allow external interaction with the Hermez node
|
|
type API struct {
|
|
h *historydb.HistoryDB
|
|
cg *configAPI
|
|
l2 *l2db.L2DB
|
|
- status Status
|
|
chainID uint16
|
|
hermezAddress ethCommon.Address
|
|
}
|
|
@@ -46,8 +25,6 @@ func NewAPI(
|
|
server *gin.Engine,
|
|
hdb *historydb.HistoryDB,
|
|
l2db *l2db.L2DB,
|
|
- config *Config,
|
|
- nodeConfig *NodeConfig,
|
|
) (*API, error) {
|
|
// Check input
|
|
// TODO: is stateDB only needed for explorer endpoints or for both?
|
|
@@ -57,20 +34,20 @@ func NewAPI(
|
|
if explorerEndpoints && hdb == nil {
|
|
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
|
|
}
|
|
-
|
|
+ ni, err := hdb.GetNodeInfo()
|
|
+ if err != nil {
|
|
+ return nil, err
|
|
+ }
|
|
a := &API{
|
|
h: hdb,
|
|
cg: &configAPI{
|
|
- RollupConstants: *newRollupConstants(config.RollupConstants),
|
|
- AuctionConstants: config.AuctionConstants,
|
|
- WDelayerConstants: config.WDelayerConstants,
|
|
- },
|
|
- l2: l2db,
|
|
- status: Status{
|
|
- NodeConfig: *nodeConfig,
|
|
+ RollupConstants: *newRollupConstants(ni.Constants.RollupConstants),
|
|
+ AuctionConstants: ni.Constants.AuctionConstants,
|
|
+ WDelayerConstants: ni.Constants.WDelayerConstants,
|
|
},
|
|
- chainID: config.ChainID,
|
|
- hermezAddress: config.HermezAddress,
|
|
+ l2: l2db,
|
|
+ chainID: ni.Constants.ChainID,
|
|
+ hermezAddress: ni.Constants.HermezAddress,
|
|
}
|
|
|
|
// Add coordinator endpoints
|
|
diff --git a/api/state.go b/api/state.go
|
|
index eecdd77..ebb0a3c 100644
|
|
--- a/api/state.go
|
|
+++ b/api/state.go
|
|
@@ -1,320 +1,16 @@
|
|
package api
|
|
|
|
import (
|
|
- "database/sql"
|
|
- "fmt"
|
|
- "math"
|
|
- "math/big"
|
|
"net/http"
|
|
- "time"
|
|
|
|
"github.com/gin-gonic/gin"
|
|
- "github.com/hermeznetwork/hermez-node/apitypes"
|
|
- "github.com/hermeznetwork/hermez-node/common"
|
|
- "github.com/hermeznetwork/hermez-node/db/historydb"
|
|
- "github.com/hermeznetwork/tracerr"
|
|
)
|
|
|
|
-// Network define status of the network
|
|
-type Network struct {
|
|
- LastEthBlock int64 `json:"lastEthereumBlock"`
|
|
- LastSyncBlock int64 `json:"lastSynchedBlock"`
|
|
- LastBatch *historydb.BatchAPI `json:"lastBatch"`
|
|
- CurrentSlot int64 `json:"currentSlot"`
|
|
- NextForgers []NextForger `json:"nextForgers"`
|
|
-}
|
|
-
|
|
-// NodeConfig is the configuration of the node that is exposed via API
|
|
-type NodeConfig struct {
|
|
- // ForgeDelay in seconds
|
|
- ForgeDelay float64 `json:"forgeDelay"`
|
|
-}
|
|
-
|
|
-// NextForger is a representation of the information of a coordinator and the period will forge
|
|
-type NextForger struct {
|
|
- Coordinator historydb.CoordinatorAPI `json:"coordinator"`
|
|
- Period Period `json:"period"`
|
|
-}
|
|
-
|
|
-// Period is a representation of a period
|
|
-type Period struct {
|
|
- SlotNum int64 `json:"slotNum"`
|
|
- FromBlock int64 `json:"fromBlock"`
|
|
- ToBlock int64 `json:"toBlock"`
|
|
- FromTimestamp time.Time `json:"fromTimestamp"`
|
|
- ToTimestamp time.Time `json:"toTimestamp"`
|
|
-}
|
|
-
|
|
func (a *API) getState(c *gin.Context) {
|
|
- // TODO: There are no events for the buckets information, so now this information will be 0
|
|
- a.status.RLock()
|
|
- status := a.status //nolint
|
|
- a.status.RUnlock()
|
|
- c.JSON(http.StatusOK, status) //nolint
|
|
-}
|
|
-
|
|
-// SC Vars
|
|
-
|
|
-// SetRollupVariables set Status.Rollup variables
|
|
-func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
|
|
- a.status.Lock()
|
|
- var rollupVAPI historydb.RollupVariablesAPI
|
|
- rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
|
|
- rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
|
|
- rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
|
|
- rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
|
|
-
|
|
- for i, bucket := range rollupVariables.Buckets {
|
|
- var apiBucket historydb.BucketParamsAPI
|
|
- apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
|
|
- apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
|
|
- apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
|
|
- apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
|
|
- rollupVAPI.Buckets[i] = apiBucket
|
|
- }
|
|
-
|
|
- rollupVAPI.SafeMode = rollupVariables.SafeMode
|
|
- a.status.Rollup = rollupVAPI
|
|
- a.status.Unlock()
|
|
-}
|
|
-
|
|
-// SetWDelayerVariables set Status.WithdrawalDelayer variables
|
|
-func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
|
|
- a.status.Lock()
|
|
- a.status.WithdrawalDelayer = wDelayerVariables
|
|
- a.status.Unlock()
|
|
-}
|
|
-
|
|
-// SetAuctionVariables set Status.Auction variables
|
|
-func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
|
|
- a.status.Lock()
|
|
- var auctionAPI historydb.AuctionVariablesAPI
|
|
-
|
|
- auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
|
|
- auctionAPI.DonationAddress = auctionVariables.DonationAddress
|
|
- auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
|
|
- auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
|
|
- auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
|
|
- auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
|
|
- auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
|
|
- auctionAPI.Outbidding = auctionVariables.Outbidding
|
|
- auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
|
|
-
|
|
- for i, slot := range auctionVariables.DefaultSlotSetBid {
|
|
- auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
|
|
- }
|
|
-
|
|
- for i, ratio := range auctionVariables.AllocationRatio {
|
|
- auctionAPI.AllocationRatio[i] = ratio
|
|
- }
|
|
-
|
|
- a.status.Auction = auctionAPI
|
|
- a.status.Unlock()
|
|
-}
|
|
-
|
|
-// Network
|
|
-
|
|
-// UpdateNetworkInfoBlock update Status.Network block related information
|
|
-func (a *API) UpdateNetworkInfoBlock(
|
|
- lastEthBlock, lastSyncBlock common.Block,
|
|
-) {
|
|
- a.status.Network.LastSyncBlock = lastSyncBlock.Num
|
|
- a.status.Network.LastEthBlock = lastEthBlock.Num
|
|
-}
|
|
-
|
|
-// UpdateNetworkInfo update Status.Network information
|
|
-func (a *API) UpdateNetworkInfo(
|
|
- lastEthBlock, lastSyncBlock common.Block,
|
|
- lastBatchNum common.BatchNum, currentSlot int64,
|
|
-) error {
|
|
- lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
|
|
- if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
- lastBatch = nil
|
|
- } else if err != nil {
|
|
- return tracerr.Wrap(err)
|
|
- }
|
|
- lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
|
|
- nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
|
|
- if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
- nextForgers = nil
|
|
- } else if err != nil {
|
|
- return tracerr.Wrap(err)
|
|
- }
|
|
- a.status.Lock()
|
|
- a.status.Network.LastSyncBlock = lastSyncBlock.Num
|
|
- a.status.Network.LastEthBlock = lastEthBlock.Num
|
|
- a.status.Network.LastBatch = lastBatch
|
|
- a.status.Network.CurrentSlot = currentSlot
|
|
- a.status.Network.NextForgers = nextForgers
|
|
-
|
|
- // Update buckets withdrawals
|
|
- bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
|
|
- if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
- bucketsUpdate = nil
|
|
- } else if err != nil {
|
|
- return tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- for i, bucketParams := range a.status.Rollup.Buckets {
|
|
- for _, bucketUpdate := range bucketsUpdate {
|
|
- if bucketUpdate.NumBucket == i {
|
|
- bucketParams.Withdrawals = bucketUpdate.Withdrawals
|
|
- a.status.Rollup.Buckets[i] = bucketParams
|
|
- break
|
|
- }
|
|
- }
|
|
- }
|
|
- a.status.Unlock()
|
|
- return nil
|
|
-}
|
|
-
|
|
-// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
|
|
-func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
|
|
- var slots [6]*big.Int
|
|
-
|
|
- for i, slot := range defaultSlotSetBid {
|
|
- bigInt, ok := new(big.Int).SetString(string(*slot), 10)
|
|
- if !ok {
|
|
- return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
|
|
- }
|
|
- slots[i] = bigInt
|
|
- }
|
|
-
|
|
- return slots, nil
|
|
-}
|
|
-
|
|
-// getNextForgers returns next forgers
|
|
-func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
|
|
- secondsPerBlock := int64(15) //nolint:gomnd
|
|
- // currentSlot and lastClosedSlot included
|
|
- limit := uint(lastClosedSlot - currentSlot + 1)
|
|
- bids, _, err := a.h.GetBestBidsAPI(¤tSlot, &lastClosedSlot, nil, &limit, "ASC")
|
|
- if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- nextForgers := []NextForger{}
|
|
- // Get min bid info
|
|
- var minBidInfo []historydb.MinBidInfo
|
|
- if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
|
|
- // All min bids can be calculated with the last update of AuctionVariables
|
|
- bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- minBidInfo = []historydb.MinBidInfo{{
|
|
- DefaultSlotSetBid: bigIntSlots,
|
|
- DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
|
|
- }}
|
|
- } else {
|
|
- // Get all the relevant updates from the DB
|
|
- minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- }
|
|
- // Create nextForger for each slot
|
|
- for i := currentSlot; i <= lastClosedSlot; i++ {
|
|
- fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
|
|
- toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
|
|
- nextForger := NextForger{
|
|
- Period: Period{
|
|
- SlotNum: i,
|
|
- FromBlock: fromBlock,
|
|
- ToBlock: toBlock,
|
|
- FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
|
|
- ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
|
|
- },
|
|
- }
|
|
- foundForger := false
|
|
- // If there is a bid for a slot, get forger (coordinator)
|
|
- for j := range bids {
|
|
- slotNum := bids[j].SlotNum
|
|
- if slotNum == i {
|
|
- // There's a bid for the slot
|
|
- // Check if the bid is greater than the minimum required
|
|
- for i := 0; i < len(minBidInfo); i++ {
|
|
- // Find the most recent update
|
|
- if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
|
|
- // Get min bid
|
|
- minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
|
|
- minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
|
|
- // Check if the bid has beaten the minimum
|
|
- bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
|
|
- if !ok {
|
|
- return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
|
|
- }
|
|
- if minBid.Cmp(bid) == 1 {
|
|
- // Min bid is greater than bid, the slot will be forged by boot coordinator
|
|
- break
|
|
- }
|
|
- foundForger = true
|
|
- break
|
|
- }
|
|
- }
|
|
- if !foundForger { // There is no bid or it's smaller than the minimum
|
|
- break
|
|
- }
|
|
- coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- nextForger.Coordinator = *coordinator
|
|
- break
|
|
- }
|
|
- }
|
|
- // If there is no bid, the coordinator that will forge is boot coordinator
|
|
- if !foundForger {
|
|
- nextForger.Coordinator = historydb.CoordinatorAPI{
|
|
- Forger: a.status.Auction.BootCoordinator,
|
|
- URL: a.status.Auction.BootCoordinatorURL,
|
|
- }
|
|
- }
|
|
- nextForgers = append(nextForgers, nextForger)
|
|
- }
|
|
- return nextForgers, nil
|
|
-}
|
|
-
|
|
-// Metrics
|
|
-
|
|
-// UpdateMetrics update Status.Metrics information
|
|
-func (a *API) UpdateMetrics() error {
|
|
- a.status.RLock()
|
|
- if a.status.Network.LastBatch == nil {
|
|
- a.status.RUnlock()
|
|
- return nil
|
|
- }
|
|
- batchNum := a.status.Network.LastBatch.BatchNum
|
|
- a.status.RUnlock()
|
|
- metrics, err := a.h.GetMetricsAPI(batchNum)
|
|
+ ni, err := a.h.GetNodeInfo()
|
|
if err != nil {
|
|
- return tracerr.Wrap(err)
|
|
- }
|
|
- a.status.Lock()
|
|
- a.status.Metrics = *metrics
|
|
- a.status.Unlock()
|
|
- return nil
|
|
-}
|
|
-
|
|
-// Recommended fee
|
|
-
|
|
-// UpdateRecommendedFee update Status.RecommendedFee information
|
|
-func (a *API) UpdateRecommendedFee() error {
|
|
- feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
|
|
- if err != nil {
|
|
- return tracerr.Wrap(err)
|
|
- }
|
|
- var minFeeUSD float64
|
|
- if a.l2 != nil {
|
|
- minFeeUSD = a.l2.MinFeeUSD()
|
|
+ retBadReq(err, c)
|
|
+ return
|
|
}
|
|
- a.status.Lock()
|
|
- a.status.RecommendedFee.ExistingAccount =
|
|
- math.Max(feeExistingAccount, minFeeUSD)
|
|
- a.status.RecommendedFee.CreatesAccount =
|
|
- math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
|
|
- a.status.RecommendedFee.CreatesAccountAndRegister =
|
|
- math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
|
|
- a.status.Unlock()
|
|
- return nil
|
|
+ c.JSON(http.StatusOK, ni.StateAPI)
|
|
}
|
|
diff --git a/cli/node/main.go b/cli/node/main.go
|
|
index 0138640..d7bf69d 100644
|
|
--- a/cli/node/main.go
|
|
+++ b/cli/node/main.go
|
|
@@ -143,6 +143,47 @@ func cmdRun(c *cli.Context) error {
|
|
return nil
|
|
}
|
|
|
|
+func cmdServeAPI(c *cli.Context) error {
|
|
+ cfgPath := c.String(flagCfg)
|
|
+ cfg, err := config.LoadAPIServer(cfgPath)
|
|
+ if err != nil {
|
|
+ if err := cli.ShowAppHelp(c); err != nil {
|
|
+ panic(err)
|
|
+ }
|
|
+ return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
|
|
+ }
|
|
+
|
|
+ node, err := node.NewNode(cfg.mode, cfg.node)
|
|
+ if err != nil {
|
|
+ return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
|
|
+ }
|
|
+ node.Start()
|
|
+
|
|
+ stopCh := make(chan interface{})
|
|
+
|
|
+ // catch ^C to send the stop signal
|
|
+ ossig := make(chan os.Signal, 1)
|
|
+ signal.Notify(ossig, os.Interrupt)
|
|
+ const forceStopCount = 3
|
|
+ go func() {
|
|
+ n := 0
|
|
+ for sig := range ossig {
|
|
+ if sig == os.Interrupt {
|
|
+ log.Info("Received Interrupt Signal")
|
|
+ stopCh <- nil
|
|
+ n++
|
|
+ if n == forceStopCount {
|
|
+ log.Fatalf("Received %v Interrupt Signals", forceStopCount)
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }()
|
|
+ <-stopCh
|
|
+ node.Stop()
|
|
+
|
|
+ return nil
|
|
+}
|
|
+
|
|
func cmdDiscard(c *cli.Context) error {
|
|
_cfg, err := parseCli(c)
|
|
if err != nil {
|
|
@@ -225,9 +266,6 @@ func getConfig(c *cli.Context) (*Config, error) {
|
|
var cfg Config
|
|
mode := c.String(flagMode)
|
|
nodeCfgPath := c.String(flagCfg)
|
|
- if nodeCfgPath == "" {
|
|
- return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
|
|
- }
|
|
var err error
|
|
switch mode {
|
|
case modeSync:
|
|
@@ -304,6 +342,12 @@ func main() {
|
|
Usage: "Run the hermez-node in the indicated mode",
|
|
Action: cmdRun,
|
|
},
|
|
+ {
|
|
+ Name: "serveapi",
|
|
+ Aliases: []string{},
|
|
+ Usage: "Serve the API only",
|
|
+ Action: cmdServeAPI,
|
|
+ },
|
|
{
|
|
Name: "discard",
|
|
Aliases: []string{},
|
|
diff --git a/config/config.go b/config/config.go
|
|
index a9f495f..f517497 100644
|
|
--- a/config/config.go
|
|
+++ b/config/config.go
|
|
@@ -44,6 +44,13 @@ type ForgeBatchGasCost struct {
|
|
L2Tx uint64 `validate:"required"`
|
|
}
|
|
|
|
+// CoordinatorAPI specifies the configuration parameters of the API in mode
|
|
+// coordinator
|
|
+type CoordinatorAPI struct {
|
|
+ // Coordinator enables the coordinator API endpoints
|
|
+ Coordinator bool
|
|
+}
|
|
+
|
|
// Coordinator is the coordinator specific configuration.
|
|
type Coordinator struct {
|
|
// ForgerAddress is the address under which this coordinator is forging
|
|
@@ -197,10 +204,7 @@ type Coordinator struct {
|
|
// ForgeBatch transaction.
|
|
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
|
|
} `validate:"required"`
|
|
- API struct {
|
|
- // Coordinator enables the coordinator API endpoints
|
|
- Coordinator bool
|
|
- } `validate:"required"`
|
|
+ API CoordinatorAPI `validate:"required"`
|
|
Debug struct {
|
|
// BatchPath if set, specifies the path where batchInfo is stored
|
|
// in JSON in every step/update of the pipeline
|
|
@@ -215,6 +219,64 @@ type Coordinator struct {
|
|
}
|
|
}
|
|
|
|
+// NodeAPI specifies the configuration parameters of the API
|
|
+type NodeAPI struct {
|
|
+ // Address where the API will listen if set
|
|
+ Address string
|
|
+ // Explorer enables the Explorer API endpoints
|
|
+ Explorer bool
|
|
+ // UpdateMetricsInterval is the interval between updates of the
|
|
+ // API metrics
|
|
+ UpdateMetricsInterval Duration
|
|
+ // UpdateRecommendedFeeInterval is the interval between updates of the
|
|
+ // recommended fees
|
|
+ UpdateRecommendedFeeInterval Duration
|
|
+ // Maximum concurrent connections allowed between API and SQL
|
|
+ MaxSQLConnections int `validate:"required"`
|
|
+ // SQLConnectionTimeout is the maximum amount of time that an API request
|
|
+	// can wait to establish a SQL connection
|
|
+ SQLConnectionTimeout Duration
|
|
+}
|
|
+
|
|
+// It's possible to use differentiated SQL connections for read/write.
|
|
+// If the read configuration is not provided, the write one is going to be used
|
|
+// for both reads and writes
|
|
+type PostgreSQL struct {
|
|
+ // Port of the PostgreSQL write server
|
|
+ PortWrite int `validate:"required"`
|
|
+ // Host of the PostgreSQL write server
|
|
+ HostWrite string `validate:"required"`
|
|
+ // User of the PostgreSQL write server
|
|
+ UserWrite string `validate:"required"`
|
|
+ // Password of the PostgreSQL write server
|
|
+ PasswordWrite string `validate:"required"`
|
|
+ // Name of the PostgreSQL write server database
|
|
+ NameWrite string `validate:"required"`
|
|
+ // Port of the PostgreSQL read server
|
|
+ PortRead int
|
|
+ // Host of the PostgreSQL read server
|
|
+ HostRead string
|
|
+ // User of the PostgreSQL read server
|
|
+ UserRead string
|
|
+ // Password of the PostgreSQL read server
|
|
+ PasswordRead string
|
|
+ // Name of the PostgreSQL read server database
|
|
+ NameRead string
|
|
+}
|
|
+
|
|
+// NodeDebug specifies debug configuration parameters
|
|
+type NodeDebug struct {
|
|
+ // APIAddress is the address where the debugAPI will listen if
|
|
+ // set
|
|
+ APIAddress string
|
|
+ // MeddlerLogs enables meddler debug mode, where unused columns and struct
|
|
+ // fields will be logged
|
|
+ MeddlerLogs bool
|
|
+ // GinDebugMode sets Gin-Gonic (the web framework) to run in
|
|
+ // debug mode
|
|
+ GinDebugMode bool
|
|
+}
|
|
+
|
|
// Node is the hermez node configuration.
|
|
type Node struct {
|
|
PriceUpdater struct {
|
|
@@ -231,32 +293,8 @@ type Node struct {
|
|
// Keep is the number of checkpoints to keep
|
|
Keep int `validate:"required"`
|
|
} `validate:"required"`
|
|
- // It's possible to use diferentiated SQL connections for read/write.
|
|
- // If the read configuration is not provided, the write one it's going to be used
|
|
- // for both reads and writes
|
|
- PostgreSQL struct {
|
|
- // Port of the PostgreSQL write server
|
|
- PortWrite int `validate:"required"`
|
|
- // Host of the PostgreSQL write server
|
|
- HostWrite string `validate:"required"`
|
|
- // User of the PostgreSQL write server
|
|
- UserWrite string `validate:"required"`
|
|
- // Password of the PostgreSQL write server
|
|
- PasswordWrite string `validate:"required"`
|
|
- // Name of the PostgreSQL write server database
|
|
- NameWrite string `validate:"required"`
|
|
- // Port of the PostgreSQL read server
|
|
- PortRead int
|
|
- // Host of the PostgreSQL read server
|
|
- HostRead string
|
|
- // User of the PostgreSQL read server
|
|
- UserRead string
|
|
- // Password of the PostgreSQL read server
|
|
- PasswordRead string
|
|
- // Name of the PostgreSQL read server database
|
|
- NameRead string
|
|
- } `validate:"required"`
|
|
- Web3 struct {
|
|
+ PostgreSQL PostgreSQL `validate:"required"`
|
|
+ Web3 struct {
|
|
// URL is the URL of the web3 ethereum-node RPC server
|
|
URL string `validate:"required"`
|
|
} `validate:"required"`
|
|
@@ -286,37 +324,34 @@ type Node struct {
|
|
// TokenHEZ address
|
|
TokenHEZName string `validate:"required"`
|
|
} `validate:"required"`
|
|
- API struct {
|
|
- // Address where the API will listen if set
|
|
- Address string
|
|
- // Explorer enables the Explorer API endpoints
|
|
- Explorer bool
|
|
- // UpdateMetricsInterval is the interval between updates of the
|
|
- // API metrics
|
|
- UpdateMetricsInterval Duration
|
|
- // UpdateRecommendedFeeInterval is the interval between updates of the
|
|
- // recommended fees
|
|
- UpdateRecommendedFeeInterval Duration
|
|
- // Maximum concurrent connections allowed between API and SQL
|
|
- MaxSQLConnections int `validate:"required"`
|
|
- // SQLConnectionTimeout is the maximum amount of time that an API request
|
|
- // can wait to stablish a SQL connection
|
|
- SQLConnectionTimeout Duration
|
|
- } `validate:"required"`
|
|
- Debug struct {
|
|
- // APIAddress is the address where the debugAPI will listen if
|
|
- // set
|
|
- APIAddress string
|
|
- // MeddlerLogs enables meddler debug mode, where unused columns and struct
|
|
- // fields will be logged
|
|
- MeddlerLogs bool
|
|
- // GinDebugMode sets Gin-Gonic (the web framework) to run in
|
|
- // debug mode
|
|
- GinDebugMode bool
|
|
- }
|
|
+ API NodeAPI `validate:"required"`
|
|
+ Debug NodeDebug `validate:"required"`
|
|
Coordinator Coordinator `validate:"-"`
|
|
}
|
|
|
|
+type APIServer struct {
|
|
+ API NodeAPI `validate:"required"`
|
|
+ PostgreSQL PostgreSQL `validate:"required"`
|
|
+ Coordinator struct {
|
|
+ API struct {
|
|
+ // Coordinator enables the coordinator API endpoints
|
|
+ Coordinator bool
|
|
+ } `validate:"required"`
|
|
+ } `validate:"required"`
|
|
+ L2DB struct {
|
|
+ // MaxTxs is the maximum number of pending L2Txs that can be
|
|
+ // stored in the pool. Once this number of pending L2Txs is
|
|
+ // reached, inserts to the pool will be denied until some of
|
|
+ // the pending txs are forged.
|
|
+ MaxTxs uint32 `validate:"required"`
|
|
+ // MinFeeUSD is the minimum fee in USD that a tx must pay in
|
|
+ // order to be accepted into the pool. Txs with lower than
|
|
+ // minimum fee will be rejected at the API level.
|
|
+ MinFeeUSD float64
|
|
+ } `validate:"required"`
|
|
+ Debug NodeDebug `validate:"required"`
|
|
+}
|
|
+
|
|
// Load loads a generic config.
|
|
func Load(path string, cfg interface{}) error {
|
|
bs, err := ioutil.ReadFile(path) //nolint:gosec
|
|
@@ -358,3 +393,16 @@ func LoadNode(path string) (*Node, error) {
|
|
}
|
|
return &cfg, nil
|
|
}
|
|
+
|
|
+// LoadAPIServer loads the APIServer configuration from path.
|
|
+func LoadAPIServer(path string) (*APIServer, error) {
|
|
+ var cfg APIServer
|
|
+ if err := Load(path, &cfg); err != nil {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
|
|
+ }
|
|
+ validate := validator.New()
|
|
+ if err := validate.Struct(cfg); err != nil {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
|
+ }
|
|
+ return &cfg, nil
|
|
+}
|
|
diff --git a/db/historydb/apiqueries.go b/db/historydb/apiqueries.go
|
|
index 073608c..e187448 100644
|
|
--- a/db/historydb/apiqueries.go
|
|
+++ b/db/historydb/apiqueries.go
|
|
@@ -32,9 +32,12 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
|
|
return nil, tracerr.Wrap(err)
|
|
}
|
|
defer hdb.apiConnCon.Release()
|
|
+ return hdb.getBatchAPI(hdb.dbRead, batchNum)
|
|
+}
|
|
+func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
|
|
batch := &BatchAPI{}
|
|
return batch, tracerr.Wrap(meddler.QueryRow(
|
|
- hdb.dbRead, batch,
|
|
+ d, batch,
|
|
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
|
|
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
|
|
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
|
|
@@ -180,6 +183,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
|
|
return nil, 0, tracerr.Wrap(err)
|
|
}
|
|
defer hdb.apiConnCon.Release()
|
|
+ return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
|
|
+}
|
|
+func (hdb *HistoryDB) getBestBidsAPI(
|
|
+ d meddler.DB,
|
|
+ minSlotNum, maxSlotNum *int64,
|
|
+ bidderAddr *ethCommon.Address,
|
|
+ limit *uint, order string,
|
|
+) ([]BidAPI, uint64, error) {
|
|
var query string
|
|
var args []interface{}
|
|
// JOIN the best bid of each slot with the latest update of each coordinator
|
|
@@ -214,7 +225,7 @@ func (hdb *HistoryDB) GetBestBidsAPI(
|
|
}
|
|
query = hdb.dbRead.Rebind(queryStr)
|
|
bidPtrs := []*BidAPI{}
|
|
- if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil {
|
|
+ if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
|
|
return nil, 0, tracerr.Wrap(err)
|
|
}
|
|
// log.Debug(query)
|
|
@@ -697,25 +708,6 @@ func (hdb *HistoryDB) GetExitsAPI(
|
|
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
|
|
}
|
|
|
|
-// GetBucketUpdatesAPI retrieves latest values for each bucket
|
|
-func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
|
|
- cancel, err := hdb.apiConnCon.Acquire()
|
|
- defer cancel()
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- defer hdb.apiConnCon.Release()
|
|
- var bucketUpdates []*BucketUpdateAPI
|
|
- err = meddler.QueryAll(
|
|
- hdb.dbRead, &bucketUpdates,
|
|
- `SELECT num_bucket, withdrawals FROM bucket_update
|
|
- WHERE item_id in(SELECT max(item_id) FROM bucket_update
|
|
- group by num_bucket)
|
|
- ORDER BY num_bucket ASC;`,
|
|
- )
|
|
- return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
|
|
-}
|
|
-
|
|
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
|
|
func (hdb *HistoryDB) GetCoordinatorsAPI(
|
|
bidderAddr, forgerAddr *ethCommon.Address,
|
|
@@ -800,29 +792,6 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
|
|
return auctionVars, tracerr.Wrap(err)
|
|
}
|
|
|
|
-// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
|
|
-// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
|
|
-func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
|
|
- cancel, err := hdb.apiConnCon.Acquire()
|
|
- defer cancel()
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- defer hdb.apiConnCon.Release()
|
|
- auctionVars := []*MinBidInfo{}
|
|
- query := `
|
|
- SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
|
|
- WHERE default_slot_set_bid_slot_num < $1
|
|
- ORDER BY default_slot_set_bid_slot_num DESC
|
|
- LIMIT $2;
|
|
- `
|
|
- err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
- return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
|
|
-}
|
|
-
|
|
// GetAccountAPI returns an account by its index
|
|
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
|
|
cancel, err := hdb.apiConnCon.Acquire()
|
|
@@ -941,135 +910,47 @@ func (hdb *HistoryDB) GetAccountsAPI(
|
|
accounts[0].TotalItems - uint64(len(accounts)), nil
|
|
}
|
|
|
|
-// GetMetricsAPI returns metrics
|
|
-func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
|
|
+// GetCommonAccountAPI returns the account associated to an account idx
|
|
+func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
|
|
cancel, err := hdb.apiConnCon.Acquire()
|
|
defer cancel()
|
|
if err != nil {
|
|
return nil, tracerr.Wrap(err)
|
|
}
|
|
defer hdb.apiConnCon.Release()
|
|
- metricsTotals := &MetricsTotals{}
|
|
- metrics := &Metrics{}
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metricsTotals, `SELECT
|
|
- COALESCE (MIN(batch.batch_num), 0) as batch_num,
|
|
- COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp,
|
|
- COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
|
|
- FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
|
- WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
|
|
- FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
|
|
- // Avoid dividing by 0
|
|
- if seconds == 0 {
|
|
- seconds++
|
|
- }
|
|
-
|
|
- metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
|
|
-
|
|
- if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
|
|
- metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
|
|
- float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
|
|
- } else {
|
|
- metrics.TransactionsPerBatch = float64(0)
|
|
- }
|
|
-
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
|
- COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
|
- WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- if metricsTotals.TotalBatches > 0 {
|
|
- metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
|
|
- } else {
|
|
- metrics.BatchFrequency = 0
|
|
- }
|
|
- if metricsTotals.TotalTransactions > 0 {
|
|
- metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
|
- } else {
|
|
- metrics.AvgTransactionFee = 0
|
|
- }
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metrics,
|
|
- `SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
+ account := &common.Account{}
|
|
err = meddler.QueryRow(
|
|
- hdb.dbRead, metrics,
|
|
- `SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) AS estimatedTimeToForgeL1 FROM tx
|
|
- INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
|
|
- INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
|
|
- INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
|
|
- WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
|
|
- metricsTotals.FirstBatchNum, lastBatchNum,
|
|
+ hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx,
|
|
)
|
|
- if err != nil {
|
|
- return nil, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- return metrics, nil
|
|
+ return account, tracerr.Wrap(err)
|
|
}
|
|
|
|
-// GetAvgTxFeeAPI returns average transaction fee of the last 1h
|
|
-func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
|
|
+// GetCoordinatorAPI returns a coordinator by its bidderAddr
|
|
+func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
|
cancel, err := hdb.apiConnCon.Acquire()
|
|
defer cancel()
|
|
if err != nil {
|
|
- return 0, tracerr.Wrap(err)
|
|
+ return nil, tracerr.Wrap(err)
|
|
}
|
|
defer hdb.apiConnCon.Release()
|
|
- metricsTotals := &MetricsTotals{}
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
|
|
- COALESCE (MIN(tx.batch_num), 0) as batch_num
|
|
- FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
|
|
- WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
|
|
- if err != nil {
|
|
- return 0, tracerr.Wrap(err)
|
|
- }
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
|
- COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
|
- WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
|
|
- if err != nil {
|
|
- return 0, tracerr.Wrap(err)
|
|
- }
|
|
-
|
|
- var avgTransactionFee float64
|
|
- if metricsTotals.TotalTransactions > 0 {
|
|
- avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
|
- } else {
|
|
- avgTransactionFee = 0
|
|
- }
|
|
-
|
|
- return avgTransactionFee, nil
|
|
+ return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
|
|
+}
|
|
+func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
|
+ coordinator := &CoordinatorAPI{}
|
|
+ err := meddler.QueryRow(
|
|
+ d, coordinator,
|
|
+ "SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
|
|
+ bidderAddr,
|
|
+ )
|
|
+ return coordinator, tracerr.Wrap(err)
|
|
}
|
|
|
|
-// GetCommonAccountAPI returns the account associated to an account idx
|
|
-func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
|
|
+func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
|
|
cancel, err := hdb.apiConnCon.Acquire()
|
|
defer cancel()
|
|
if err != nil {
|
|
return nil, tracerr.Wrap(err)
|
|
}
|
|
defer hdb.apiConnCon.Release()
|
|
- account := &common.Account{}
|
|
- err = meddler.QueryRow(
|
|
- hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx,
|
|
- )
|
|
- return account, tracerr.Wrap(err)
|
|
+ return hdb.GetNodeInfo()
|
|
}
|
|
diff --git a/db/historydb/historydb.go b/db/historydb/historydb.go
|
|
index e887e70..3997f57 100644
|
|
--- a/db/historydb/historydb.go
|
|
+++ b/db/historydb/historydb.go
|
|
@@ -1139,17 +1139,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
|
|
return tracerr.Wrap(txn.Commit())
|
|
}
|
|
|
|
-// GetCoordinatorAPI returns a coordinator by its bidderAddr
|
|
-func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
|
- coordinator := &CoordinatorAPI{}
|
|
- err := meddler.QueryRow(
|
|
- hdb.dbRead, coordinator,
|
|
- "SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
|
|
- bidderAddr,
|
|
- )
|
|
- return coordinator, tracerr.Wrap(err)
|
|
-}
|
|
-
|
|
// AddAuctionVars insert auction vars into the DB
|
|
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
|
|
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
|
|
diff --git a/db/historydb/views.go b/db/historydb/views.go
|
|
index fe71733..e7bebe5 100644
|
|
--- a/db/historydb/views.go
|
|
+++ b/db/historydb/views.go
|
|
@@ -313,17 +313,6 @@ type Metrics struct {
|
|
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"`
|
|
}
|
|
|
|
-// MetricsTotals is used to get temporal information from HistoryDB
|
|
-// to calculate data to be stored into the Metrics struct
|
|
-type MetricsTotals struct {
|
|
- TotalTransactions uint64 `meddler:"total_txs"`
|
|
- FirstBatchNum common.BatchNum `meddler:"batch_num"`
|
|
- TotalBatches int64 `meddler:"total_batches"`
|
|
- TotalFeesUSD float64 `meddler:"total_fees"`
|
|
- MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
|
|
- MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
|
|
-}
|
|
-
|
|
// BidAPI is a representation of a bid with additional information
|
|
// required by the API
|
|
type BidAPI struct {
|
|
diff --git a/db/migrations/0001.sql b/db/migrations/0001.sql
|
|
index d09a432..ce5dbe0 100644
|
|
--- a/db/migrations/0001.sql
|
|
+++ b/db/migrations/0001.sql
|
|
@@ -661,6 +661,13 @@ CREATE TABLE account_creation_auth (
|
|
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
|
|
);
|
|
|
|
+CREATE TABLE node_info (
|
|
+ state BYTEA, -- object returned by GET /state
|
|
+ pool_max_txs BIGINT, -- L2DB config
|
|
+ min_fee NUMERIC, -- L2DB config
|
|
+ constants BYTEA -- info of the network that is constant
|
|
+)
|
|
+
|
|
-- +migrate Down
|
|
-- triggers
|
|
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
|
|
@@ -675,6 +682,7 @@ DROP FUNCTION IF EXISTS set_tx;
|
|
DROP FUNCTION IF EXISTS forge_l1_user_txs;
|
|
DROP FUNCTION IF EXISTS set_pool_tx;
|
|
-- drop tables IF EXISTS
|
|
+DROP TABLE IF EXISTS node_info;
|
|
DROP TABLE IF EXISTS account_creation_auth;
|
|
DROP TABLE IF EXISTS tx_pool;
|
|
DROP TABLE IF EXISTS auction_vars;
|
|
diff --git a/go.mod b/go.mod
|
|
index 2fdd0e5..3f03bdc 100644
|
|
--- a/go.mod
|
|
+++ b/go.mod
|
|
@@ -26,6 +26,7 @@ require (
|
|
github.com/russross/meddler v1.0.0
|
|
github.com/stretchr/testify v1.6.1
|
|
github.com/urfave/cli/v2 v2.2.0
|
|
+ github.com/ztrue/tracerr v0.3.0
|
|
go.uber.org/zap v1.16.0
|
|
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
|
|
golang.org/x/net v0.0.0-20200822124328-c89045814202
|
|
diff --git a/go.sum b/go.sum
|
|
index 3def156..749aa10 100644
|
|
--- a/go.sum
|
|
+++ b/go.sum
|
|
@@ -690,6 +690,8 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDf
|
|
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
|
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
|
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
|
+github.com/ztrue/tracerr v0.3.0 h1:lDi6EgEYhPYPnKcjsYzmWw4EkFEoA/gfe+I9Y5f+h6Y=
|
|
+github.com/ztrue/tracerr v0.3.0/go.mod h1:qEalzze4VN9O8tnhBXScfCrmoJo10o8TN5ciKjm6Mww=
|
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
|
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
|
diff --git a/node/node.go b/node/node.go
|
|
index ddd5bdf..252b9b3 100644
|
|
--- a/node/node.go
|
|
+++ b/node/node.go
|
|
@@ -67,6 +67,7 @@ type Node struct {
|
|
mode Mode
|
|
sqlConnRead *sqlx.DB
|
|
sqlConnWrite *sqlx.DB
|
|
+ historyDB *historydb.HistoryDB
|
|
ctx context.Context
|
|
wg sync.WaitGroup
|
|
cancel context.CancelFunc
|
|
@@ -235,6 +236,20 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|
WDelayer: *sync.WDelayerConstants(),
|
|
}
|
|
|
|
+ if err := historyDB.SetInitialNodeInfo(
|
|
+ cfg.Coordinator.L2DB.MaxTxs,
|
|
+ cfg.Coordinator.L2DB.MinFeeUSD,
|
|
+ &historydb.Constants{
|
|
+ RollupConstants: scConsts.Rollup,
|
|
+ AuctionConstants: scConsts.Auction,
|
|
+ WDelayerConstants: scConsts.WDelayer,
|
|
+ ChainID: chainIDU16,
|
|
+ HermezAddress: cfg.SmartContracts.Rollup,
|
|
+ },
|
|
+ ); err != nil {
|
|
+ return nil, tracerr.Wrap(err)
|
|
+ }
|
|
+
|
|
var coord *coordinator.Coordinator
|
|
var l2DB *l2db.L2DB
|
|
if mode == ModeCoordinator {
|
|
@@ -400,23 +415,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|
coord, cfg.API.Explorer,
|
|
server,
|
|
historyDB,
|
|
- stateDB,
|
|
l2DB,
|
|
- &api.Config{
|
|
- RollupConstants: scConsts.Rollup,
|
|
- AuctionConstants: scConsts.Auction,
|
|
- WDelayerConstants: scConsts.WDelayer,
|
|
- ChainID: chainIDU16,
|
|
- HermezAddress: cfg.SmartContracts.Rollup,
|
|
- },
|
|
- cfg.Coordinator.ForgeDelay.Duration,
|
|
)
|
|
if err != nil {
|
|
return nil, tracerr.Wrap(err)
|
|
}
|
|
- nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
|
|
- nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
|
|
- nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
|
|
}
|
|
var debugAPI *debugapi.DebugAPI
|
|
if cfg.Debug.APIAddress != "" {
|
|
@@ -438,11 +441,108 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|
mode: mode,
|
|
sqlConnRead: dbRead,
|
|
sqlConnWrite: dbWrite,
|
|
+ historyDB: historyDB,
|
|
ctx: ctx,
|
|
cancel: cancel,
|
|
}, nil
|
|
}
|
|
|
|
+// APIServer is a server that only runs the API
|
|
+type APIServer struct {
|
|
+ nodeAPI *NodeAPI
|
|
+}
|
|
+
|
|
+func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
|
|
+ // NOTE: I just copied some parts of NewNode related to starting the
|
|
+ // API, but it still cotains many parameters that are not available
|
|
+ meddler.Debug = cfg.Debug.MeddlerLogs
|
|
+ // Stablish DB connection
|
|
+ dbWrite, err := dbUtils.InitSQLDB(
|
|
+ cfg.PostgreSQL.PortWrite,
|
|
+ cfg.PostgreSQL.HostWrite,
|
|
+ cfg.PostgreSQL.UserWrite,
|
|
+ cfg.PostgreSQL.PasswordWrite,
|
|
+ cfg.PostgreSQL.NameWrite,
|
|
+ )
|
|
+ if err != nil {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
|
|
+ }
|
|
+ var dbRead *sqlx.DB
|
|
+ if cfg.PostgreSQL.HostRead == "" {
|
|
+ dbRead = dbWrite
|
|
+ } else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf(
|
|
+ "PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
|
|
+ ))
|
|
+ } else {
|
|
+ dbRead, err = dbUtils.InitSQLDB(
|
|
+ cfg.PostgreSQL.PortRead,
|
|
+ cfg.PostgreSQL.HostRead,
|
|
+ cfg.PostgreSQL.UserRead,
|
|
+ cfg.PostgreSQL.PasswordRead,
|
|
+ cfg.PostgreSQL.NameRead,
|
|
+ )
|
|
+ if err != nil {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
|
|
+ }
|
|
+ }
|
|
+ var apiConnCon *dbUtils.APIConnectionController
|
|
+ if cfg.API.Explorer || mode == ModeCoordinator {
|
|
+ apiConnCon = dbUtils.NewAPICnnectionController(
|
|
+ cfg.API.MaxSQLConnections,
|
|
+ cfg.API.SQLConnectionTimeout.Duration,
|
|
+ )
|
|
+ }
|
|
+
|
|
+ historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
|
|
+
|
|
+ var l2DB *l2db.L2DB
|
|
+ if mode == ModeCoordinator {
|
|
+ l2DB = l2db.NewL2DB(
|
|
+ dbRead, dbWrite,
|
|
+ cfg.L2DB.SafetyPeriod,
|
|
+ cfg.L2DB.MaxTxs,
|
|
+ cfg.L2DB.MinFeeUSD,
|
|
+ cfg.L2DB,
|
|
+ apiConnCon,
|
|
+ )
|
|
+ }
|
|
+
|
|
+ var nodeAPI *NodeAPI
|
|
+ if cfg.API.Address != "" {
|
|
+ if cfg.Debug.GinDebugMode {
|
|
+ gin.SetMode(gin.DebugMode)
|
|
+ } else {
|
|
+ gin.SetMode(gin.ReleaseMode)
|
|
+ }
|
|
+ if cfg.API.UpdateMetricsInterval.Duration == 0 {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
|
|
+ cfg.API.UpdateMetricsInterval.Duration))
|
|
+ }
|
|
+ if cfg.API.UpdateRecommendedFeeInterval.Duration == 0 {
|
|
+ return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateRecommendedFeeInterval: %v",
|
|
+ cfg.API.UpdateRecommendedFeeInterval.Duration))
|
|
+ }
|
|
+ server := gin.Default()
|
|
+ coord := false
|
|
+ if mode == ModeCoordinator {
|
|
+ coord = cfg.Coordinator.API.Coordinator
|
|
+ }
|
|
+ var err error
|
|
+ nodeAPI, err = NewNodeAPI(
|
|
+ cfg.API.Address,
|
|
+ coord, cfg.API.Explorer,
|
|
+ server,
|
|
+ historyDB,
|
|
+ l2DB,
|
|
+ )
|
|
+ if err != nil {
|
|
+ return nil, tracerr.Wrap(err)
|
|
+ }
|
|
+ }
|
|
+ // ETC...
|
|
+}
|
|
+
|
|
// NodeAPI holds the node http API
|
|
type NodeAPI struct { //nolint:golint
|
|
api *api.API
|
|
@@ -462,10 +562,7 @@ func NewNodeAPI(
|
|
coordinatorEndpoints, explorerEndpoints bool,
|
|
server *gin.Engine,
|
|
hdb *historydb.HistoryDB,
|
|
- sdb *statedb.StateDB,
|
|
l2db *l2db.L2DB,
|
|
- config *api.Config,
|
|
- forgeDelay time.Duration,
|
|
) (*NodeAPI, error) {
|
|
engine := gin.Default()
|
|
engine.NoRoute(handleNoRoute)
|
|
@@ -475,10 +572,6 @@ func NewNodeAPI(
|
|
engine,
|
|
hdb,
|
|
l2db,
|
|
- config,
|
|
- &api.NodeConfig{
|
|
- ForgeDelay: forgeDelay.Seconds(),
|
|
- },
|
|
)
|
|
if err != nil {
|
|
return nil, tracerr.Wrap(err)
|
|
@@ -534,17 +627,17 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
|
|
}
|
|
if n.nodeAPI != nil {
|
|
if vars.Rollup != nil {
|
|
- n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
|
|
+ n.historyDB.SetRollupVariables(*vars.Rollup)
|
|
}
|
|
if vars.Auction != nil {
|
|
- n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
|
|
+ n.historyDB.SetAuctionVariables(*vars.Auction)
|
|
}
|
|
if vars.WDelayer != nil {
|
|
- n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
|
|
+ n.historyDB.SetWDelayerVariables(*vars.WDelayer)
|
|
}
|
|
|
|
if stats.Synced() {
|
|
- if err := n.nodeAPI.api.UpdateNetworkInfo(
|
|
+ if err := n.historyDB.UpdateNetworkInfo(
|
|
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
|
common.BatchNum(stats.Eth.LastBatchNum),
|
|
stats.Sync.Auction.CurrentSlot.SlotNum,
|
|
@@ -552,7 +645,7 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
|
|
log.Errorw("API.UpdateNetworkInfo", "err", err)
|
|
}
|
|
} else {
|
|
- n.nodeAPI.api.UpdateNetworkInfoBlock(
|
|
+ n.historyDB.UpdateNetworkInfoBlock(
|
|
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
|
)
|
|
}
|
|
@@ -566,15 +659,13 @@ func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars
|
|
Vars: vars,
|
|
})
|
|
}
|
|
- if n.nodeAPI != nil {
|
|
- vars := n.sync.SCVars()
|
|
- n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
|
|
- n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
|
|
- n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
|
|
- n.nodeAPI.api.UpdateNetworkInfoBlock(
|
|
- stats.Eth.LastBlock, stats.Sync.LastBlock,
|
|
- )
|
|
- }
|
|
+ vars = n.sync.SCVars()
|
|
+ n.historyDB.SetRollupVariables(*vars.Rollup)
|
|
+ n.historyDB.SetAuctionVariables(*vars.Auction)
|
|
+ n.historyDB.SetWDelayerVariables(*vars.WDelayer)
|
|
+ n.historyDB.UpdateNetworkInfoBlock(
|
|
+ stats.Eth.LastBlock, stats.Sync.LastBlock,
|
|
+ )
|
|
}
|
|
|
|
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
|
|
@@ -704,7 +795,7 @@ func (n *Node) StartNodeAPI() {
|
|
n.wg.Add(1)
|
|
go func() {
|
|
// Do an initial update on startup
|
|
- if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
|
|
+ if err := n.historyDB.UpdateMetrics(); err != nil {
|
|
log.Errorw("API.UpdateMetrics", "err", err)
|
|
}
|
|
for {
|
|
@@ -714,7 +805,7 @@ func (n *Node) StartNodeAPI() {
|
|
n.wg.Done()
|
|
return
|
|
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
|
|
- if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
|
|
+ if err := n.historyDB.UpdateMetrics(); err != nil {
|
|
log.Errorw("API.UpdateMetrics", "err", err)
|
|
}
|
|
}
|
|
@@ -724,7 +815,7 @@ func (n *Node) StartNodeAPI() {
|
|
n.wg.Add(1)
|
|
go func() {
|
|
// Do an initial update on startup
|
|
- if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
|
|
+ if err := n.historyDB.UpdateRecommendedFee(); err != nil {
|
|
log.Errorw("API.UpdateRecommendedFee", "err", err)
|
|
}
|
|
for {
|
|
@@ -734,7 +825,7 @@ func (n *Node) StartNodeAPI() {
|
|
n.wg.Done()
|
|
return
|
|
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
|
|
- if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
|
|
+ if err := n.historyDB.UpdateRecommendedFee(); err != nil {
|
|
log.Errorw("API.UpdateRecommendedFee", "err", err)
|
|
}
|
|
}
|