Allow serving API only via new cli command

- Add a new command to the cli/node: `serveapi`, which allows serving the API
  just by connecting to the PostgreSQL database.  The mode flag must be passed
  in order to select whether we are connecting to a synchronizer database or a
  coordinator database.  If `coord` is chosen as the mode, the coordinator
  endpoints can be activated in order to allow inserting l2txs and
  authorizations into the L2DB.  A rough sketch of such a command is shown
  below.
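
A minimal sketch of how the command could be registered, assuming
urfave/cli/v2 (which the node's cli already uses); the flag name `mode` and
the values `sync`/`coord` are illustrative assumptions, not the exact cli
surface of this commit:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "hermez-node",
		Commands: []*cli.Command{
			{
				Name:  "serveapi",
				Usage: "Serve the API only, connecting directly to the PostgreSQL database",
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:     "mode",
						Usage:    `API mode: "sync" or "coord"`,
						Required: true,
					},
				},
				Action: func(c *cli.Context) error {
					mode := c.String("mode")
					if mode != "sync" && mode != "coord" {
						return fmt.Errorf("invalid mode %q", mode)
					}
					// In "coord" mode the coordinator endpoints that insert
					// l2txs and authorizations into the L2DB are also enabled.
					fmt.Println("starting API-only server in mode:", mode)
					return nil
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```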

Summary of the implementation details:
- New SQL table with 3 columns (plus the `item_id` primary key).  The table
  only contains a single row with `item_id` = 1 (see the schema sketch after
  this list).  Columns:
    - state: historydb.StateAPI in JSON.  This is the struct that is served via
      the `/state` API endpoint.  The node will periodically update this struct
      and store it in the DB.  The API server will query it from the DB to
      serve it.
    - config: historydb.NodeConfig in JSON.  This struct contains node
      configuration parameters that the API needs to be aware of.  It's updated
      once every time the node starts.
    - constants: historydb.Constants in JSON.  This struct contains all the
      Hermez network constants gathered via the Ethereum client by the node.
      It's written once every time the node starts.
- The HistoryDB contains methods to get and update each one of these columns
  individually (see the accessor sketch after this list).
- The HistoryDB contains all methods that query the DB and prepare objects that
  will appear in the StateAPI endpoint.
- The configuration used for the `serveapi` cli/node command is defined in
  `config.APIServer`, and is a subset of `node.Config` in order to allow
  reusing the same configuration file of the node if desired (see the config
  sketch after this list).
- A new object is introduced in the api: `StateAPIUpdater`, which contains all
  the information the node needs to periodically update the StateAPI in the DB
  (see the updater sketch after this list).
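
A minimal sketch of the single-row table and the per-column HistoryDB
accessors described above.  The table/column names, the BYTEA-of-JSON
encoding, and the method names are assumptions for illustration, not the
exact migration or API of the repo:

```go
package historydb

import (
	"encoding/json"

	"github.com/jmoiron/sqlx"
)

// Hypothetical single-row table holding the API state, config and constants.
const nodeInfoTable = `
CREATE TABLE node_info (
    item_id   SERIAL PRIMARY KEY, -- always 1: the table holds a single row
    state     BYTEA,              -- historydb.StateAPI as JSON
    config    BYTEA,              -- historydb.NodeConfig as JSON
    constants BYTEA               -- historydb.Constants as JSON
);
INSERT INTO node_info (item_id) VALUES (1);
`

// StateAPI and HistoryDB are simplified stand-ins for the real types.
type StateAPI struct{ /* network state served at /state */ }

type HistoryDB struct{ db *sqlx.DB }

// GetStateAPI reads the state column written periodically by the node.
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
	var raw []byte
	if err := hdb.db.QueryRow(
		"SELECT state FROM node_info WHERE item_id = 1;",
	).Scan(&raw); err != nil {
		return nil, err
	}
	var state StateAPI
	if err := json.Unmarshal(raw, &state); err != nil {
		return nil, err
	}
	return &state, nil
}

// SetStateAPI overwrites the state column; config and constants get
// analogous per-column getters and setters.
func (hdb *HistoryDB) SetStateAPI(state *StateAPI) error {
	raw, err := json.Marshal(state)
	if err != nil {
		return err
	}
	_, err = hdb.db.Exec(
		"UPDATE node_info SET state = $1 WHERE item_id = 1;", raw)
	return err
}
```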
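
How the config subset can work in practice, as a sketch with illustrative
section names (the real structs carry many more fields): `APIServer` mirrors
a subset of `node.Config`'s sections, so a full node config file still
decodes into it, with TOML decoders simply skipping keys a struct doesn't
declare.

```go
package config

// PostgreSQL is an illustrative shared section.
type PostgreSQL struct {
	Host string
	Port int
}

// APIServer declares only the sections the API-only server needs.
type APIServer struct {
	API struct {
		Address string
	}
	PostgreSQL PostgreSQL
}

// Node is a superset of APIServer: it repeats the same sections and adds
// node-only ones, so the same configuration file serves both commands.
type Node struct {
	API struct {
		Address string
	}
	PostgreSQL   PostgreSQL
	Synchronizer struct {
		SyncLoopInterval string
	}
}
```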
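
And a sketch of how the node might drive the `StateAPIUpdater`; the `Store`
method name and the ticker-based loop are assumptions about the mechanism,
not the exact code:

```go
package node

import (
	"context"
	"log"
	"time"
)

// StateAPIUpdater is a stand-in for api.StateAPIUpdater; Store is assumed to
// recompute the StateAPI struct and persist it into the node_info row.
type StateAPIUpdater interface {
	Store() error
}

// runStateAPIUpdater periodically refreshes the state column so that any
// `serveapi` process reading the same DB serves fresh /state responses.
func runStateAPIUpdater(ctx context.Context, u StateAPIUpdater, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := u.Store(); err != nil {
				log.Printf("StateAPIUpdater.Store: %v", err)
			}
		}
	}
}
```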

- Moved the types `SCConsts`, `SCVariables` and `SCVariablesPtr` from
  `synchronizer` to `common` for convenience (reconstructed below).
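
For reference, the moved types group the constants and variables of the three
smart contracts.  This is a reconstruction from the diff below, assuming the
per-contract types (`RollupConstants`, `AuctionVariables`, etc.) that already
live in `common`; field names match the `ethClientSetup` usages in the tests:

```go
package common

// SCConsts bundles the constants of the three Hermez smart contracts.
type SCConsts struct {
	Rollup   RollupConstants
	Auction  AuctionConstants
	WDelayer WDelayerConstants
}

// SCVariables bundles the variables of the three Hermez smart contracts.
type SCVariables struct {
	Rollup   RollupVariables
	Auction  AuctionVariables
	WDelayer WDelayerVariables
}

// SCVariablesPtr mirrors SCVariables with pointer fields, so a nil field
// means "unchanged" in synchronizer messages.
type SCVariablesPtr struct {
	Rollup   *RollupVariables
	Auction  *AuctionVariables
	WDelayer *WDelayerVariables
}
```
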
Eduard S
2021-02-26 12:40:00 +01:00
parent a5dcc67a08
commit d625cc9287
25 changed files with 1436 additions and 869 deletions

View File

@@ -145,8 +145,8 @@ type Coordinator struct {
 	pipelineNum int // Pipeline sequential number. The first pipeline is 1
 	pipelineFromBatch fromBatch // batch from which we started the pipeline
 	provers []prover.Client
-	consts synchronizer.SCConsts
-	vars synchronizer.SCVariables
+	consts common.SCConsts
+	vars common.SCVariables
 	stats synchronizer.Stats
 	started bool
@@ -186,8 +186,8 @@ func NewCoordinator(cfg Config,
 	batchBuilder *batchbuilder.BatchBuilder,
 	serverProofs []prover.Client,
 	ethClient eth.ClientInterface,
-	scConsts *synchronizer.SCConsts,
-	initSCVars *synchronizer.SCVariables,
+	scConsts *common.SCConsts,
+	initSCVars *common.SCVariables,
 ) (*Coordinator, error) {
 	// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
 	if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -276,13 +276,13 @@ type MsgSyncBlock struct {
 	Batches []common.BatchData
 	// Vars contains each Smart Contract variables if they are updated, or
 	// nil if they haven't changed.
-	Vars synchronizer.SCVariablesPtr
+	Vars common.SCVariablesPtr
 }

 // MsgSyncReorg indicates a reorg
 type MsgSyncReorg struct {
 	Stats synchronizer.Stats
-	Vars synchronizer.SCVariablesPtr
+	Vars common.SCVariablesPtr
 }

 // MsgStopPipeline indicates a signal to reset the pipeline
@@ -301,7 +301,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
 	}
 }

-func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
+func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
 	if update.Rollup != nil {
 		vars.Rollup = *update.Rollup
 	}
@@ -313,7 +313,7 @@ func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariable
 	}
 }

-func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
+func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
 	updateSCVars(&c.vars, vars)
 }

View File

@@ -188,12 +188,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
 		&prover.MockClient{Delay: 400 * time.Millisecond},
 	}
-	scConsts := &synchronizer.SCConsts{
+	scConsts := &common.SCConsts{
 		Rollup: *ethClientSetup.RollupConstants,
 		Auction: *ethClientSetup.AuctionConstants,
 		WDelayer: *ethClientSetup.WDelayerConstants,
 	}
-	initSCVars := &synchronizer.SCVariables{
+	initSCVars := &common.SCVariables{
 		Rollup: *ethClientSetup.RollupVariables,
 		Auction: *ethClientSetup.AuctionVariables,
 		WDelayer: *ethClientSetup.WDelayerVariables,
@@ -529,7 +529,7 @@ func TestCoordinatorStress(t *testing.T) {
 			coord.SendMsg(ctx, MsgSyncBlock{
 				Stats: *stats,
 				Batches: blockData.Rollup.Batches,
-				Vars: synchronizer.SCVariablesPtr{
+				Vars: common.SCVariablesPtr{
 					Rollup: blockData.Rollup.Vars,
 					Auction: blockData.Auction.Vars,
 					WDelayer: blockData.WDelayer.Vars,

View File

@@ -22,7 +22,7 @@ import (
 type statsVars struct {
 	Stats synchronizer.Stats
-	Vars synchronizer.SCVariablesPtr
+	Vars common.SCVariablesPtr
 }

 type state struct {
@@ -36,7 +36,7 @@ type state struct {
 type Pipeline struct {
 	num int
 	cfg Config
-	consts synchronizer.SCConsts
+	consts common.SCConsts

 	// state
 	state state
@@ -57,7 +57,7 @@ type Pipeline struct {
 	purger *Purger
 	stats synchronizer.Stats
-	vars synchronizer.SCVariables
+	vars common.SCVariables
 	statsVarsCh chan statsVars

 	ctx context.Context
@@ -90,7 +90,7 @@ func NewPipeline(ctx context.Context,
 	coord *Coordinator,
 	txManager *TxManager,
 	provers []prover.Client,
-	scConsts *synchronizer.SCConsts,
+	scConsts *common.SCConsts,
 ) (*Pipeline, error) {
 	proversPool := NewProversPool(len(provers))
 	proversPoolSize := 0
@@ -125,7 +125,7 @@ func NewPipeline(ctx context.Context,
 // SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
 func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
-	vars *synchronizer.SCVariablesPtr) {
+	vars *common.SCVariablesPtr) {
 	select {
 	case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
 	case <-ctx.Done():
@@ -134,7 +134,7 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
 // reset pipeline state
 func (p *Pipeline) reset(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
+	stats *synchronizer.Stats, vars *common.SCVariables) error {
 	p.state = state{
 		batchNum: batchNum,
 		lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
@@ -195,7 +195,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
 	return nil
 }

-func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
+func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
 	updateSCVars(&p.vars, vars)
 }
@@ -256,7 +256,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
 // Start the forging pipeline
 func (p *Pipeline) Start(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
+	stats *synchronizer.Stats, vars *common.SCVariables) error {
 	if p.started {
 		log.Fatal("Pipeline already started")
 	}

View File

@@ -206,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
 		require.NoError(t, err)
 	}
-	err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
-		Rollup: *syncSCVars.Rollup,
-		Auction: *syncSCVars.Auction,
-		WDelayer: *syncSCVars.WDelayer,
-	})
+	err = pipeline.reset(batchNum, syncStats, syncSCVars)
 	require.NoError(t, err)

 	// Sanity check
 	sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()

View File

@@ -31,10 +31,10 @@ type TxManager struct {
 	batchCh chan *BatchInfo
 	chainID *big.Int
 	account accounts.Account
-	consts synchronizer.SCConsts
+	consts common.SCConsts

 	stats synchronizer.Stats
-	vars synchronizer.SCVariables
+	vars common.SCVariables
 	statsVarsCh chan statsVars
 	discardPipelineCh chan int // int refers to the pipelineNum
@@ -55,7 +55,7 @@ type TxManager struct {
 // NewTxManager creates a new TxManager
 func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
-	coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (
+	coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (
 	*TxManager, error) {
 	chainID, err := ethClient.EthChainID()
 	if err != nil {
@@ -104,7 +104,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
 // SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
 func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
-	vars *synchronizer.SCVariablesPtr) {
+	vars *common.SCVariablesPtr) {
 	select {
 	case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
 	case <-ctx.Done():
@@ -120,7 +120,7 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
 	}
 }

-func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
+func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
 	updateSCVars(&t.vars, vars)
 }