From 3b3d96e07cc38e2a8a7a03d3022b234349621f8b Mon Sep 17 00:00:00 2001 From: Pantani Date: Tue, 23 Mar 2021 01:40:32 -0300 Subject: [PATCH 01/11] create the metrics package for a better app instrumenting --- cli/node/cfg.buidler.toml | 2 +- metric/metric.go | 192 +++++++++++++++++++++++++++++++++++ synchronizer/metrics.go | 44 -------- synchronizer/synchronizer.go | 10 +- txselector/metrics.go | 53 ---------- txselector/txselector.go | 22 ++-- 6 files changed, 211 insertions(+), 112 deletions(-) create mode 100644 metric/metric.go delete mode 100644 synchronizer/metrics.go delete mode 100644 txselector/metrics.go diff --git a/cli/node/cfg.buidler.toml b/cli/node/cfg.buidler.toml index f2d561e..bfd8675 100644 --- a/cli/node/cfg.buidler.toml +++ b/cli/node/cfg.buidler.toml @@ -35,7 +35,7 @@ Symbol = "SUSHI" Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2" [Debug] -APIAddress = "localhost:12345" +APIAddress = "0.0.0.0:12345" MeddlerLogs = true GinDebugMode = true diff --git a/metric/metric.go b/metric/metric.go new file mode 100644 index 0000000..a3cc1f5 --- /dev/null +++ b/metric/metric.go @@ -0,0 +1,192 @@ +package metric + +import ( + "time" + + "github.com/hermeznetwork/hermez-node/log" + "github.com/prometheus/client_golang/prometheus" +) + +type ( + // Metric represents the metric type + Metric string +) + +const ( + namespaceError = "error" + namespaceSync = "synchronizer" + namespaceTxSelector = "txselector" + namespaceAPI = "api" +) + +var ( + // Errors errors count metric. + Errors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespaceError, + Name: "errors", + Help: "", + }, []string{"error"}) + + // WaitServerProof duration time to get the calculated + // proof from the server. 
+ WaitServerProof = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespaceSync, + Name: "wait_server_proof", + Help: "", + }, []string{"batch_number", "pipeline_number"}) + + // Reorgs block reorg count + Reorgs = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceSync, + Name: "reorgs", + Help: "", + }) + + // LastBlockNum last block synced + LastBlockNum = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceSync, + Name: "synced_last_block_num", + Help: "", + }) + + // EthLastBlockNum last eth block synced + EthLastBlockNum = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceSync, + Name: "eth_last_block_num", + Help: "", + }) + + // LastBatchNum last batch synced + LastBatchNum = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceSync, + Name: "synced_last_batch_num", + Help: "", + }) + + // EthLastBatchNum last eth batch synced + EthLastBatchNum = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceSync, + Name: "eth_last_batch_num", + Help: "", + }) + + // GetL2TxSelection L2 tx selection count + GetL2TxSelection = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceTxSelector, + Name: "get_l2_txselection_total", + Help: "", + }) + + // GetL1L2TxSelection L1L2 tx selection count + GetL1L2TxSelection = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceTxSelector, + Name: "get_l1_l2_txselection_total", + Help: "", + }) + + // SelectedL1CoordinatorTxs selected L1 coordinator tx count + SelectedL1CoordinatorTxs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceTxSelector, + Name: "selected_l1_coordinator_txs", + Help: "", + }) + + // SelectedL1UserTxs selected L1 user tx count + SelectedL1UserTxs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceTxSelector, + Name: "selected_l1_user_txs", + Help: "", + }) + + // SelectedL2Txs selected L2 tx count + SelectedL2Txs = 
prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceTxSelector, + Name: "selected_l2_txs", + Help: "", + }) + + // DiscardedL2Txs discarded L2 tx count + DiscardedL2Txs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceTxSelector, + Name: "discarded_l2_txs", + Help: "", + }) +) + +func init() { + if err := registerCollectors(); err != nil { + log.Error(err) + } +} +func registerCollectors() error { + if err := registerCollector(Errors); err != nil { + return err + } + if err := registerCollector(WaitServerProof); err != nil { + return err + } + if err := registerCollector(Reorgs); err != nil { + return err + } + if err := registerCollector(LastBlockNum); err != nil { + return err + } + if err := registerCollector(LastBatchNum); err != nil { + return err + } + if err := registerCollector(EthLastBlockNum); err != nil { + return err + } + if err := registerCollector(EthLastBatchNum); err != nil { + return err + } + if err := registerCollector(GetL2TxSelection); err != nil { + return err + } + if err := registerCollector(GetL1L2TxSelection); err != nil { + return err + } + if err := registerCollector(SelectedL1CoordinatorTxs); err != nil { + return err + } + if err := registerCollector(SelectedL1UserTxs); err != nil { + return err + } + if err := registerCollector(SelectedL2Txs); err != nil { return err }; return registerCollector(DiscardedL2Txs) +} + +func registerCollector(collector prometheus.Collector) error { + err := prometheus.Register(collector) + if err != nil { + if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + return err + } + } + return nil +} + +// MeasureDuration measure the method execution duration +// and save it into a histogram metric +func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) { + duration := time.Since(start) + histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds())) +} + +// CollectError collect the error message and increment +// the error count +func CollectError(err error) { + 
Errors.With(map[string]string{"error": err.Error()}).Inc() +} diff --git a/synchronizer/metrics.go b/synchronizer/metrics.go deleted file mode 100644 index bff876b..0000000 --- a/synchronizer/metrics.go +++ /dev/null @@ -1,44 +0,0 @@ -package synchronizer - -import "github.com/prometheus/client_golang/prometheus" - -var ( - metricReorgsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "sync_reorgs", - Help: "", - }, - ) - metricSyncedLastBlockNum = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "sync_synced_last_block_num", - Help: "", - }, - ) - metricEthLastBlockNum = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "sync_eth_last_block_num", - Help: "", - }, - ) - metricSyncedLastBatchNum = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "sync_synced_last_batch_num", - Help: "", - }, - ) - metricEthLastBatchNum = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "sync_eth_last_batch_num", - Help: "", - }, - ) -) - -func init() { - prometheus.MustRegister(metricReorgsCount) - prometheus.MustRegister(metricSyncedLastBlockNum) - prometheus.MustRegister(metricEthLastBlockNum) - prometheus.MustRegister(metricSyncedLastBatchNum) - prometheus.MustRegister(metricEthLastBatchNum) -} diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 0d1b9fb..63bb1d5 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -15,6 +15,7 @@ import ( "github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/log" + "github.com/hermeznetwork/hermez-node/metric" "github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/tracerr" ) @@ -549,6 +550,7 @@ func (s *Synchronizer) Sync(ctx context.Context, return nil, nil, tracerr.Wrap(err) } discarded := lastSavedBlock.Num - lastDBBlockNum + metric.Reorgs.Inc() return nil, &discarded, nil } } @@ -641,16 +643,16 @@ func (s *Synchronizer) Sync(ctx context.Context, } for _, batchData 
:= range rollupData.Batches { - metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum)) - metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum)) + metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum)) + metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum)) log.Debugw("Synced batch", "syncLastBatch", batchData.Batch.BatchNum, "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum), "ethLastBatch", s.stats.Eth.LastBatchNum, ) } - metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num)) - metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num)) + metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num)) + metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num)) log.Debugw("Synced block", "syncLastBlockNum", s.stats.Sync.LastBlock.Num, "syncBlocksPerc", s.stats.blocksPerc(), diff --git a/txselector/metrics.go b/txselector/metrics.go deleted file mode 100644 index cb4d780..0000000 --- a/txselector/metrics.go +++ /dev/null @@ -1,53 +0,0 @@ -package txselector - -import "github.com/prometheus/client_golang/prometheus" - -var ( - metricGetL2TxSelection = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "txsel_get_l2_txselecton_total", - Help: "", - }, - ) - metricGetL1L2TxSelection = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "txsel_get_l1_l2_txselecton_total", - Help: "", - }, - ) - - metricSelectedL1CoordinatorTxs = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "txsel_selected_l1_coordinator_txs", - Help: "", - }, - ) - metricSelectedL1UserTxs = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "txsel_selected_l1_user_txs", - Help: "", - }, - ) - metricSelectedL2Txs = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "txsel_selected_l2_txs", - Help: "", - }, - ) - metricDiscardedL2Txs = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "txsel_discarded_l2_txs", - Help: "", - }, - ) -) - -func init() { - prometheus.MustRegister(metricGetL2TxSelection) - 
prometheus.MustRegister(metricGetL1L2TxSelection) - - prometheus.MustRegister(metricSelectedL1CoordinatorTxs) - prometheus.MustRegister(metricSelectedL1UserTxs) - prometheus.MustRegister(metricSelectedL2Txs) - prometheus.MustRegister(metricDiscardedL2Txs) -} diff --git a/txselector/txselector.go b/txselector/txselector.go index 46338b7..1453367 100644 --- a/txselector/txselector.go +++ b/txselector/txselector.go @@ -13,6 +13,7 @@ import ( "github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/log" + "github.com/hermeznetwork/hermez-node/metric" "github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/tracerr" "github.com/iden3/go-iden3-crypto/babyjub" @@ -123,7 +124,7 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, // included in the next batch. func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx, [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { - metricGetL2TxSelection.Inc() + metric.GetL2TxSelection.Inc() coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{}) return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, @@ -141,7 +142,7 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { - metricGetL1L2TxSelection.Inc() + metric.GetL1L2TxSelection.Inc() coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs) return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, @@ -221,10 +222,11 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig 
txprocessor.Config, return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) } - metricSelectedL1UserTxs.Set(float64(len(l1UserTxs))) - metricSelectedL1CoordinatorTxs.Set(0) - metricSelectedL2Txs.Set(0) - metricDiscardedL2Txs.Set(float64(len(discardedL2Txs))) + metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs))) + metric.SelectedL1CoordinatorTxs.Set(0) + metric.SelectedL2Txs.Set(0) + metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs))) + return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil } @@ -320,10 +322,10 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) } - metricSelectedL1UserTxs.Set(float64(len(l1UserTxs))) - metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs))) - metricSelectedL2Txs.Set(float64(len(validTxs))) - metricDiscardedL2Txs.Set(float64(len(discardedL2Txs))) + metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs))) + metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs))) + metric.SelectedL2Txs.Set(float64(len(validTxs))) + metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs))) return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil } From 6d84d143a2a87838f8ccdcd9e00e8d684ea7b429 Mon Sep 17 00:00:00 2001 From: Pantani Date: Tue, 23 Mar 2021 01:40:45 -0300 Subject: [PATCH 02/11] Measure the server proof duration --- coordinator/batch.go | 1 + coordinator/pipeline.go | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/coordinator/batch.go b/coordinator/batch.go index 8df15d8..75f35ad 100644 --- a/coordinator/batch.go +++ b/coordinator/batch.go @@ -80,6 +80,7 @@ type BatchInfo struct { PipelineNum int BatchNum common.BatchNum ServerProof prover.Client + ProofStart time.Time ZKInputs *common.ZKInputs Proof *prover.Proof PublicInputs []*big.Int diff --git a/coordinator/pipeline.go b/coordinator/pipeline.go index aa690b4..939815e 100644 --- a/coordinator/pipeline.go +++ b/coordinator/pipeline.go @@ 
-5,6 +5,7 @@ import ( "database/sql" "fmt" "math/big" + "strconv" "sync" "time" @@ -14,6 +15,7 @@ import ( "github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/log" + "github.com/hermeznetwork/hermez-node/metric" "github.com/hermeznetwork/hermez-node/prover" "github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/txselector" @@ -246,6 +248,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, // 3. Send the ZKInputs to the proof server batchInfo.ServerProof = serverProof + batchInfo.ProofStart = time.Now() if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil { return nil, ctx.Err() } else if err != nil { @@ -602,6 +605,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, // waitServerProof gets the generated zkProof & sends it to the SmartContract func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error { + defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart, + batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum)) + proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call, // until not resolved don't continue. 
Returns when the proof server has calculated the proof if err != nil { From 2125812e90ab32eb14cc53b21c69043600a751d3 Mon Sep 17 00:00:00 2001 From: Oleksandr Brezhniev Date: Tue, 23 Mar 2021 15:40:11 +0200 Subject: [PATCH 03/11] Faster synchronization with usage of HeaderByNumber instead of BlockByNumber --- eth/ethereum.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/eth/ethereum.go b/eth/ethereum.go index fcfba50..de3c99f 100644 --- a/eth/ethereum.go +++ b/eth/ethereum.go @@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c if number == -1 { blockNum = nil } - block, err := c.client.BlockByNumber(ctx, blockNum) + header, err := c.client.HeaderByNumber(ctx, blockNum) if err != nil { return nil, tracerr.Wrap(err) } b := &common.Block{ - Num: block.Number().Int64(), - Timestamp: time.Unix(int64(block.Time()), 0), - ParentHash: block.ParentHash(), - Hash: block.Hash(), + Num: header.Number.Int64(), + Timestamp: time.Unix(int64(header.Time), 0), + ParentHash: header.ParentHash, + Hash: header.Hash(), } return b, nil } From a1eea4344342e52ffac002111d6a81711874e490 Mon Sep 17 00:00:00 2001 From: Pantani Date: Wed, 24 Mar 2021 10:41:32 -0300 Subject: [PATCH 04/11] fix the invalid goarch build and avoid calling the migration-pack each build --- .gitignore | 3 ++- .goreleaser.yml | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 6dd29b7..a5d8f72 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -bin/ \ No newline at end of file +bin/ +dist/ diff --git a/.goreleaser.yml b/.goreleaser.yml index 8f5efec..645a2b7 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,6 +1,7 @@ before: hooks: - go mod download + - make migration-pack builds: - main: ./cli/node/main.go @@ -9,10 +10,8 @@ builds: goos: - linux - darwin - - windows - hooks: - pre: make migration-pack - post: make migration-clean + goarch: + - amd64 archives: - replacements: 
From 14ead3ddf12a87dfaa34a79fd595757f57724263 Mon Sep 17 00:00:00 2001 From: Mikelle Date: Wed, 24 Mar 2021 22:20:46 +0300 Subject: [PATCH 05/11] added chainId to config API --- api/accountcreationauths.go | 2 +- api/api.go | 3 +-- api/api_test.go | 1 + api/config.go | 1 + api/swagger.yml | 5 +++++ api/txspool.go | 2 +- 6 files changed, 10 insertions(+), 4 deletions(-) diff --git a/api/accountcreationauths.go b/api/accountcreationauths.go index f5a562d..46e55a8 100644 --- a/api/accountcreationauths.go +++ b/api/accountcreationauths.go @@ -21,7 +21,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) { } // API to common + verify signature commonAuth := accountCreationAuthAPIToCommon(&apiAuth) - if !commonAuth.VerifySignature(a.chainID, a.hermezAddress) { + if !commonAuth.VerifySignature(a.cg.ChainID, a.hermezAddress) { retBadReq(errors.New("invalid signature"), c) return } diff --git a/api/api.go b/api/api.go index 926bc86..4399534 100644 --- a/api/api.go +++ b/api/api.go @@ -15,7 +15,6 @@ type API struct { h *historydb.HistoryDB cg *configAPI l2 *l2db.L2DB - chainID uint16 hermezAddress ethCommon.Address } @@ -44,9 +43,9 @@ func NewAPI( RollupConstants: *newRollupConstants(consts.Rollup), AuctionConstants: consts.Auction, WDelayerConstants: consts.WDelayer, + ChainID: consts.ChainID, }, l2: l2db, - chainID: consts.ChainID, hermezAddress: consts.HermezAddress, } diff --git a/api/api_test.go b/api/api_test.go index a6ff1a9..7c6c05c 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -215,6 +215,7 @@ func TestMain(m *testing.M) { chainID := uint16(0) _config := getConfigTest(chainID) config = configAPI{ + ChainID: chainID, RollupConstants: *newRollupConstants(_config.RollupConstants), AuctionConstants: _config.AuctionConstants, WDelayerConstants: _config.WDelayerConstants, diff --git a/api/config.go b/api/config.go index 93292c9..7efd056 100644 --- a/api/config.go +++ b/api/config.go @@ -57,6 +57,7 @@ type Config struct { } type configAPI struct { + ChainID 
uint16 `json:"chainId"` RollupConstants rollupConstants `json:"hermez"` AuctionConstants common.AuctionConstants `json:"auction"` WDelayerConstants common.WDelayerConstants `json:"withdrawalDelayer"` diff --git a/api/swagger.yml b/api/swagger.yml index a1e70e8..04e430b 100644 --- a/api/swagger.yml +++ b/api/swagger.yml @@ -3040,10 +3040,15 @@ components: - maxEmergencyModeTime - hermezRollup additionalProperties: false + chainId: + type: integer + description: Id of the chain + example: 27 required: - hermez - auction - withdrawalDelayer + - chainId additionalProperties: false Error: type: object diff --git a/api/txspool.go b/api/txspool.go index 11cd878..06d5224 100644 --- a/api/txspool.go +++ b/api/txspool.go @@ -187,7 +187,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error { poolTx.TokenID, account.TokenID)) } // Check signature - if !poolTx.VerifySignature(a.chainID, account.BJJ) { + if !poolTx.VerifySignature(a.cg.ChainID, account.BJJ) { return tracerr.Wrap(errors.New("wrong signature")) } return nil From 7b6dd0899ee0dd6f2fc54b1345e0a599ae12e472 Mon Sep 17 00:00:00 2001 From: Mikelle Date: Wed, 24 Mar 2021 22:36:07 +0300 Subject: [PATCH 06/11] added checker and log.warn to updateMethodTypeStatic --- priceupdater/priceupdater.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/priceupdater/priceupdater.go b/priceupdater/priceupdater.go index 7c56bf3..3d8814b 100644 --- a/priceupdater/priceupdater.go +++ b/priceupdater/priceupdater.go @@ -173,6 +173,9 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) { tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr) case UpdateMethodTypeStatic: tokenPrice = token.StaticValue + if tokenPrice == float64(0) { + log.Warn("token price is set to 0. 
Probably StaticValue is not put in the configuration file") + } case UpdateMethodTypeIgnore: continue } From f9ddf88c934bb512a4a8ed1c4cb28aa7e38cbb24 Mon Sep 17 00:00:00 2001 From: arnaubennassar Date: Wed, 24 Mar 2021 16:41:48 +0100 Subject: [PATCH 07/11] Add configuration option to choose recommended fee strategy, and add static strategy --- api/api_test.go | 9 +++- api/stateapiupdater/stateapiupdater.go | 67 +++++++++++++++++++++++--- cli/node/cfg.buidler.toml | 8 +++ config/config.go | 6 ++- node/node.go | 11 ++++- 5 files changed, 88 insertions(+), 13 deletions(-) diff --git a/api/api_test.go b/api/api_test.go index a6ff1a9..25046fc 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -522,11 +522,16 @@ func TestMain(m *testing.M) { WithdrawalDelay: uint64(3000), } - stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{ + stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{ Rollup: rollupVars, Auction: auctionVars, WDelayer: wdelayerVars, - }, constants) + }, constants, &stateapiupdater.RecommendedFeePolicy{ + PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour, + }) + if err != nil { + panic(err) + } // Generate test data, as expected to be received/sended from/to the API testCoords := genTestCoordinators(commonCoords) diff --git a/api/stateapiupdater/stateapiupdater.go b/api/stateapiupdater/stateapiupdater.go index d8cc325..55eff6f 100644 --- a/api/stateapiupdater/stateapiupdater.go +++ b/api/stateapiupdater/stateapiupdater.go @@ -2,10 +2,12 @@ package stateapiupdater import ( "database/sql" + "fmt" "sync" "github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/db/historydb" + "github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/tracerr" ) @@ -17,11 +19,45 @@ type Updater struct { vars common.SCVariablesPtr consts historydb.Constants rw sync.RWMutex + rfp *RecommendedFeePolicy +} + +// RecommendedFeePolicy describes how the recommended fee 
is calculated +type RecommendedFeePolicy struct { + PolicyType RecommendedFeePolicyType `validate:"required"` + StaticValue float64 +} + +// RecommendedFeePolicyType describes the different available recommended fee strategies +type RecommendedFeePolicyType string + +const ( + // RecommendedFeePolicyTypeStatic always give the same StaticValue as recommended fee + RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static" + // RecommendedFeePolicyTypeAvgLastHour set the recommended fee using the average fee of the last hour + RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour" +) + +func (rfp *RecommendedFeePolicy) valid() bool { + switch rfp.PolicyType { + case RecommendedFeePolicyTypeStatic: + if rfp.StaticValue == 0 { + log.Warn("RecommendedFee is set to 0 USD, and the policy is static") + } + return true + case RecommendedFeePolicyTypeAvgLastHour: + return true + default: + return false + } } // NewUpdater creates a new Updater func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables, - consts *historydb.Constants) *Updater { + consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) { + if ok := rfp.valid(); !ok { + return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType)) + } u := Updater{ hdb: hdb, config: *config, @@ -31,9 +67,10 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co ForgeDelay: config.ForgeDelay, }, }, + rfp: rfp, } u.SetSCVars(vars.AsPtr()) - return &u + return &u, nil } // Store the State in the HistoryDB @@ -65,13 +102,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) { // UpdateRecommendedFee update Status.RecommendedFee information func (u *Updater) UpdateRecommendedFee() error { - recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD) - if err != nil { - return tracerr.Wrap(err) + switch u.rfp.PolicyType { + case 
RecommendedFeePolicyTypeStatic: + u.rw.Lock() + u.state.RecommendedFee = common.RecommendedFee{ + ExistingAccount: u.rfp.StaticValue, + CreatesAccount: u.rfp.StaticValue, + CreatesAccountInternal: u.rfp.StaticValue, + } + u.rw.Unlock() + case RecommendedFeePolicyTypeAvgLastHour: + recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD) + if err != nil { + return tracerr.Wrap(err) + } + u.rw.Lock() + u.state.RecommendedFee = *recommendedFee + u.rw.Unlock() + default: + return tracerr.New("Invalid recommended fee policy") } - u.rw.Lock() - u.state.RecommendedFee = *recommendedFee - u.rw.Unlock() + return nil } diff --git a/cli/node/cfg.buidler.toml b/cli/node/cfg.buidler.toml index f2d561e..e9f0467 100644 --- a/cli/node/cfg.buidler.toml +++ b/cli/node/cfg.buidler.toml @@ -145,3 +145,11 @@ Coordinator = true BatchPath = "/tmp/iden3-test/hermez/batchesdebug" LightScrypt = true # RollupVerifierIndex = 0 + +[RecommendedFeePolicy] +# Strategy used to calculate the recommended fee that the API will expose. 
+# Available options: +# - Static: always return the same value (StaticValue) in USD +# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour +PolicyType = "Static" +StaticValue = 0.99 \ No newline at end of file diff --git a/config/config.go b/config/config.go index 419617f..0fb0e13 100644 --- a/config/config.go +++ b/config/config.go @@ -8,6 +8,7 @@ import ( "github.com/BurntSushi/toml" ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/hermeznetwork/hermez-node/api/stateapiupdater" "github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/priceupdater" "github.com/hermeznetwork/tracerr" @@ -347,8 +348,9 @@ type Node struct { // can wait to stablish a SQL connection SQLConnectionTimeout Duration } `validate:"required"` - Debug NodeDebug `validate:"required"` - Coordinator Coordinator `validate:"-"` + RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"` + Debug NodeDebug `validate:"required"` + Coordinator Coordinator `validate:"-"` } // APIServer is the api server configuration parameters diff --git a/node/node.go b/node/node.go index 3445933..900ab1c 100644 --- a/node/node.go +++ b/node/node.go @@ -288,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) { return nil, tracerr.Wrap(err) } - stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts) + stateAPIUpdater, err := stateapiupdater.NewUpdater( + historyDB, + &hdbNodeCfg, + initSCVars, + &hdbConsts, + &cfg.RecommendedFeePolicy, + ) + if err != nil { + return nil, tracerr.Wrap(err) + } var coord *coordinator.Coordinator if mode == ModeCoordinator { From 9245247ee41864943d8d3168bf5337fb49ed4c19 Mon Sep 17 00:00:00 2001 From: Pantani Date: Thu, 25 Mar 2021 09:31:29 -0300 Subject: [PATCH 08/11] create a gin middleware to collect request metrics and export them to the Prometheus route --- api/api.go | 7 +++++ api/handlers.go | 6 +++- metric/request.go | 
78 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 metric/request.go diff --git a/api/api.go b/api/api.go index 926bc86..89f9661 100644 --- a/api/api.go +++ b/api/api.go @@ -7,6 +7,7 @@ import ( "github.com/gin-gonic/gin" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/l2db" + "github.com/hermeznetwork/hermez-node/metric" "github.com/hermeznetwork/tracerr" ) @@ -50,6 +51,12 @@ func NewAPI( hermezAddress: consts.HermezAddress, } + middleware, err := metric.PrometheusMiddleware() + if err != nil { + return nil, err + } + server.Use(middleware) + v1 := server.Group("/v1") // Add coordinator endpoints diff --git a/api/handlers.go b/api/handlers.go index 6e9a2e9..aeefd04 100644 --- a/api/handlers.go +++ b/api/handlers.go @@ -8,6 +8,7 @@ import ( "github.com/gin-gonic/gin" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/log" + "github.com/hermeznetwork/hermez-node/metric" "github.com/hermeznetwork/tracerr" "github.com/lib/pq" "github.com/russross/meddler" @@ -46,7 +47,9 @@ var ( func retSQLErr(err error, c *gin.Context) { log.Warnw("HTTP API SQL request error", "err", err) - errMsg := tracerr.Unwrap(err).Error() + unwrapErr := tracerr.Unwrap(err) + metric.CollectError(unwrapErr) + errMsg := unwrapErr.Error() retDupKey := func(errCode pq.ErrorCode) { // https://www.postgresql.org/docs/current/errcodes-appendix.html if errCode == "23505" { @@ -80,6 +83,7 @@ func retSQLErr(err error, c *gin.Context) { func retBadReq(err error, c *gin.Context) { log.Warnw("HTTP API Bad request error", "err", err) + metric.CollectError(err) c.JSON(http.StatusBadRequest, errorMsg{ Message: err.Error(), }) diff --git a/metric/request.go b/metric/request.go new file mode 100644 index 0000000..b9dcf8d --- /dev/null +++ b/metric/request.go @@ -0,0 +1,78 @@ +package metric + +import ( + "strconv" + "time" + + "github.com/gin-gonic/gin" + 
"github.com/prometheus/client_golang/prometheus" +) + +const ( + favicon = "/favicon.ico" +) + +// Prometheus contains the metrics gathered by the instance and its path +type Prometheus struct { + reqCnt *prometheus.CounterVec + reqDur *prometheus.HistogramVec +} + +// NewPrometheus generates a new set of metrics with a certain subsystem name +func NewPrometheus() (*Prometheus, error) { + reqCnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespaceAPI, + Name: "requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method", + }, + []string{"code", "method", "path"}, + ) + if err := registerCollector(reqCnt); err != nil { + return nil, err + } + reqDur := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespaceAPI, + Name: "request_duration_seconds", + Help: "The HTTP request latencies in seconds", + }, + []string{"code", "method", "path"}, + ) + if err := registerCollector(reqDur); err != nil { + return nil, err + } + return &Prometheus{ + reqCnt: reqCnt, + reqDur: reqDur, + }, nil +} + +// PrometheusMiddleware creates the prometheus collector and +// defines status handler function for the middleware +func PrometheusMiddleware() (gin.HandlerFunc, error) { + p, err := NewPrometheus() + if err != nil { + return nil, err + } + return p.Middleware(), nil +} + +// Middleware defines status handler function for middleware +func (p *Prometheus) Middleware() gin.HandlerFunc { + return func(c *gin.Context) { + if c.Request.URL.Path == favicon { + c.Next() + return + } + start := time.Now() + c.Next() + + status := strconv.Itoa(c.Writer.Status()) + elapsed := float64(time.Since(start)) / float64(time.Second) + fullPath := c.FullPath() + + p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed) + p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc() + } +} From 561f491d5337033a641b13e369be2ad5869dec9c Mon Sep 17 00:00:00 2001 From: Mikelle Date: Thu, 25 
Mar 2021 21:42:50 +0300 Subject: [PATCH 09/11] added token symbol to UpdateMethodTypeStatic log --- priceupdater/priceupdater.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/priceupdater/priceupdater.go b/priceupdater/priceupdater.go index 3d8814b..1658633 100644 --- a/priceupdater/priceupdater.go +++ b/priceupdater/priceupdater.go @@ -174,7 +174,8 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) { case UpdateMethodTypeStatic: tokenPrice = token.StaticValue if tokenPrice == float64(0) { - log.Warn("token price is set to 0. Probably StaticValue is not put in the configuration file") + log.Warn("token price is set to 0. Probably StaticValue is not put in the configuration file,", + "token", token.Symbol) } case UpdateMethodTypeIgnore: continue From 4c99640b8c0025b70e1752a98e449082696a5e95 Mon Sep 17 00:00:00 2001 From: arnaubennassar Date: Fri, 26 Mar 2021 16:08:02 +0100 Subject: [PATCH 10/11] Add package level documentation for db, historydb, l2db and api/... --- api/api.go | 35 +++++++++++++++++++++++--- api/apitypes/apitypes.go | 8 ++++++ api/stateapiupdater/stateapiupdater.go | 7 ++++++ db/historydb/historydb.go | 21 ++++++++++++++++ db/l2db/l2db.go | 17 +++++++++++++ db/utils.go | 7 ++++++ 6 files changed, 92 insertions(+), 3 deletions(-) diff --git a/api/api.go b/api/api.go index 926bc86..2660c6e 100644 --- a/api/api.go +++ b/api/api.go @@ -1,3 +1,27 @@ +/* +Package api implements the public interface of the hermez-node using a HTTP REST API. +There are two subsets of endpoints: +- coordinatorEndpoints: used to receive L2 transactions and account creation authorizations. Targeted for wallets. +- explorerEndpoints: used to provide all sorts of information about the network. Targeted for explorers and similar services. + +About the configuration of the API: +- The API is supposed to be launched using the cli found at the package cli/node, and configured through the configuration file. 
+- The mentioned configuration file allows exposing any combination of the endpoint subsets. +- Although the API can run in a "standalone" manner using the serveapi command, it won't work properly +unless another process acting as a coord or sync is filling the HistoryDB. + +Design principles and considerations: +- In order to decouple the API process from the rest of the node, all the communication between this package and the rest of +the system is done through the SQL database. As a matter of fact, the only public function of the package is the constructor NewAPI. +All the information needed for the API to work should be obtained through the configuration file of the cli or the database. +- The format of the requests / responses doesn't match directly with the common types, and for this reason, the package api/apitypes is used +to facilitate the format conversion. Most of the time, this is done directly at the db level. +- The API endpoints are fully documented using OpenAPI aka Swagger. All the endpoints are tested against the spec to ensure consistency +between implementation and specification. To get a sense of which endpoints exist and how they work, it's strongly recommended to check this specification. +The specification can be found at api/swagger.yml. +- In general, all the API endpoints produce queries to the SQL database in order to retrieve / insert the requested information. The most notable exceptions to this are +the /config endpoint, which returns a static object generated at construction time, and the /state, which also is retrieved from the database, but it's generated by API/stateapiupdater package. +*/ package api import ( @@ -27,7 +51,6 @@ func NewAPI( l2db *l2db.L2DB, ) (*API, error) { // Check input - // TODO: is stateDB only needed for explorer endpoints or for both? 
if coordinatorEndpoints && l2db == nil { return nil, tracerr.Wrap(errors.New("cannot serve Coordinator endpoints without L2DB")) } @@ -54,7 +77,7 @@ func NewAPI( // Add coordinator endpoints if coordinatorEndpoints { - // Account + // Account creation authorization v1.POST("/account-creation-authorization", a.postAccountCreationAuth) v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth) // Transaction @@ -72,17 +95,23 @@ func NewAPI( // Transaction v1.GET("/transactions-history", a.getHistoryTxs) v1.GET("/transactions-history/:id", a.getHistoryTx) - // Status + // Batches v1.GET("/batches", a.getBatches) v1.GET("/batches/:batchNum", a.getBatch) v1.GET("/full-batches/:batchNum", a.getFullBatch) + // Slots v1.GET("/slots", a.getSlots) v1.GET("/slots/:slotNum", a.getSlot) + // Bids v1.GET("/bids", a.getBids) + // State v1.GET("/state", a.getState) + // Config v1.GET("/config", a.getConfig) + // Tokens v1.GET("/tokens", a.getTokens) v1.GET("/tokens/:id", a.getToken) + // Coordinators v1.GET("/coordinators", a.getCoordinators) } diff --git a/api/apitypes/apitypes.go b/api/apitypes/apitypes.go index b9e25e9..d74ddc7 100644 --- a/api/apitypes/apitypes.go +++ b/api/apitypes/apitypes.go @@ -1,3 +1,11 @@ +/* +Package apitypes is used to map the common types used across the node with the format expected by the API. 
+ +This is done using different strategies: +- Marshallers: they get triggered when the API marshals the response structs into JSONs +- Scanners/Valuers: they get triggered when a struct is sent/received to/from the SQL database +- Adhoc functions: when the already mentioned strategies are not suitable, functions are added to the structs to facilitate the conversions +*/ package apitypes import ( diff --git a/api/stateapiupdater/stateapiupdater.go b/api/stateapiupdater/stateapiupdater.go index 55eff6f..d9d68af 100644 --- a/api/stateapiupdater/stateapiupdater.go +++ b/api/stateapiupdater/stateapiupdater.go @@ -1,3 +1,10 @@ +/* +Package stateapiupdater is responsible for generating and storing the object response of the GET /state endpoint exposed through the api package. +This object is extensively defined at the OpenAPI spec located at api/swagger.yml. + +Deployment considerations: in a setup where multiple processes are used (dedicated api process, separated coord / sync, ...), only one process should care +of using this package. +*/ package stateapiupdater import ( diff --git a/db/historydb/historydb.go b/db/historydb/historydb.go index 46581ba..077ade9 100644 --- a/db/historydb/historydb.go +++ b/db/historydb/historydb.go @@ -1,3 +1,24 @@ +/* +Package historydb is responsible for storing and retrieving the historic data of the Hermez network. +It's mostly but not exclusively used by the API and the synchronizer. + +Apart from the logic defined in this package, it's important to notice that there are some triggers defined in the +migration files that have to be taken into consideration to understanding the results of some queries. This is especially true +for reorgs: all the data is directly or indirectly related to a block, this makes handling reorgs as easy as deleting the +reorged blocks from the block table, and all related items will be dropped in cascade. 
This is not the only case, in general +functions defined in this package that get affected somehow by the SQL level defined logic has a special mention on the function description. + +Some of the database tooling used in this package such as meddler and migration tools is explained in the db package. + +This package is spitted in different files following these ideas: +- historydb.go: constructor and functions used by packages other than the api. +- apiqueries.go: functions used by the API, the queries implemented in this functions use a semaphore +to restrict the maximum concurrent connections to the database. +- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however +most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables. +In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats. +- nodeinfo.go: used to handle the interfaces and structs that allow communication across running in different machines/process but sharing the same database. +*/ package historydb import ( diff --git a/db/l2db/l2db.go b/db/l2db/l2db.go index ef602b9..cddc1ec 100644 --- a/db/l2db/l2db.go +++ b/db/l2db/l2db.go @@ -1,3 +1,20 @@ +/* +Package l2db is responsible for storing and retrieving the data received by the coordinator through the api. +Note that this data will be different for each coordinator in the network, as this represents the L2 information. + +The data managed by this package is fundamentally PoolL2Tx and AccountCreationAuth. All this data come from +the API sent by clients and is used by the txselector to decide which transactions are selected to forge a batch. + +Some of the database tooling used in this package such as meddler and migration tools is explained in the db package. 
+ +This package is spitted in different files following these ideas: +- l2db.go: constructor and functions used by packages other than the api. +- apiqueries.go: functions used by the API, the queries implemented in this functions use a semaphore +to restrict the maximum concurrent connections to the database. +- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however +most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables. +In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats. +*/ package l2db import ( diff --git a/db/utils.go b/db/utils.go index f7bdcc5..b3cd335 100644 --- a/db/utils.go +++ b/db/utils.go @@ -1,3 +1,10 @@ +/* +Package db have some common utilities shared by db/l2db and db/historydb, the most relevant ones are: +- SQL connection utilities +- Managing the SQL schema: this is done using migration files placed under db/migrations. The files are executed by +order of the file name. +- Custom meddlers: used to easily transform struct <==> table +*/ package db import ( From 6f1a44df027de033f02a57bccf9813dc3296a274 Mon Sep 17 00:00:00 2001 From: arnaucube Date: Tue, 30 Mar 2021 13:24:40 +0200 Subject: [PATCH 11/11] TxSel avoid L1CoordTx for L2Tx that have a L1UserTx in the frozen queue For the L2Txs of TransferToEthAddr & TransferToBJJ for a not-yet existing accounts, in the TxSelector check if L2Tx receiver account will be created by a L1UserFrozenTxs (in the next batch, the current frozen queue). In that case, the L2Tx will be discarded at the current batch, even if there is an AccountCreationAuth for the account, as there is a L1UserTx in the frozen queue that will create the receiver Account. 
The L2Tx is discarded to avoid the Coordinator creating a new L1CoordinatorTx to create the receiver account, which will be also created in the next batch from the L1UserFrozenTx, ending with the user having 2 different accounts for the same TokenID. The double account creation is supported by the Hermez zkRollup specification, but it was decided to mitigate it at the TxSelector level for the explained cases. --- common/l1tx.go | 2 +- coordinator/pipeline.go | 19 +++- db/historydb/historydb.go | 18 ++++ db/historydb/historydb_test.go | 33 +++++-- test/zkproof/flows_test.go | 14 +-- txselector/txselector.go | 71 ++++++++++---- txselector/txselector_test.go | 171 +++++++++++++++++++++++++++------ 7 files changed, 264 insertions(+), 64 deletions(-) diff --git a/common/l1tx.go b/common/l1tx.go index 1a2ec43..69a572d 100644 --- a/common/l1tx.go +++ b/common/l1tx.go @@ -22,7 +22,7 @@ type L1Tx struct { // - L1UserTx: 0 // - L1CoordinatorTx: 1 TxID TxID `meddler:"id"` - // ToForgeL1TxsNum indicates in which the tx was forged / will be forged + // ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` Position int `meddler:"position"` // UserOrigin is set to true if the tx was originated by a user, false if it was diff --git a/coordinator/pipeline.go b/coordinator/pipeline.go index 939815e..5c75577 100644 --- a/coordinator/pipeline.go +++ b/coordinator/pipeline.go @@ -523,15 +523,30 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, if err != nil { return nil, nil, tracerr.Wrap(err) } + // l1UserFutureTxs are the l1UserTxs that are not being forged + // in the next batch, but that are also in the queue for the + // future batches + l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1) + if err != nil { + return nil, nil, tracerr.Wrap(err) + } + coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err = - 
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs) + p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs) if err != nil { return nil, nil, tracerr.Wrap(err) } } else { + // get l1UserFutureTxs which are all the l1 pending in all the + // queues + l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd + if err != nil { + return nil, nil, tracerr.Wrap(err) + } + // 2b: only L2 txs coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = - p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig) + p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs) if err != nil { return nil, nil, tracerr.Wrap(err) } diff --git a/db/historydb/historydb.go b/db/historydb/historydb.go index 46581ba..804a21d 100644 --- a/db/historydb/historydb.go +++ b/db/historydb/historydb.go @@ -751,6 +751,24 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) } +// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch +// with toForgeL1TxsNum (in one of the future batches, not in the next one). 
+func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) { + var txs []*common.L1Tx + err := meddler.QueryAll( + hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null + `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, + tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, + tx.amount, NULL AS effective_amount, + tx.deposit_amount, NULL AS effective_deposit_amount, + tx.eth_block_num, tx.type, tx.batch_num + FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1 + ORDER BY position;`, + toForgeL1TxsNum, + ) + return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) +} + // GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in // open or frozen queues that are not yet forged) func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) { diff --git a/db/historydb/historydb_test.go b/db/historydb/historydb_test.go index 744da1f..92a8730 100644 --- a/db/historydb/historydb_test.go +++ b/db/historydb/historydb_test.go @@ -699,35 +699,56 @@ func TestGetUnforgedL1UserTxs(t *testing.T) { CreateAccountDeposit(1) B: 5 CreateAccountDeposit(1) C: 5 CreateAccountDeposit(1) D: 5 + > block + > batchL1 > block + + CreateAccountDeposit(1) E: 5 + CreateAccountDeposit(1) F: 5 + > block + ` tc := til.NewContext(uint16(0), 128) blocks, err := tc.GenerateBlocks(set) require.NoError(t, err) // Sanity check - require.Equal(t, 1, len(blocks)) + require.Equal(t, 3, len(blocks)) require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs)) - toForgeL1TxsNum := int64(1) - for i := range blocks { err = historyDB.AddBlockSCData(&blocks[i]) require.NoError(t, err) } - l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum) + l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0) + require.NoError(t, err) + assert.Equal(t, 7, len(l1UserTxs)) + + l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1) require.NoError(t, err) assert.Equal(t, 5, len(l1UserTxs)) assert.Equal(t, 
blocks[0].Rollup.L1UserTxs, l1UserTxs) + l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1) + require.NoError(t, err) + assert.Equal(t, 2, len(l1UserTxs)) + count, err := historyDB.GetUnforgedL1UserTxsCount() require.NoError(t, err) - assert.Equal(t, 5, count) + assert.Equal(t, 7, count) - // No l1UserTxs for this toForgeL1TxsNum l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2) require.NoError(t, err) + assert.Equal(t, 2, len(l1UserTxs)) + + l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2) + require.NoError(t, err) + assert.Equal(t, 0, len(l1UserTxs)) + + // No l1UserTxs for this toForgeL1TxsNum + l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3) + require.NoError(t, err) assert.Equal(t, 0, len(l1UserTxs)) } diff --git a/test/zkproof/flows_test.go b/test/zkproof/flows_test.go index 407a15a..c0cf176 100644 --- a/test/zkproof/flows_test.go +++ b/test/zkproof/flows_test.go @@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { } // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { 
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum]) // TxSelector select the transactions for the next Batch coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder build Batch zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) { // TxSelector select the transactions for the next Batch l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) + txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil) require.NoError(t, err) // BatchBuilder 
build Batch zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) @@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) { require.NoError(t, err) addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(txprocConfig, nil) + txsel.GetL1L2TxSelection(txprocConfig, nil, nil) require.NoError(t, err) assert.Equal(t, 1, len(coordIdxs)) assert.Equal(t, 0, len(oL1UserTxs)) diff --git a/txselector/txselector.go b/txselector/txselector.go index 1453367..6bc252f 100644 --- a/txselector/txselector.go +++ b/txselector/txselector.go @@ -85,7 +85,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) { // check if CoordinatorAccount for TokenID is already pending to create - if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID, + if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID, txsel.coordAccount.Addr, txsel.coordAccount.BJJ) { return nil, positionL1, nil } @@ -122,11 +122,12 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, // but there is a transactions to them and the authorization of account // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. 
-func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx, +func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { metric.GetL2TxSelection.Inc() coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, - discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{}) + discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, + []common.L1Tx{}, l1UserFutureTxs) return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err) } @@ -140,11 +141,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([ // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, - l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, + l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { metric.GetL1L2TxSelection.Inc() coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, - discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs) + discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs) return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err) } @@ -158,7 +159,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // included in the next batch. 
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, - l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, + l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { // WIP.0: the TxSelector is not optimized and will need a redesign. The // current version is implemented in order to have a functional @@ -235,7 +236,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, var validTxs, discardedL2Txs []common.PoolL2Tx l2TxsForgable = sortL2Txs(l2TxsForgable) accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err = - txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), + txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs, l2TxsForgable, validTxs, discardedL2Txs) if err != nil { return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) @@ -249,8 +250,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, var l1CoordinatorTxs2 []common.L1Tx accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err = txsel.processL2Txs(tp, selectionConfig, - len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable, - validTxs, discardedL2Txs) + len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs, + l2TxsNonForgable, validTxs, discardedL2Txs) if err != nil { return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) } @@ -331,8 +332,9 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, } func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, - selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ( - [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { + selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx, + l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx, + []common.PoolL2Tx, []common.PoolL2Tx, error) { var l1CoordinatorTxs 
[]common.L1Tx positionL1 := nL1Txs var accAuths [][]byte @@ -434,7 +436,8 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case validL2Tx, l1CoordinatorTx, accAuth, err := txsel.processTxToEthAddrBJJ(validTxs, selectionConfig, - nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i]) + nL1Txs, l1UserFutureTxs, l1CoordinatorTxs, + positionL1, l2Txs[i]) if err != nil { log.Debugw("txsel.processTxToEthAddrBJJ", "err", err) // Discard L2Tx, and update Info parameter of @@ -574,18 +577,35 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor, // l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs // array. func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx, - selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx, - positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx, - *common.AccountCreationAuth, error) { + selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs, + l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) ( + *common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) { // if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a // previous L2Tx in the current process already created a // L1CoordinatorTx of this type, in the DB there still seem that needs // to create a new L1CoordinatorTx, but as is already created, the tx // is valid - if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { + if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { return &l2Tx, nil, nil, nil } + // check if L2Tx receiver account will be created by a L1UserFutureTxs + // (in the next batch, the current frozen queue). 
In that case, the L2Tx + // will be discarded at the current batch, even if there is an + // AccountCreationAuth for the account, as there is a L1UserTx in the + // frozen queue that will create the receiver Account. The L2Tx is + // discarded to avoid the Coordinator creating a new L1CoordinatorTx to + // create the receiver account, which will be also created in the next + // batch from the L1UserFutureTx, ending with the user having 2 + // different accounts for the same TokenID. The double account creation + // is supported by the Hermez zkRollup specification, but it was decided + // to mitigate it at the TxSelector level for the explained cases. + if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { + return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" + + " receiver account does not exist yet, and there is a L1UserTx that" + + " will create that account in a future batch.") + } + var l1CoordinatorTx *common.L1Tx var accAuth *common.AccountCreationAuth if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr { @@ -688,7 +708,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx, return &l2Tx, l1CoordinatorTx, accAuth, nil } -func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, +func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { for i := 0; i < len(l1CoordinatorTxs); i++ { if l1CoordinatorTxs[i].FromEthAddr == addr && @@ -700,6 +720,23 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common. 
return false } +func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID, + addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { + for i := 0; i < len(l1UserFutureTxs); i++ { + if l1UserFutureTxs[i].FromEthAddr == addr && + l1UserFutureTxs[i].TokenID == tokenID && + l1UserFutureTxs[i].FromBJJ == bjj { + return true + } + if l1UserFutureTxs[i].FromEthAddr == addr && + l1UserFutureTxs[i].TokenID == tokenID && + common.EmptyBJJComp == bjj { + return true + } + } + return false +} + // sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx { // Sort by absolute fee with SliceStable, so that txs with same diff --git a/txselector/txselector_test.go b/txselector/txselector_test.go index 78e7834..3831b9a 100644 --- a/txselector/txselector_test.go +++ b/txselector/txselector_test.go @@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:1") l1UserTxs := []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:2") l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:3") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) 
assert.Equal(t, 2, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:4") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 1, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:5") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { log.Debug("block:0 batch:6") l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 1, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{261, 263}, coordIdxs) assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0]) @@ -328,7 +328,7 @@ func 
TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{261, 263}, coordIdxs) assert.Equal(t, 0, len(accAuths)) @@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) { assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress())) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum]) coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, []common.Idx{263}, coordIdxs) assert.Equal(t, 0, len(accAuths)) @@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { } // batch1 l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // 1st TransferToEthAddr expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" @@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, 
discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) { // initial PoolExit, which now is valid as B has enough Balance l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1CoordTxs)) @@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) { // batch1 to freeze L1UserTxs that will create some accounts with // positive balance l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // Transfer is ToBJJ to a BJJ-only account that doesn't exist @@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 4, len(oL1UserTxs)) // We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx @@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) // Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs @@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) { l1UserTxs = []common.L1Tx{} _, _, oL1UserTxs, 
oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 0, len(oL1UserTxs)) // We expect the coordinator to add an L1CoordTx to create an account @@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // 8 transfers from the same account @@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) { // transfers from account A l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) batchPoolL2 := ` @@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // select L1 & L2 txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // batch 3 l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) 
require.Equal(t, 0, len(oL1UserTxs)) @@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) { // batch 4 l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) - // 8 transfers from the same account + // 3 transfers from the same account batchPoolL2 := ` Type: PoolL2 PoolTransfer(0) A-B: 10 (126) @@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) { // add the PoolL2Txs to the l2DB addL2Txs(t, txsel, poolL2Txs) - // batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A + // batch 2 to create some accounts with positive balance, and do 3 L2Tx transfers from account A l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) assert.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { } // batch1 to freeze L1UserTxs l1UserTxs := []common.L1Tx{} - _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs) + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) // batch 2 to crate the accounts (from L1UserTxs) @@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { // select L1 & L2 txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, 
err := - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 3, len(oL1UserTxs)) require.Equal(t, 0, len(oL1CoordTxs)) @@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { addL2Txs(t, txsel, poolL2Txs) l1UserTxs = []common.L1Tx{} _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { // batch 4. In this Batch, account B has enough balance to send the txs _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = - txsel.GetL1L2TxSelection(tpc, l1UserTxs) + txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil) require.NoError(t, err) require.Equal(t, 0, len(oL1UserTxs)) @@ -1038,3 +1038,112 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) { require.Equal(t, 3, len(discardedL2Txs)) require.Equal(t, 0, len(accAuths)) } + +func TestL1UserFutureTxs(t *testing.T) { + set := ` + Type: Blockchain + + CreateAccountDeposit(0) Coord: 0 + CreateAccountDeposit(0) A: 100 + + > batchL1 // freeze L1User{2} + CreateAccountDeposit(0) B: 18 + > batchL1 // forge L1User{2}, freeze L1User{1} + > batchL1 // forge L1User{1} + > block + ` + + chainID := uint16(0) + tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) + blocks, err := tc.GenerateBlocks(set) + assert.NoError(t, err) + + hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6") + txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"]) + + // restart nonces of TilContext, as will be set by generating directly + // the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs + tc.RestartNonces() + + tpc := txprocessor.Config{ + NLevels: 16, + MaxFeeTx: 10, + MaxTx: 10, + MaxL1Tx: 10, + 
ChainID: chainID, + } + // batch1 to freeze L1UserTxs + l1UserTxs := []common.L1Tx{} + l1UserFutureTxs := []common.L1Tx{} + _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + + batchPoolL2 := ` + Type: PoolL2 + PoolTransferToEthAddr(0) A-B: 10 (126) + ` + poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2) + require.NoError(t, err) + require.Equal(t, 1, len(poolL2Txs)) + + // add AccountCreationAuth for B + _ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B") + // add the PoolL2Txs to the l2DB + addL2Txs(t, txsel, poolL2Txs) + // batch 2 to create some accounts with positive balance, and do 1 L2Tx transfer from account A + l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) + l1UserFutureTxs = + til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) + require.Equal(t, 2, len(l1UserTxs)) + require.Equal(t, 1, len(l1UserFutureTxs)) + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := + txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + assert.Equal(t, 2, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + // no L2Tx selected due to the L1UserFutureTx, the L2Tx will be processed + // at the next batch once the L1UserTx of CreateAccount B is processed, + // even though there is an AccountCreationAuth for Account B. 
+ assert.Equal(t, 0, len(oL2Txs)) + assert.Equal(t, 1, len(discardedL2Txs)) + assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+ + " discarded at the current batch, as the receiver account does"+ + " not exist yet, and there is a L1UserTx that will create that"+ + " account in a future batch.", + discardedL2Txs[0].Info) + + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) + + l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum]) + l1UserFutureTxs = []common.L1Tx{} + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = + txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs) + require.NoError(t, err) + assert.Equal(t, 1, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + // L2Tx selected as now the L1UserTx of CreateAccount B is processed + assert.Equal(t, 1, len(oL2Txs)) + assert.Equal(t, 0, len(discardedL2Txs)) + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) + + // generate a new L2Tx A-B and check that it is processed + poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2) + require.NoError(t, err) + require.Equal(t, 1, len(poolL2Txs)) + // add the PoolL2Txs to the l2DB + addL2Txs(t, txsel, poolL2Txs) + _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = + txsel.GetL1L2TxSelection(tpc, nil, nil) + require.NoError(t, err) + assert.Equal(t, 0, len(oL1UserTxs)) + require.Equal(t, 0, len(oL1CoordTxs)) + assert.Equal(t, 1, len(oL2Txs)) + assert.Equal(t, 0, len(discardedL2Txs)) + err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), + txsel.localAccountsDB.CurrentBatch()) + require.NoError(t, err) +}