Compare commits


1 commit

Author: Oleksandr Brezhniev
SHA1: 5aa2b0e977
Message: Faster synchronization with asynchronous deletion of old checkpoints
Date: 2021-03-30 15:00:27 +03:00
32 changed files with 333 additions and 842 deletions
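In short, the commit stops deleting old KVDB/StateDB checkpoints inline during synchronization and runs the deletion in a background goroutine; it also removes the shared `metric` package in favour of per-package Prometheus collectors, and drops the RecommendedFeePolicy and l1UserFutureTxs features. The following is a simplified sketch of the asynchronous-deletion pattern only, using made-up types rather than the actual hermez-node code; the real changes are in the KVDB and StateDB hunks further down.

// Simplified sketch (made-up types, not the hermez-node code): the slow
// deletion runs in a goroutine, a mutex keeps concurrent deletions from
// overlapping, and Close waits for any in-flight deletion to finish.
package main

import (
	"log"
	"sync"
)

type DB struct {
	mutexDelOld sync.Mutex     // serializes DeleteOldCheckpoints calls
	wg          sync.WaitGroup // lets Close wait for background deletions
}

func (db *DB) MakeCheckpoint() error {
	// ... create the new checkpoint synchronously ...
	db.wg.Add(1)
	go func() {
		defer db.wg.Done()
		if err := db.DeleteOldCheckpoints(); err != nil {
			log.Println("delete old checkpoints failed:", err)
		}
	}()
	return nil
}

func (db *DB) DeleteOldCheckpoints() error {
	db.mutexDelOld.Lock()
	defer db.mutexDelOld.Unlock()
	// ... list checkpoints and remove the ones beyond the configured `keep` ...
	return nil
}

func (db *DB) Close() {
	db.wg.Wait() // wait for deletion of old checkpoints
}

func main() {
	db := &DB{}
	_ = db.MakeCheckpoint()
	db.Close()
}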


@@ -1,29 +0,0 @@
name: goreleaser
on:
  push:
    tags:
      - '*'
jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      -
        name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
      -
        name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore vendored

@@ -1,2 +1 @@
 bin/
-dist/


@@ -1,35 +0,0 @@
before:
  hooks:
    - go mod download
    - make migration-pack
builds:
  - main: ./cli/node/main.go
    binary: node
    id: node
    goos:
      - linux
      - darwin
    goarch:
      - amd64
archives:
  - replacements:
      darwin: Darwin
      linux: Linux
      windows: Windows
      386: i386
      amd64: x86_64
checksum:
  name_template: 'checksums.txt'
snapshot:
  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'


@@ -3,8 +3,8 @@
 # Project variables.
 PACKAGE := github.com/hermeznetwork/hermez-node
 VERSION := $(shell git describe --tags --always)
-COMMIT := $(shell git rev-parse --short HEAD)
-DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
+BUILD := $(shell git rev-parse --short HEAD)
+BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
 PROJECT_NAME := $(shell basename "$(PWD)")
 # Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
 POSTGRES_PASS ?= yourpasswordhere
 # Use linker flags to provide version/build settings.
-LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"
+LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
 # PID file will keep the process id of the server.
 PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
@@ -94,11 +94,11 @@ install:
 @echo " > Checking if there is any missing dependencies..."
 $(GOENVVARS) go get $(GOCMD)/... $(get)
-## run-node: Run Hermez node.
-run-node:
+## run: Run Hermez node.
+run:
 @bash -c "$(MAKE) clean build"
 @echo " > Running $(PROJECT_NAME)"
-@$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)
+@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
 ## run-proof-mock: Run proof server mock API.
 run-proof-mock: stop-proof-mock
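For orientation, the renamed Makefile variables feed the exported Version/Build/Date variables of cli/node/main.go (changed further down in this diff) through the Go linker's -X flag. A minimal standalone illustration, with made-up values, not taken from the repository:

// Standalone illustration of -X linker flags (values are made up):
//
//   go build -ldflags "-X=main.Version=v1.2.3 -X=main.Build=abc1234 -X=main.Date=2021-03-30T15:00:27+0300" .
//
// Without the flags, the defaults below are printed.
package main

import "fmt"

var (
	Version = "dev"
	Build   = "dev"
	Date    = ""
)

func main() {
	fmt.Printf("Version = %q\nBuild = %q\nDate = %q\n", Version, Build, Date)
}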


@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
 After setting the config, you can build and run the Hermez Node as a synchronizer:
 ```shell
-$ make run-node
+$ make run
 ```
 Or build and run as a coordinator, and also passing the config file from other location:
 ```shell
-$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
+$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
 ```
 To check the useful make commands:


@@ -7,7 +7,6 @@ import (
 "github.com/gin-gonic/gin"
 "github.com/hermeznetwork/hermez-node/db/historydb"
 "github.com/hermeznetwork/hermez-node/db/l2db"
-"github.com/hermeznetwork/hermez-node/metric"
 "github.com/hermeznetwork/tracerr"
 )
@@ -51,12 +50,6 @@ func NewAPI(
 hermezAddress: consts.HermezAddress,
 }
-middleware, err := metric.PrometheusMiddleware()
-if err != nil {
-return nil, err
-}
-server.Use(middleware)
 v1 := server.Group("/v1")
 // Add coordinator endpoints


@@ -522,16 +522,11 @@ func TestMain(m *testing.M) {
 WithdrawalDelay: uint64(3000),
 }
-stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
+stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
 Rollup: rollupVars,
 Auction: auctionVars,
 WDelayer: wdelayerVars,
-}, constants, &stateapiupdater.RecommendedFeePolicy{
-PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
-})
-if err != nil {
-panic(err)
-}
+}, constants)
 // Generate test data, as expected to be received/sended from/to the API
 testCoords := genTestCoordinators(commonCoords)


@@ -8,7 +8,6 @@ import (
 "github.com/gin-gonic/gin"
 "github.com/hermeznetwork/hermez-node/db/historydb"
 "github.com/hermeznetwork/hermez-node/log"
-"github.com/hermeznetwork/hermez-node/metric"
 "github.com/hermeznetwork/tracerr"
 "github.com/lib/pq"
 "github.com/russross/meddler"
@@ -47,9 +46,7 @@ var (
 func retSQLErr(err error, c *gin.Context) {
 log.Warnw("HTTP API SQL request error", "err", err)
-unwrapErr := tracerr.Unwrap(err)
-metric.CollectError(unwrapErr)
-errMsg := unwrapErr.Error()
+errMsg := tracerr.Unwrap(err).Error()
 retDupKey := func(errCode pq.ErrorCode) {
 // https://www.postgresql.org/docs/current/errcodes-appendix.html
 if errCode == "23505" {
@@ -83,7 +80,6 @@ func retSQLErr(err error, c *gin.Context) {
 func retBadReq(err error, c *gin.Context) {
 log.Warnw("HTTP API Bad request error", "err", err)
-metric.CollectError(err)
 c.JSON(http.StatusBadRequest, errorMsg{
 Message: err.Error(),
 })


@@ -2,12 +2,10 @@ package stateapiupdater
 import (
 "database/sql"
-"fmt"
 "sync"
 "github.com/hermeznetwork/hermez-node/common"
 "github.com/hermeznetwork/hermez-node/db/historydb"
-"github.com/hermeznetwork/hermez-node/log"
 "github.com/hermeznetwork/tracerr"
 )
@@ -19,45 +17,11 @@ type Updater struct {
 vars common.SCVariablesPtr
 consts historydb.Constants
 rw sync.RWMutex
-rfp *RecommendedFeePolicy
-}
-// RecommendedFeePolicy describes how the recommended fee is calculated
-type RecommendedFeePolicy struct {
-PolicyType RecommendedFeePolicyType `validate:"required"`
-StaticValue float64
-}
-// RecommendedFeePolicyType describes the different available recommended fee strategies
-type RecommendedFeePolicyType string
-const (
-// RecommendedFeePolicyTypeStatic always give the same StaticValue as recommended fee
-RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
-// RecommendedFeePolicyTypeAvgLastHour set the recommended fee using the average fee of the last hour
-RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
-)
-func (rfp *RecommendedFeePolicy) valid() bool {
-switch rfp.PolicyType {
-case RecommendedFeePolicyTypeStatic:
-if rfp.StaticValue == 0 {
-log.Warn("RcommendedFee is set to 0 USD, and the policy is static")
-}
-return true
-case RecommendedFeePolicyTypeAvgLastHour:
-return true
-default:
-return false
-}
 }
 // NewUpdater creates a new Updater
 func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
-consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
-if ok := rfp.valid(); !ok {
-return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
-}
+consts *historydb.Constants) *Updater {
 u := Updater{
 hdb: hdb,
 config: *config,
@@ -67,10 +31,9 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
 ForgeDelay: config.ForgeDelay,
 },
 },
-rfp: rfp,
 }
 u.SetSCVars(vars.AsPtr())
-return &u, nil
+return &u
 }
 // Store the State in the HistoryDB
@@ -102,27 +65,13 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
 // UpdateRecommendedFee update Status.RecommendedFee information
 func (u *Updater) UpdateRecommendedFee() error {
-switch u.rfp.PolicyType {
-case RecommendedFeePolicyTypeStatic:
-u.rw.Lock()
-u.state.RecommendedFee = common.RecommendedFee{
-ExistingAccount: u.rfp.StaticValue,
-CreatesAccount: u.rfp.StaticValue,
-CreatesAccountInternal: u.rfp.StaticValue,
-}
-u.rw.Unlock()
-case RecommendedFeePolicyTypeAvgLastHour:
-recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
-if err != nil {
-return tracerr.Wrap(err)
-}
-u.rw.Lock()
-u.state.RecommendedFee = *recommendedFee
-u.rw.Unlock()
-default:
-return tracerr.New("Invalid recommende fee policy")
+recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
+if err != nil {
+return tracerr.Wrap(err)
 }
+u.rw.Lock()
+u.state.RecommendedFee = *recommendedFee
+u.rw.Unlock()
 return nil
 }


@@ -73,9 +73,6 @@ of the node configuration. Please, check the `type APIServer` at
 monitor the size of the folder to avoid running out of space.
 - The node requires a PostgreSQL database. The parameters of the server and
 database must be set in the `PostgreSQL` section.
-- The node requires a web3 RPC server to work. The node has only been tested
-with geth and may not work correctly with other ethereum nodes
-implementations.
 ## Building


@@ -35,7 +35,7 @@ Symbol = "SUSHI"
 Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
 [Debug]
-APIAddress = "0.0.0.0:12345"
+APIAddress = "localhost:12345"
 MeddlerLogs = true
 GinDebugMode = true
@@ -145,11 +145,3 @@ Coordinator = true
 BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
 LightScrypt = true
 # RollupVerifierIndex = 0
-[RecommendedFeePolicy]
-# Strategy used to calculate the recommended fee that the API will expose.
-# Available options:
-# - Static: always return the same value (StaticValue) in USD
-# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
-PolicyType = "Static"
-StaticValue = 0.99


@@ -35,18 +35,18 @@ const (
 )
 var (
-// version represents the program based on the git tag
-version = "v0.1.0"
-// commit represents the program based on the git commit
-commit = "dev"
-// date represents the date of application was built
-date = ""
+// Version represents the program based on the git tag
+Version = "v0.1.0"
+// Build represents the program based on the git commit
+Build = "dev"
+// Date represents the date of application was built
+Date = ""
 )
 func cmdVersion(c *cli.Context) error {
-fmt.Printf("Version = \"%v\"\n", version)
-fmt.Printf("Build = \"%v\"\n", commit)
-fmt.Printf("Date = \"%v\"\n", date)
+fmt.Printf("Version = \"%v\"\n", Version)
+fmt.Printf("Build = \"%v\"\n", Build)
+fmt.Printf("Date = \"%v\"\n", Date)
 return nil
 }
@@ -421,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
 func main() {
 app := cli.NewApp()
 app.Name = "hermez-node"
-app.Version = version
+app.Version = Version
 flags := []cli.Flag{
 &cli.StringFlag{
 Name: flagMode,


@@ -22,7 +22,7 @@ type L1Tx struct {
 // - L1UserTx: 0
 // - L1CoordinatorTx: 1
 TxID TxID `meddler:"id"`
-// ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged
+// ToForgeL1TxsNum indicates in which the tx was forged / will be forged
 ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
 Position int `meddler:"position"`
 // UserOrigin is set to true if the tx was originated by a user, false if it was


@@ -8,7 +8,6 @@ import (
 "github.com/BurntSushi/toml"
 ethCommon "github.com/ethereum/go-ethereum/common"
-"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
 "github.com/hermeznetwork/hermez-node/common"
 "github.com/hermeznetwork/hermez-node/priceupdater"
 "github.com/hermeznetwork/tracerr"
@@ -300,8 +299,7 @@ type Node struct {
 } `validate:"required"`
 PostgreSQL PostgreSQL `validate:"required"`
 Web3 struct {
-// URL is the URL of the web3 ethereum-node RPC server. Only
-// geth is officially supported.
+// URL is the URL of the web3 ethereum-node RPC server
 URL string `validate:"required"`
 } `validate:"required"`
 Synchronizer struct {
@@ -348,9 +346,8 @@ type Node struct {
 // can wait to stablish a SQL connection
 SQLConnectionTimeout Duration
 } `validate:"required"`
-RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
 Debug NodeDebug `validate:"required"`
 Coordinator Coordinator `validate:"-"`
 }
 // APIServer is the api server configuration parameters


@@ -80,7 +80,6 @@ type BatchInfo struct {
 PipelineNum int
 BatchNum common.BatchNum
 ServerProof prover.Client
-ProofStart time.Time
 ZKInputs *common.ZKInputs
 Proof *prover.Proof
 PublicInputs []*big.Int


@@ -1,43 +1,3 @@
/*
Package coordinator handles all the logic related to forging batches as a
coordinator in the hermez network.
The forging of batches is done with a pipeline in order to allow multiple
batches being forged in parallel. The maximum number of batches that can be
forged in parallel is determined by the number of available proof servers.
The Coordinator begins with the pipeline stopped. The main Coordinator
goroutine keeps listening for synchronizer events sent by the node package,
which allow the coordinator to determine if the configured forger address is
allowed to forge at the current block or not. When the forger address becomes
allowed to forge, the pipeline is started, and when it terminates being allowed
to forge, the pipeline is stopped.
The Pipeline consists of two goroutines. The first one is in charge of
preparing a batch internally, which involves making a selection of transactions
and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
an idle proof server. This goroutine will keep preparing batches while there
are idle proof servers, if the forging policy determines that a batch should be
forged in the current state. The second goroutine is in charge of waiting for
the proof server to finish computing the proof, retreiving it, prepare the
arguments for the `forgeBatch` Rollup transaction, and sending the result to
the TxManager. All the batch information moves between functions and
goroutines via the BatchInfo struct.
Finally, the TxManager contains a single goroutine that makes forgeBatch
ethereum transactions for the batches sent by the Pipeline, and keeps them in a
list to check them periodically. In the periodic checks, the ethereum
transaction is checked for successfulness, and it's only forgotten after a
number of confirmation blocks have passed after being successfully mined. At
any point if a transaction failure is detected, the TxManager can signal the
Coordinator to reset the Pipeline in order to reforge the failed batches.
The Coordinator goroutine acts as a manager. The synchronizer events (which
notify about new blocks and associated new state) that it receives are
broadcasted to the Pipeline and the TxManager. This allows the Coordinator,
Pipeline and TxManager to have a copy of the current hermez network state
required to perform their duties.
*/
package coordinator

import (


@@ -5,7 +5,6 @@ import (
 "database/sql"
 "fmt"
 "math/big"
-"strconv"
 "sync"
 "time"
@@ -15,7 +14,6 @@ import (
 "github.com/hermeznetwork/hermez-node/db/l2db"
 "github.com/hermeznetwork/hermez-node/eth"
 "github.com/hermeznetwork/hermez-node/log"
-"github.com/hermeznetwork/hermez-node/metric"
 "github.com/hermeznetwork/hermez-node/prover"
 "github.com/hermeznetwork/hermez-node/synchronizer"
 "github.com/hermeznetwork/hermez-node/txselector"
@@ -248,7 +246,6 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
 // 3. Send the ZKInputs to the proof server
 batchInfo.ServerProof = serverProof
-batchInfo.ProofStart = time.Now()
 if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
 return nil, ctx.Err()
 } else if err != nil {
@@ -523,30 +520,15 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
 if err != nil {
 return nil, nil, tracerr.Wrap(err)
 }
-// l1UserFutureTxs are the l1UserTxs that are not being forged
-// in the next batch, but that are also in the queue for the
-// future batches
-l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1)
-if err != nil {
-return nil, nil, tracerr.Wrap(err)
-}
 coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
-p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs)
+p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
 if err != nil {
 return nil, nil, tracerr.Wrap(err)
 }
 } else {
-// get l1UserFutureTxs which are all the l1 pending in all the
-// queues
-l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd
-if err != nil {
-return nil, nil, tracerr.Wrap(err)
-}
 // 2b: only L2 txs
 coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
-p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs)
+p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
 if err != nil {
 return nil, nil, tracerr.Wrap(err)
 }
@@ -620,9 +602,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
 // waitServerProof gets the generated zkProof & sends it to the SmartContract
 func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
-defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart,
-batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum))
 proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
 // until not resolved don't continue. Returns when the proof server has calculated the proof
 if err != nil {


@@ -751,24 +751,6 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
 return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
 }
-// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch
-// with toForgeL1TxsNum (in one of the future batches, not in the next one).
-func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
-var txs []*common.L1Tx
-err := meddler.QueryAll(
-hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
-`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
-tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
-tx.amount, NULL AS effective_amount,
-tx.deposit_amount, NULL AS effective_deposit_amount,
-tx.eth_block_num, tx.type, tx.batch_num
-FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1
-ORDER BY position;`,
-toForgeL1TxsNum,
-)
-return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
-}
 // GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
 // open or frozen queues that are not yet forged)
 func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {


@@ -699,55 +699,34 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
 CreateAccountDeposit(1) B: 5
 CreateAccountDeposit(1) C: 5
 CreateAccountDeposit(1) D: 5
-> block
-> batchL1
 > block
-CreateAccountDeposit(1) E: 5
-CreateAccountDeposit(1) F: 5
-> block
 `
 tc := til.NewContext(uint16(0), 128)
 blocks, err := tc.GenerateBlocks(set)
 require.NoError(t, err)
 // Sanity check
-require.Equal(t, 3, len(blocks))
+require.Equal(t, 1, len(blocks))
 require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs))
+toForgeL1TxsNum := int64(1)
 for i := range blocks {
 err = historyDB.AddBlockSCData(&blocks[i])
 require.NoError(t, err)
 }
-l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0)
-require.NoError(t, err)
-assert.Equal(t, 7, len(l1UserTxs))
-l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1)
+l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum)
 require.NoError(t, err)
 assert.Equal(t, 5, len(l1UserTxs))
 assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
-l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1)
-require.NoError(t, err)
-assert.Equal(t, 2, len(l1UserTxs))
 count, err := historyDB.GetUnforgedL1UserTxsCount()
 require.NoError(t, err)
-assert.Equal(t, 7, count)
-l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
-require.NoError(t, err)
-assert.Equal(t, 2, len(l1UserTxs))
-l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2)
-require.NoError(t, err)
-assert.Equal(t, 0, len(l1UserTxs))
+assert.Equal(t, 5, count)
 // No l1UserTxs for this toForgeL1TxsNum
-l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3)
+l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
 require.NoError(t, err)
 assert.Equal(t, 0, len(l1UserTxs))
 }


@@ -49,6 +49,8 @@ type KVDB struct {
 CurrentIdx common.Idx
 CurrentBatch common.BatchNum
 m sync.Mutex
+mutexDelOld sync.Mutex
+wg sync.WaitGroup
 last *Last
 }
@@ -444,10 +446,15 @@ func (k *KVDB) MakeCheckpoint() error {
 return tracerr.Wrap(err)
 }
 }
-// delete old checkpoints
-if err := k.deleteOldCheckpoints(); err != nil {
-return tracerr.Wrap(err)
-}
+k.wg.Add(1)
+go func() {
+delErr := k.DeleteOldCheckpoints()
+if delErr != nil {
+log.Errorw("delete old checkpoints failed", "err", delErr)
+}
+k.wg.Done()
+}()
 return nil
 }
@@ -509,9 +516,12 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
 return checkpoints, nil
 }
-// deleteOldCheckpoints deletes old checkpoints when there are more than
+// DeleteOldCheckpoints deletes old checkpoints when there are more than
 // `s.keep` checkpoints
-func (k *KVDB) deleteOldCheckpoints() error {
+func (k *KVDB) DeleteOldCheckpoints() error {
+k.mutexDelOld.Lock()
+defer k.mutexDelOld.Unlock()
 list, err := k.ListCheckpoints()
 if err != nil {
 return tracerr.Wrap(err)
@@ -584,4 +594,6 @@ func (k *KVDB) Close() {
 if k.last != nil {
 k.last.close()
 }
+// wait for deletion of old checkpoints
+k.wg.Wait()
 }


@@ -4,6 +4,7 @@ import (
 "fmt"
 "io/ioutil"
 "os"
+"sync"
 "testing"
 "github.com/hermeznetwork/hermez-node/common"
@@ -190,12 +191,67 @@ func TestDeleteOldCheckpoints(t *testing.T) {
 for i := 0; i < numCheckpoints; i++ {
 err = db.MakeCheckpoint()
 require.NoError(t, err)
+err = db.DeleteOldCheckpoints()
+require.NoError(t, err)
 checkpoints, err := db.ListCheckpoints()
 require.NoError(t, err)
 assert.LessOrEqual(t, len(checkpoints), keep)
 }
 }
func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)
numCheckpoints := 32
var wg sync.WaitGroup
wg.Add(numCheckpoints)
// do checkpoints and check that we never have more than `keep`
// checkpoints.
// 1 async DeleteOldCheckpoint after 1 MakeCheckpoint
for i := 0; i < numCheckpoints; i++ {
err = db.MakeCheckpoint()
require.NoError(t, err)
go func() {
err = db.DeleteOldCheckpoints()
require.NoError(t, err)
wg.Done()
}()
}
wg.Wait()
checkpoints, err := db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
wg.Add(numCheckpoints)
// do checkpoints and check that we never have more than `keep`
// checkpoints
// 32 concurrent DeleteOldCheckpoint after 32 MakeCheckpoint
for i := 0; i < numCheckpoints; i++ {
err = db.MakeCheckpoint()
require.NoError(t, err)
}
for i := 0; i < numCheckpoints; i++ {
go func() {
err = db.DeleteOldCheckpoints()
require.NoError(t, err)
wg.Done()
}()
}
wg.Wait()
checkpoints, err = db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
}
func TestGetCurrentIdx(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)


@@ -227,6 +227,12 @@ func (s *StateDB) MakeCheckpoint() error {
 return s.db.MakeCheckpoint()
 }
+// DeleteOldCheckpoints deletes old checkpoints when there are more than
+// `cfg.keep` checkpoints
+func (s *StateDB) DeleteOldCheckpoints() error {
+return s.db.DeleteOldCheckpoints()
+}
 // CurrentBatch returns the current in-memory CurrentBatch of the StateDB.db
 func (s *StateDB) CurrentBatch() common.BatchNum {
 return s.db.CurrentBatch


@@ -7,6 +7,7 @@ import (
 "math/big"
 "os"
 "strings"
+"sync"
 "testing"
 ethCommon "github.com/ethereum/go-ethereum/common"
@@ -588,6 +589,48 @@ func TestDeleteOldCheckpoints(t *testing.T) {
 for i := 0; i < numCheckpoints; i++ {
 err = sdb.MakeCheckpoint()
 require.NoError(t, err)
+err := sdb.DeleteOldCheckpoints()
+require.NoError(t, err)
+checkpoints, err := sdb.db.ListCheckpoints()
+require.NoError(t, err)
+assert.LessOrEqual(t, len(checkpoints), keep)
+}
+}
// TestConcurrentDeleteOldCheckpoints performs almost the same test than
// kvdb/kvdb_test.go TestConcurrentDeleteOldCheckpoints, but over the StateDB
func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)
numCheckpoints := 32
// do checkpoints and check that we never have more than `keep`
// checkpoints
for i := 0; i < numCheckpoints; i++ {
err = sdb.MakeCheckpoint()
require.NoError(t, err)
wg := sync.WaitGroup{}
n := 10
wg.Add(n)
for j := 0; j < n; j++ {
go func() {
err := sdb.DeleteOldCheckpoints()
require.NoError(t, err)
checkpoints, err := sdb.db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
wg.Done()
}()
_, err := sdb.db.ListCheckpoints()
// only checking here for absence of errors, not the count of checkpoints
require.NoError(t, err)
}
wg.Wait()
checkpoints, err := sdb.db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)


@@ -1,192 +0,0 @@
package metric
import (
"time"
"github.com/hermeznetwork/hermez-node/log"
"github.com/prometheus/client_golang/prometheus"
)
type (
// Metric represents the metric type
Metric string
)
const (
namespaceError = "error"
namespaceSync = "synchronizer"
namespaceTxSelector = "txselector"
namespaceAPI = "api"
)
var (
// Errors errors count metric.
Errors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceError,
Name: "errors",
Help: "",
}, []string{"error"})
// WaitServerProof duration time to get the calculated
// proof from the server.
WaitServerProof = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceSync,
Name: "wait_server_proof",
Help: "",
}, []string{"batch_number", "pipeline_number"})
// Reorgs block reorg count
Reorgs = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceSync,
Name: "reorgs",
Help: "",
})
// LastBlockNum last block synced
LastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_block_num",
Help: "",
})
// EthLastBlockNum last eth block synced
EthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_block_num",
Help: "",
})
// LastBatchNum last batch synced
LastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_batch_num",
Help: "",
})
// EthLastBatchNum last eth batch synced
EthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_batch_num",
Help: "",
})
// GetL2TxSelection L2 tx selection count
GetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l2_txselection_total",
Help: "",
})
// GetL1L2TxSelection L1L2 tx selection count
GetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l1_l2_txselection_total",
Help: "",
})
// SelectedL1CoordinatorTxs selected L1 coordinator tx count
SelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_coordinator_txs",
Help: "",
})
// SelectedL1UserTxs selected L1 user tx count
SelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_user_txs",
Help: "",
})
// SelectedL2Txs selected L2 tx count
SelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l2_txs",
Help: "",
})
// DiscardedL2Txs discarded L2 tx count
DiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "discarded_l2_txs",
Help: "",
})
)
func init() {
if err := registerCollectors(); err != nil {
log.Error(err)
}
}
func registerCollectors() error {
if err := registerCollector(Errors); err != nil {
return err
}
if err := registerCollector(WaitServerProof); err != nil {
return err
}
if err := registerCollector(Reorgs); err != nil {
return err
}
if err := registerCollector(LastBlockNum); err != nil {
return err
}
if err := registerCollector(LastBatchNum); err != nil {
return err
}
if err := registerCollector(EthLastBlockNum); err != nil {
return err
}
if err := registerCollector(EthLastBatchNum); err != nil {
return err
}
if err := registerCollector(GetL2TxSelection); err != nil {
return err
}
if err := registerCollector(GetL1L2TxSelection); err != nil {
return err
}
if err := registerCollector(SelectedL1CoordinatorTxs); err != nil {
return err
}
if err := registerCollector(SelectedL1UserTxs); err != nil {
return err
}
return registerCollector(DiscardedL2Txs)
}
func registerCollector(collector prometheus.Collector) error {
err := prometheus.Register(collector)
if err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
return err
}
}
return nil
}
// MeasureDuration measure the method execution duration
// and save it into a histogram metric
func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) {
duration := time.Since(start)
histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds()))
}
// CollectError collect the error message and increment
// the error count
func CollectError(err error) {
Errors.With(map[string]string{"error": err.Error()}).Inc()
}


@@ -1,78 +0,0 @@
package metric
import (
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
)
const (
favicon = "/favicon.ico"
)
// Prometheus contains the metrics gathered by the instance and its path
type Prometheus struct {
reqCnt *prometheus.CounterVec
reqDur *prometheus.HistogramVec
}
// NewPrometheus generates a new set of metrics with a certain subsystem name
func NewPrometheus() (*Prometheus, error) {
reqCnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceAPI,
Name: "requests_total",
Help: "How many HTTP requests processed, partitioned by status code and HTTP method",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqCnt); err != nil {
return nil, err
}
reqDur := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceAPI,
Name: "request_duration_seconds",
Help: "The HTTP request latencies in seconds",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqDur); err != nil {
return nil, err
}
return &Prometheus{
reqCnt: reqCnt,
reqDur: reqDur,
}, nil
}
// PrometheusMiddleware creates the prometheus collector and
// defines status handler function for the middleware
func PrometheusMiddleware() (gin.HandlerFunc, error) {
p, err := NewPrometheus()
if err != nil {
return nil, err
}
return p.Middleware(), nil
}
// Middleware defines status handler function for middleware
func (p *Prometheus) Middleware() gin.HandlerFunc {
return func(c *gin.Context) {
if c.Request.URL.Path == favicon {
c.Next()
return
}
start := time.Now()
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
fullPath := c.FullPath()
p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed)
p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc()
}
}


@@ -1,18 +1,3 @@
/*
Package node does the initialization of all the required objects to either run
as a synchronizer or as a coordinator.
The Node contains several goroutines that run in the background or that
periodically perform tasks. One of this goroutines periodically calls the
`Synchronizer.Sync` function, allowing the synchronization of one block at a
time. After every call to `Synchronizer.Sync`, the Node sends a message to the
Coordinator to notify it about the new synced block (and associated state) or
reorg (and resetted state) in case one happens.
Other goroutines perform tasks such as: updating the token prices, update
metrics stored in the historyDB, update recommended fee stored in the
historyDB, run the http API server, run the debug http API server, etc.
*/
package node

import (
@@ -288,16 +273,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 return nil, tracerr.Wrap(err)
 }
-stateAPIUpdater, err := stateapiupdater.NewUpdater(
-historyDB,
-&hdbNodeCfg,
-initSCVars,
-&hdbConsts,
-&cfg.RecommendedFeePolicy,
-)
-if err != nil {
-return nil, tracerr.Wrap(err)
-}
+stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
 var coord *coordinator.Coordinator
 if mode == ModeCoordinator {

synchronizer/metrics.go (new file, 44 lines)

@@ -0,0 +1,44 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}
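The collectors above are registered with the default Prometheus registry at package init time. As a hedged sketch (the HTTP wiring here is illustrative and not part of this diff), any binary that imports such a package only needs the standard promhttp handler to expose them:

// Illustrative only: exposing the default Prometheus registry over HTTP.
// The address and route are made up; the node's own HTTP wiring is not
// shown in this diff.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Importing a package whose init calls prometheus.MustRegister (such as
	// the synchronizer or txselector metrics files) is enough for its
	// collectors to show up under /metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe("localhost:9100", nil))
}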


@@ -1,35 +1,3 @@
/*
Package synchronizer synchronizes the hermez network state by querying events
emitted by the three smart contracts: `Hermez.sol` (referred as Rollup here),
`HermezAuctionProtocol.sol` (referred as Auction here) and
`WithdrawalDelayer.sol` (referred as WDelayer here).
The main entry point for synchronization is the `Sync` function, which at most
will synchronize one ethereum block, and all the hermez events that happened in
that block. During a `Sync` call, a reorg can be detected; in such case, uncle
blocks will be discarded, and only in a future `Sync` call correct blocks will
be synced.
The synchronization of the events in each smart contracts are done
in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
use the interface code to read each smart contract state and events found in
"github.com/hermeznetwork/hermez-node/eth". After these three methods are
called, an object of type `common.BlockData` is built containing all the
updates and events that happened in that block, and it is inserted in the
HistoryDB in a single SQL transaction.
`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
state is updated in the StateDB by processing all transactions that have been
forged in that batch.
The consistency of the stored data is guaranteed by the HistoryDB: All the
block information is inserted in a single SQL transaction at the end of the
`Sync` method, once the StateDB has been updated. And every time the
Synchronizer starts, it continues from the last block in the HistoryDB. The
StateDB stores updates organized by checkpoints for every batch, and each batch
is only accessed if it appears in the HistoryDB.
*/
package synchronizer

import (
@@ -47,7 +15,6 @@ import (
 "github.com/hermeznetwork/hermez-node/db/statedb"
 "github.com/hermeznetwork/hermez-node/eth"
 "github.com/hermeznetwork/hermez-node/log"
-"github.com/hermeznetwork/hermez-node/metric"
 "github.com/hermeznetwork/hermez-node/txprocessor"
 "github.com/hermeznetwork/tracerr"
 )
@@ -582,7 +549,6 @@ func (s *Synchronizer) Sync(ctx context.Context,
 return nil, nil, tracerr.Wrap(err)
 }
 discarded := lastSavedBlock.Num - lastDBBlockNum
-metric.Reorgs.Inc()
 return nil, &discarded, nil
 }
 }
@@ -675,16 +641,16 @@ func (s *Synchronizer) Sync(ctx context.Context,
 }
 for _, batchData := range rollupData.Batches {
-metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum))
-metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
+metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
+metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
 log.Debugw("Synced batch",
 "syncLastBatch", batchData.Batch.BatchNum,
 "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
 "ethLastBatch", s.stats.Eth.LastBatchNum,
 )
 }
-metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
-metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
+metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
+metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
 log.Debugw("Synced block",
 "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
 "syncBlocksPerc", s.stats.blocksPerc(),


@@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 }
 // TxSelector select the transactions for the next Batch
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
 // TxSelector select the transactions for the next Batch
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
 // TxSelector select the transactions for the next Batch
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
 // TxSelector select the transactions for the next Batch
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
 // TxSelector select the transactions for the next Batch
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 // TxSelector select the transactions for the next Batch
 l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
 require.NoError(t, err)
 // BatchBuilder build Batch
 zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 require.NoError(t, err)
 addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
 coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(txprocConfig, nil, nil)
+txsel.GetL1L2TxSelection(txprocConfig, nil)
 require.NoError(t, err)
 assert.Equal(t, 1, len(coordIdxs))
 assert.Equal(t, 0, len(oL1UserTxs))

txselector/metrics.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package txselector
import "github.com/prometheus/client_golang/prometheus"
var (
metricGetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l2_txselecton_total",
Help: "",
},
)
metricGetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l1_l2_txselecton_total",
Help: "",
},
)
metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_coordinator_txs",
Help: "",
},
)
metricSelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_user_txs",
Help: "",
},
)
metricSelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l2_txs",
Help: "",
},
)
metricDiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_discarded_l2_txs",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricGetL2TxSelection)
prometheus.MustRegister(metricGetL1L2TxSelection)
prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
prometheus.MustRegister(metricSelectedL1UserTxs)
prometheus.MustRegister(metricSelectedL2Txs)
prometheus.MustRegister(metricDiscardedL2Txs)
}


@@ -13,7 +13,6 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
@@ -85,7 +84,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) { tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
// check if CoordinatorAccount for TokenID is already pending to create // check if CoordinatorAccount for TokenID is already pending to create
if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID, if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID,
txsel.coordAccount.Addr, txsel.coordAccount.BJJ) { txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
return nil, positionL1, nil return nil, positionL1, nil
} }
@@ -122,12 +121,11 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// but there is a transactions to them and the authorization of account // but there is a transactions to them and the authorization of account
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx, func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx,
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metric.GetL2TxSelection.Inc() metricGetL2TxSelection.Inc()
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
[]common.L1Tx{}, l1UserFutureTxs)
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err) discardedL2Txs, tracerr.Wrap(err)
} }
@@ -141,11 +139,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metric.GetL1L2TxSelection.Inc() metricGetL1L2TxSelection.Inc()
coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs) discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err) discardedL2Txs, tracerr.Wrap(err)
} }
@@ -159,7 +157,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The // WIP.0: the TxSelector is not optimized and will need a redesign. The
// current version is implemented in order to have a functional // current version is implemented in order to have a functional
@@ -223,11 +221,10 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
-metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-metric.SelectedL1CoordinatorTxs.Set(0)
-metric.SelectedL2Txs.Set(0)
-metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+metricSelectedL1CoordinatorTxs.Set(0)
+metricSelectedL2Txs.Set(0)
+metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
}
@@ -236,7 +233,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
var validTxs, discardedL2Txs []common.PoolL2Tx
l2TxsForgable = sortL2Txs(l2TxsForgable)
accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
-txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs,
+txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
l2TxsForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
@@ -250,8 +247,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
var l1CoordinatorTxs2 []common.L1Tx
accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig,
-len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs,
-l2TxsNonForgable, validTxs, discardedL2Txs)
+len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
+validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
@@ -323,18 +320,17 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
-metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
-metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-metric.SelectedL2Txs.Set(float64(len(validTxs)))
-metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
+metricSelectedL2Txs.Set(float64(len(validTxs)))
+metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
-selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx,
-l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx,
-[]common.PoolL2Tx, []common.PoolL2Tx, error) {
+selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
+[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
var l1CoordinatorTxs []common.L1Tx
positionL1 := nL1Txs
var accAuths [][]byte
@@ -436,8 +432,7 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
validL2Tx, l1CoordinatorTx, accAuth, err :=
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
-nL1Txs, l1UserFutureTxs, l1CoordinatorTxs,
-positionL1, l2Txs[i])
+nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of
@@ -577,35 +572,18 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
// l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
// array.
func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
-selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs,
-l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) (
-*common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) {
+selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx,
+positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx,
+*common.AccountCreationAuth, error) {
// if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
// previous L2Tx in the current process already created a
// L1CoordinatorTx of this type, in the DB there still seem that needs
// to create a new L1CoordinatorTx, but as is already created, the tx
// is valid
-if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
+if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
return &l2Tx, nil, nil, nil
}
-// check if L2Tx receiver account will be created by a L1UserFutureTxs
-// (in the next batch, the current frozen queue). In that case, the L2Tx
-// will be discarded at the current batch, even if there is an
-// AccountCreationAuth for the account, as there is a L1UserTx in the
-// frozen queue that will create the receiver Account. The L2Tx is
-// discarded to avoid the Coordinator creating a new L1CoordinatorTx to
-// create the receiver account, which will be also created in the next
-// batch from the L1UserFutureTx, ending with the user having 2
-// different accounts for the same TokenID. The double account creation
-// is supported by the Hermez zkRollup specification, but it was decided
-// to mitigate it at the TxSelector level for the explained cases.
-if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
-return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" +
-" receiver account does not exist yet, and there is a L1UserTx that" +
-" will create that account in a future batch.")
-}
var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
@@ -708,7 +686,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
return &l2Tx, l1CoordinatorTx, accAuth, nil
}
-func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
+func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr &&
@@ -720,23 +698,6 @@ func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID commo
return false
}
-func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID,
-addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
-for i := 0; i < len(l1UserFutureTxs); i++ {
-if l1UserFutureTxs[i].FromEthAddr == addr &&
-l1UserFutureTxs[i].TokenID == tokenID &&
-l1UserFutureTxs[i].FromBJJ == bjj {
-return true
-}
-if l1UserFutureTxs[i].FromEthAddr == addr &&
-l1UserFutureTxs[i].TokenID == tokenID &&
-common.EmptyBJJComp == bjj {
-return true
-}
-}
-return false
-}
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
// Sort by absolute fee with SliceStable, so that txs with same
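The `sortL2Txs` helper is only partially visible above; its comment describes a stable sort by `AbsoluteFee` and then by `Nonce`. Below is a self-contained sketch of that two-pass `sort.SliceStable` pattern, using a simplified stand-in struct rather than `common.PoolL2Tx`; the exact ordering and tie-breaking rules of the repository's implementation are an assumption here, not a copy of it.

```go
package main

import (
	"fmt"
	"sort"
)

// tx is a simplified stand-in for common.PoolL2Tx, carrying only the two
// fields the sort comment above mentions.
type tx struct {
	Nonce       int
	AbsoluteFee float64
}

// sortTxs sketches the two-pass stable sort: first order by Nonce, then
// stable-sort by AbsoluteFee (highest first), so transactions with the same
// fee keep their nonce order.
func sortTxs(txs []tx) {
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].AbsoluteFee > txs[j].AbsoluteFee })
}

func main() {
	txs := []tx{{Nonce: 2, AbsoluteFee: 1}, {Nonce: 1, AbsoluteFee: 1}, {Nonce: 3, AbsoluteFee: 5}}
	sortTxs(txs)
	fmt.Println(txs) // [{3 5} {1 1} {2 1}]
}
```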

View File

@@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:1")
l1UserTxs := []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:2")
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:3")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:4")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:5")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:6")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
@@ -328,7 +328,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
}
// batch1
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// 1st TransferToEthAddr
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
@@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
// initial PoolExit, which now is valid as B has enough Balance
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) {
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
@@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
@@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
@@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
@@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// 8 transfers from the same account
@@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
// transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
batchPoolL2 := `
@@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 3
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 4
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
-// 3 transfers from the same account
+// 8 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126)
@@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) {
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
-// batch 2 to crate some accounts with positive balance, and do 3 L2Tx transfers from account A
+// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// batch 2 to crate the accounts (from L1UserTxs)
@@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// batch 4. In this Batch, account B has enough balance to send the txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -1038,112 +1038,3 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
require.Equal(t, 3, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
}
func TestL1UserFutureTxs(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{2}
CreateAccountDeposit(0) B: 18
> batchL1 // forge L1User{2}, freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
l1UserFutureTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
batchPoolL2 := `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 10 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add AccountCreationAuth for B
_ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to crate some accounts with positive balance, and do 1 L2Tx transfer from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
l1UserFutureTxs =
til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
require.Equal(t, 2, len(l1UserTxs))
require.Equal(t, 1, len(l1UserFutureTxs))
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// no L2Tx selected due the L1UserFutureTx, the L2Tx will be processed
// at the next batch once the L1UserTx of CreateAccount B is processed,
// despite that there is an AccountCreationAuth for Account B.
assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+
" discarded at the current batch, as the receiver account does"+
" not exist yet, and there is a L1UserTx that will create that"+
" account in a future batch.",
discardedL2Txs[0].Info)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
l1UserFutureTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// L2Tx selected as now the L1UserTx of CreateAccount B is processed
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// generate a new L2Tx A-B and check that is processed
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, nil, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
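Throughout the test file shown above, each batch is driven with the same three-step pattern: collect the L1UserTxs frozen for that batch with the `til` helpers, run the selector, and mark the selected pool transactions as forging. A trimmed sketch of one round using the two-argument call form from this diff; `tpc`, `txsel`, `tc`, and `blocks` come from the surrounding test setup, and error checks are reduced to `require.NoError`, so treat it as a sketch rather than a drop-in test:

```go
// One selection round as exercised by these tests (sketch).
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, _, _, oL2Txs, _, err := txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// Mark the selected PoolL2Txs as forging for the batch just built.
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
	txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
```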