
Merge branch 'develop' into feature/apiWithoutError

# Conflicts:
#	api/api.go
Mikelle 3 years ago
commit e9b80d0c6d
31 changed files with 776 additions and 207 deletions
  1. .gitignore (+2 -1)
  2. .goreleaser.yml (+3 -4)
  3. api/accountcreationauths.go (+1 -1)
  4. api/api.go (+40 -5)
  5. api/api_test.go (+8 -2)
  6. api/apitypes/apitypes.go (+8 -0)
  7. api/config.go (+1 -0)
  8. api/handlers.go (+5 -1)
  9. api/stateapiupdater/stateapiupdater.go (+66 -8)
  10. api/swagger.yml (+5 -0)
  11. api/txspool.go (+1 -1)
  12. cli/node/cfg.buidler.toml (+9 -1)
  13. common/l1tx.go (+1 -1)
  14. config/config.go (+4 -2)
  15. coordinator/batch.go (+1 -0)
  16. coordinator/pipeline.go (+23 -2)
  17. db/historydb/historydb.go (+39 -0)
  18. db/historydb/historydb_test.go (+27 -6)
  19. db/l2db/l2db.go (+17 -0)
  20. db/utils.go (+7 -0)
  21. eth/ethereum.go (+5 -5)
  22. metric/metric.go (+192 -0)
  23. metric/request.go (+78 -0)
  24. node/node.go (+10 -1)
  25. priceupdater/priceupdater.go (+4 -0)
  26. synchronizer/metrics.go (+0 -44)
  27. synchronizer/synchronizer.go (+6 -4)
  28. test/zkproof/flows_test.go (+7 -7)
  29. txselector/metrics.go (+0 -53)
  30. txselector/txselector.go (+66 -27)
  31. txselector/txselector_test.go (+140 -31)

.gitignore (+2 -1)

@ -1 +1,2 @@
bin/
bin/
dist/

.goreleaser.yml (+3 -4)

@ -1,6 +1,7 @@
before:
hooks:
- go mod download
- make migration-pack
builds:
- main: ./cli/node/main.go
@ -9,10 +10,8 @@ builds:
goos:
- linux
- darwin
- windows
hooks:
pre: make migration-pack
post: make migration-clean
goarch:
- amd64
archives:
- replacements:

api/accountcreationauths.go (+1 -1)

@ -21,7 +21,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
}
// API to common + verify signature
commonAuth := accountCreationAuthAPIToCommon(&apiAuth)
if !commonAuth.VerifySignature(a.chainID, a.hermezAddress) {
if !commonAuth.VerifySignature(a.cg.ChainID, a.hermezAddress) {
retBadReq(errors.New("invalid signature"), c)
return
}

api/api.go (+40 -5)

@ -1,3 +1,27 @@
/*
Package api implements the public interface of the hermez-node using a HTTP REST API.
There are two subsets of endpoints:
- coordinatorEndpoints: used to receive L2 transactions and account creation authorizations. Targeted for wallets.
- explorerEndpoints: used to provide all sorts of information about the network. Targeted for explorers and similar services.
About the configuration of the API:
- The API is supposed to be launched using the cli found at the package cli/node, and configured through the configuration file.
- The mentioned configuration file allows exposing any combination of the endpoint subsets.
- Although the API can run in a "standalone" manner using the serveapi command, it won't work properly
unless another process acting as a coord or sync is filling the HistoryDB.
Design principles and considerations:
- In order to decouple the API process from the rest of the node, all the communication between this package and the rest of
the system is done through the SQL database. As a matter of fact, the only public function of the package is the constructor NewAPI.
All the information needed for the API to work should be obtained through the configuration file of the cli or the database.
- The format of the requests / responses doesn't match directly with the common types, and for this reason, the package api/apitypes is used
to facilitate the format conversion. Most of the time, this is done directly at the db level.
- The API endpoints are fully documented using OpenAPI aka Swagger. All the endpoints are tested against the spec to ensure consistency
between implementation and specification. To get a sense of which endpoints exist and how they work, it's strongly recommended to check this specification.
The specification can be found at api/swagger.yml.
- In general, all the API endpoints produce queries to the SQL database in order to retrieve / insert the requested information. The most notable exceptions to this are
the /config endpoint, which returns a static object generated at construction time, and /state, which is also retrieved from the database but is generated by the api/stateapiupdater package.
*/
package api
import (
@ -7,6 +31,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/tracerr"
)
@ -15,7 +40,6 @@ type API struct {
h *historydb.HistoryDB
cg *configAPI
l2 *l2db.L2DB
chainID uint16
hermezAddress ethCommon.Address
}
@ -27,7 +51,6 @@ func NewAPI(
l2db *l2db.L2DB,
) (*API, error) {
// Check input
// TODO: is stateDB only needed for explorer endpoints or for both?
if coordinatorEndpoints && l2db == nil {
return nil, tracerr.Wrap(errors.New("cannot serve Coordinator endpoints without L2DB"))
}
@ -44,19 +67,25 @@ func NewAPI(
RollupConstants: *newRollupConstants(consts.Rollup),
AuctionConstants: consts.Auction,
WDelayerConstants: consts.WDelayer,
ChainID: consts.ChainID,
},
l2: l2db,
chainID: consts.ChainID,
hermezAddress: consts.HermezAddress,
}
middleware, err := metric.PrometheusMiddleware()
if err != nil {
return nil, err
}
server.Use(middleware)
server.NoRoute(a.noRoute)
v1 := server.Group("/v1")
// Add coordinator endpoints
if coordinatorEndpoints {
// Account
// Account creation authorization
v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
// Transaction
@ -74,17 +103,23 @@ func NewAPI(
// Transaction
v1.GET("/transactions-history", a.getHistoryTxs)
v1.GET("/transactions-history/:id", a.getHistoryTx)
// Status
// Batches
v1.GET("/batches", a.getBatches)
v1.GET("/batches/:batchNum", a.getBatch)
v1.GET("/full-batches/:batchNum", a.getFullBatch)
// Slots
v1.GET("/slots", a.getSlots)
v1.GET("/slots/:slotNum", a.getSlot)
// Bids
v1.GET("/bids", a.getBids)
// State
v1.GET("/state", a.getState)
// Config
v1.GET("/config", a.getConfig)
// Tokens
v1.GET("/tokens", a.getTokens)
v1.GET("/tokens/:id", a.getToken)
// Coordinators
v1.GET("/coordinators", a.getCoordinators)
}

api/api_test.go (+8 -2)

@ -216,6 +216,7 @@ func TestMain(m *testing.M) {
chainID := uint16(0)
_config := getConfigTest(chainID)
config = configAPI{
ChainID: chainID,
RollupConstants: *newRollupConstants(_config.RollupConstants),
AuctionConstants: _config.AuctionConstants,
WDelayerConstants: _config.WDelayerConstants,
@ -523,11 +524,16 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000),
}
stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
}, constants, &stateapiupdater.RecommendedFeePolicy{
PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
})
if err != nil {
panic(err)
}
// Generate test data, as expected to be received/sent from/to the API
testCoords := genTestCoordinators(commonCoords)

api/apitypes/apitypes.go (+8 -0)

@ -1,3 +1,11 @@
/*
Package apitypes is used to map the common types used across the node with the format expected by the API.
This is done using different strategies:
- Marshallers: they get triggered when the API marshals the response structs into JSONs
- Scanners/Valuers: they get triggered when a struct is sent/received to/from the SQL database
- Adhoc functions: when the already mentioned strategies are not suitable, functions are added to the structs to facilitate the conversions
*/
package apitypes
import (

api/config.go (+1 -0)

@ -57,6 +57,7 @@ type Config struct {
}
type configAPI struct {
ChainID uint16 `json:"chainId"`
RollupConstants rollupConstants `json:"hermez"`
AuctionConstants common.AuctionConstants `json:"auction"`
WDelayerConstants common.WDelayerConstants `json:"withdrawalDelayer"`

api/handlers.go (+5 -1)

@ -8,6 +8,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/tracerr"
"github.com/lib/pq"
"github.com/russross/meddler"
@ -46,7 +47,9 @@ var (
func retSQLErr(err error, c *gin.Context) {
log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error()
unwrapErr := tracerr.Unwrap(err)
metric.CollectError(unwrapErr)
errMsg := unwrapErr.Error()
retDupKey := func(errCode pq.ErrorCode) {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if errCode == "23505" {
@ -80,6 +83,7 @@ func retSQLErr(err error, c *gin.Context) {
func retBadReq(err error, c *gin.Context) {
log.Warnw("HTTP API Bad request error", "err", err)
metric.CollectError(err)
c.JSON(http.StatusBadRequest, errorMsg{
Message: err.Error(),
})

api/stateapiupdater/stateapiupdater.go (+66 -8)

@ -1,11 +1,20 @@
/*
Package stateapiupdater is responsible for generating and storing the object response of the GET /state endpoint exposed through the api package.
This object is extensively defined at the OpenAPI spec located at api/swagger.yml.
Deployment considerations: in a setup where multiple processes are used (dedicated api process, separated coord / sync, ...), only one process should take care of using this package.
*/
package stateapiupdater
import (
"database/sql"
"fmt"
"sync"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
)
@ -17,11 +26,45 @@ type Updater struct {
vars common.SCVariablesPtr
consts historydb.Constants
rw sync.RWMutex
rfp *RecommendedFeePolicy
}
// RecommendedFeePolicy describes how the recommended fee is calculated
type RecommendedFeePolicy struct {
PolicyType RecommendedFeePolicyType `validate:"required"`
StaticValue float64
}
// RecommendedFeePolicyType describes the different available recommended fee strategies
type RecommendedFeePolicyType string
const (
// RecommendedFeePolicyTypeStatic always gives the same StaticValue as the recommended fee
RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
// RecommendedFeePolicyTypeAvgLastHour sets the recommended fee using the average fee of the last hour
RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
)
func (rfp *RecommendedFeePolicy) valid() bool {
switch rfp.PolicyType {
case RecommendedFeePolicyTypeStatic:
if rfp.StaticValue == 0 {
log.Warn("RcommendedFee is set to 0 USD, and the policy is static")
}
return true
case RecommendedFeePolicyTypeAvgLastHour:
return true
default:
return false
}
}
// NewUpdater creates a new Updater
func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *Updater {
consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
if ok := rfp.valid(); !ok {
return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
}
u := Updater{
hdb: hdb,
config: *config,
@ -31,9 +74,10 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
ForgeDelay: config.ForgeDelay,
},
},
rfp: rfp,
}
u.SetSCVars(vars.AsPtr())
return &u
return &u, nil
}
// Store the State in the HistoryDB
@ -65,13 +109,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *Updater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
switch u.rfp.PolicyType {
case RecommendedFeePolicyTypeStatic:
u.rw.Lock()
u.state.RecommendedFee = common.RecommendedFee{
ExistingAccount: u.rfp.StaticValue,
CreatesAccount: u.rfp.StaticValue,
CreatesAccountInternal: u.rfp.StaticValue,
}
u.rw.Unlock()
case RecommendedFeePolicyTypeAvgLastHour:
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
default:
return tracerr.New("Invalid recommended fee policy")
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
}
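
For context, a minimal wiring sketch (not part of this commit; buildUpdater and its arguments are illustrative and assumed to be initialized elsewhere, as node.go does) showing how a static recommended-fee policy reaches the updater:

package example

import (
	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/tracerr"
)

// buildUpdater is a hypothetical helper: hdb, nodeCfg, scVars and consts are
// assumed to be set up elsewhere, as in node.go.
func buildUpdater(hdb *historydb.HistoryDB, nodeCfg *historydb.NodeConfig,
	scVars *common.SCVariables, consts *historydb.Constants) (*stateapiupdater.Updater, error) {
	rfp := stateapiupdater.RecommendedFeePolicy{
		PolicyType:  stateapiupdater.RecommendedFeePolicyTypeStatic,
		StaticValue: 0.99, // recommended fee in USD, as in cli/node/cfg.buidler.toml
	}
	u, err := stateapiupdater.NewUpdater(hdb, nodeCfg, scVars, consts, &rfp)
	if err != nil {
		// An unknown PolicyType is rejected at construction time.
		return nil, tracerr.Wrap(err)
	}
	// Each call refreshes the RecommendedFee served by GET /state according to the policy.
	if err := u.UpdateRecommendedFee(); err != nil {
		log.Warnw("UpdateRecommendedFee", "err", err)
	}
	return u, nil
}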

api/swagger.yml (+5 -0)

@ -3040,10 +3040,15 @@ components:
- maxEmergencyModeTime
- hermezRollup
additionalProperties: false
chainId:
type: integer
description: Id of the chain
example: 27
required:
- hermez
- auction
- withdrawalDelayer
- chainId
additionalProperties: false
Error:
type: object

api/txspool.go (+1 -1)

@ -187,7 +187,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
poolTx.TokenID, account.TokenID))
}
// Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) {
if !poolTx.VerifySignature(a.cg.ChainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature"))
}
return nil

cli/node/cfg.buidler.toml (+9 -1)

@ -35,7 +35,7 @@ Symbol = "SUSHI"
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
[Debug]
APIAddress = "localhost:12345"
APIAddress = "0.0.0.0:12345"
MeddlerLogs = true
GinDebugMode = true
@ -145,3 +145,11 @@ Coordinator = true
BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
LightScrypt = true
# RollupVerifierIndex = 0
[RecommendedFeePolicy]
# Strategy used to calculate the recommended fee that the API will expose.
# Available options:
# - Static: always return the same value (StaticValue) in USD
# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
PolicyType = "Static"
StaticValue = 0.99

common/l1tx.go (+1 -1)

@ -22,7 +22,7 @@ type L1Tx struct {
// - L1UserTx: 0
// - L1CoordinatorTx: 1
TxID TxID `meddler:"id"`
// ToForgeL1TxsNum indicates in which the tx was forged / will be forged
// ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
Position int `meddler:"position"`
// UserOrigin is set to true if the tx was originated by a user, false if it was

config/config.go (+4 -2)

@ -8,6 +8,7 @@ import (
"github.com/BurntSushi/toml"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/tracerr"
@ -347,8 +348,9 @@ type Node struct {
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
}
// APIServer is the api server configuration parameters

coordinator/batch.go (+1 -0)

@ -80,6 +80,7 @@ type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum
ServerProof prover.Client
ProofStart time.Time
ZKInputs *common.ZKInputs
Proof *prover.Proof
PublicInputs []*big.Int

coordinator/pipeline.go (+23 -2)

@ -5,6 +5,7 @@ import (
"database/sql"
"fmt"
"math/big"
"strconv"
"sync"
"time"
@ -14,6 +15,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/txselector"
@ -246,6 +248,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// 3. Send the ZKInputs to the proof server
batchInfo.ServerProof = serverProof
batchInfo.ProofStart = time.Now()
if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
return nil, ctx.Err()
} else if err != nil {
@ -520,15 +523,30 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
// l1UserFutureTxs are the l1UserTxs that are not being forged
// in the next batch, but that are also in the queue for the
// future batches
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
} else {
// get l1UserFutureTxs, which are all the pending L1UserTxs in all the
// queues
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
// 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
@ -602,6 +620,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
// waitServerProof gets the generated zkProof & sends it to the SmartContract
func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart,
batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum))
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
// until not resolved don't continue. Returns when the proof server has calculated the proof
if err != nil {

db/historydb/historydb.go (+39 -0)

@ -1,3 +1,24 @@
/*
Package historydb is responsible for storing and retrieving the historic data of the Hermez network.
It's mostly but not exclusively used by the API and the synchronizer.
Apart from the logic defined in this package, it's important to notice that there are some triggers defined in the
migration files that have to be taken into consideration to understand the results of some queries. This is especially true
for reorgs: since all the data is directly or indirectly related to a block, handling reorgs is as easy as deleting the
reorged blocks from the block table, and all related items will be dropped in cascade. This is not the only case; in general,
functions defined in this package that are affected by this SQL-level logic have a special mention in the function description.
Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.
This package is split into different files following these ideas:
- historydb.go: constructor and functions used by packages other than the api.
- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
to restrict the maximum concurrent connections to the database.
- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used; however,
most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
- nodeinfo.go: used to handle the interfaces and structs that allow communication across processes running on different machines but sharing the same database.
*/
package historydb
import (
@ -751,6 +772,24 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
}
// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch
// with toForgeL1TxsNum (in one of the future batches, not in the next one).
func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, NULL AS effective_amount,
tx.deposit_amount, NULL AS effective_deposit_amount,
tx.eth_block_num, tx.type, tx.batch_num
FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1
ORDER BY position;`,
toForgeL1TxsNum,
)
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
}
// GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
// open or frozen queues that are not yet forged)
func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {

db/historydb/historydb_test.go (+27 -6)

@ -699,35 +699,56 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
CreateAccountDeposit(1) B: 5
CreateAccountDeposit(1) C: 5
CreateAccountDeposit(1) D: 5
> block
> batchL1
> block
CreateAccountDeposit(1) E: 5
CreateAccountDeposit(1) F: 5
> block
`
tc := til.NewContext(uint16(0), 128)
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
// Sanity check
require.Equal(t, 1, len(blocks))
require.Equal(t, 3, len(blocks))
require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs))
toForgeL1TxsNum := int64(1)
for i := range blocks {
err = historyDB.AddBlockSCData(&blocks[i])
require.NoError(t, err)
}
l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum)
l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0)
require.NoError(t, err)
assert.Equal(t, 7, len(l1UserTxs))
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1)
require.NoError(t, err)
assert.Equal(t, 5, len(l1UserTxs))
assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1)
require.NoError(t, err)
assert.Equal(t, 2, len(l1UserTxs))
count, err := historyDB.GetUnforgedL1UserTxsCount()
require.NoError(t, err)
assert.Equal(t, 5, count)
assert.Equal(t, 7, count)
// No l1UserTxs for this toForgeL1TxsNum
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
require.NoError(t, err)
assert.Equal(t, 2, len(l1UserTxs))
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2)
require.NoError(t, err)
assert.Equal(t, 0, len(l1UserTxs))
// No l1UserTxs for this toForgeL1TxsNum
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3)
require.NoError(t, err)
assert.Equal(t, 0, len(l1UserTxs))
}

db/l2db/l2db.go (+17 -0)

@ -1,3 +1,20 @@
/*
Package l2db is responsible for storing and retrieving the data received by the coordinator through the api.
Note that this data will be different for each coordinator in the network, as this represents the L2 information.
The data managed by this package is fundamentally PoolL2Tx and AccountCreationAuth. All this data comes from
the API, sent by clients, and is used by the txselector to decide which transactions are selected to forge a batch.
Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.
This package is split into different files following these ideas:
- l2db.go: constructor and functions used by packages other than the api.
- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
to restrict the maximum concurrent connections to the database.
- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used; however,
most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
*/
package l2db
import (

db/utils.go (+7 -0)

@ -1,3 +1,10 @@
/*
Package db has some common utilities shared by db/l2db and db/historydb; the most relevant ones are:
- SQL connection utilities
- Managing the SQL schema: this is done using migration files placed under db/migrations. The files are executed in
order of the file name.
- Custom meddlers: used to easily transform struct <==> table
*/
package db
import (

eth/ethereum.go (+5 -5)

@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c
if number == -1 {
blockNum = nil
}
block, err := c.client.BlockByNumber(ctx, blockNum)
header, err := c.client.HeaderByNumber(ctx, blockNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
b := &common.Block{
Num: block.Number().Int64(),
Timestamp: time.Unix(int64(block.Time()), 0),
ParentHash: block.ParentHash(),
Hash: block.Hash(),
Num: header.Number.Int64(),
Timestamp: time.Unix(int64(header.Time), 0),
ParentHash: header.ParentHash,
Hash: header.Hash(),
}
return b, nil
}

metric/metric.go (+192 -0)

@ -0,0 +1,192 @@
package metric
import (
"time"
"github.com/hermeznetwork/hermez-node/log"
"github.com/prometheus/client_golang/prometheus"
)
type (
// Metric represents the metric type
Metric string
)
const (
namespaceError = "error"
namespaceSync = "synchronizer"
namespaceTxSelector = "txselector"
namespaceAPI = "api"
)
var (
// Errors errors count metric.
Errors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceError,
Name: "errors",
Help: "",
}, []string{"error"})
// WaitServerProof duration time to get the calculated
// proof from the server.
WaitServerProof = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceSync,
Name: "wait_server_proof",
Help: "",
}, []string{"batch_number", "pipeline_number"})
// Reorgs block reorg count
Reorgs = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceSync,
Name: "reorgs",
Help: "",
})
// LastBlockNum last block synced
LastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_block_num",
Help: "",
})
// EthLastBlockNum last eth block synced
EthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_block_num",
Help: "",
})
// LastBatchNum last batch synced
LastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_batch_num",
Help: "",
})
// EthLastBatchNum last eth batch synced
EthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_batch_num",
Help: "",
})
// GetL2TxSelection L2 tx selection count
GetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l2_txselection_total",
Help: "",
})
// GetL1L2TxSelection L1L2 tx selection count
GetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l1_l2_txselection_total",
Help: "",
})
// SelectedL1CoordinatorTxs selected L1 coordinator tx count
SelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_coordinator_txs",
Help: "",
})
// SelectedL1UserTxs selected L1 user tx count
SelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_user_txs",
Help: "",
})
// SelectedL2Txs selected L2 tx count
SelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l2_txs",
Help: "",
})
// DiscardedL2Txs discarded L2 tx count
DiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "discarded_l2_txs",
Help: "",
})
)
func init() {
if err := registerCollectors(); err != nil {
log.Error(err)
}
}
func registerCollectors() error {
if err := registerCollector(Errors); err != nil {
return err
}
if err := registerCollector(WaitServerProof); err != nil {
return err
}
if err := registerCollector(Reorgs); err != nil {
return err
}
if err := registerCollector(LastBlockNum); err != nil {
return err
}
if err := registerCollector(LastBatchNum); err != nil {
return err
}
if err := registerCollector(EthLastBlockNum); err != nil {
return err
}
if err := registerCollector(EthLastBatchNum); err != nil {
return err
}
if err := registerCollector(GetL2TxSelection); err != nil {
return err
}
if err := registerCollector(GetL1L2TxSelection); err != nil {
return err
}
if err := registerCollector(SelectedL1CoordinatorTxs); err != nil {
return err
}
if err := registerCollector(SelectedL1UserTxs); err != nil {
return err
}
if err := registerCollector(SelectedL2Txs); err != nil {
return err
}
return registerCollector(DiscardedL2Txs)
}
func registerCollector(collector prometheus.Collector) error {
err := prometheus.Register(collector)
if err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
return err
}
}
return nil
}
// MeasureDuration measures the method execution duration
// and saves it into a histogram metric
func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) {
duration := time.Since(start)
histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds()))
}
// CollectError collects the error message and increments
// the error count
func CollectError(err error) {
Errors.With(map[string]string{"error": err.Error()}).Inc()
}

metric/request.go (+78 -0)

@ -0,0 +1,78 @@
package metric
import (
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
)
const (
favicon = "/favicon.ico"
)
// Prometheus contains the metrics gathered by the instance and its path
type Prometheus struct {
reqCnt *prometheus.CounterVec
reqDur *prometheus.HistogramVec
}
// NewPrometheus generates a new set of metrics with a certain subsystem name
func NewPrometheus() (*Prometheus, error) {
reqCnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceAPI,
Name: "requests_total",
Help: "How many HTTP requests processed, partitioned by status code and HTTP method",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqCnt); err != nil {
return nil, err
}
reqDur := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceAPI,
Name: "request_duration_seconds",
Help: "The HTTP request latencies in seconds",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqDur); err != nil {
return nil, err
}
return &Prometheus{
reqCnt: reqCnt,
reqDur: reqDur,
}, nil
}
// PrometheusMiddleware creates the prometheus collector and
// defines the status handler function for the middleware
func PrometheusMiddleware() (gin.HandlerFunc, error) {
p, err := NewPrometheus()
if err != nil {
return nil, err
}
return p.Middleware(), nil
}
// Middleware defines the status handler function for the middleware
func (p *Prometheus) Middleware() gin.HandlerFunc {
return func(c *gin.Context) {
if c.Request.URL.Path == favicon {
c.Next()
return
}
start := time.Now()
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
fullPath := c.FullPath()
p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed)
p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc()
}
}
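
api/api.go above shows the real wiring inside NewAPI; as a standalone sketch (the engine name and error handling are illustrative), attaching the middleware to a gin engine looks roughly like this:

package main

import (
	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/metric"
)

func main() {
	engine := gin.New()
	middleware, err := metric.PrometheusMiddleware()
	if err != nil {
		panic(err)
	}
	// From here on, every handled request increments api_requests_total and
	// observes api_request_duration_seconds, labelled by status code, method and path.
	engine.Use(middleware)
	// Register routes and run the server as api/api.go does.
}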

node/node.go (+10 -1)

@ -288,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
stateAPIUpdater, err := stateapiupdater.NewUpdater(
historyDB,
&hdbNodeCfg,
initSCVars,
&hdbConsts,
&cfg.RecommendedFeePolicy,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
var coord *coordinator.Coordinator
if mode == ModeCoordinator {

priceupdater/priceupdater.go (+4 -0)

@ -173,6 +173,10 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr)
case UpdateMethodTypeStatic:
tokenPrice = token.StaticValue
if tokenPrice == float64(0) {
log.Warn("token price is set to 0. Probably StaticValue is not put in the configuration file,",
"token", token.Symbol)
}
case UpdateMethodTypeIgnore:
continue
}

synchronizer/metrics.go (+0 -44)

@ -1,44 +0,0 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}

synchronizer/synchronizer.go (+6 -4)

@ -47,6 +47,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr"
)
@ -581,6 +582,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
return nil, nil, tracerr.Wrap(err)
}
discarded := lastSavedBlock.Num - lastDBBlockNum
metric.Reorgs.Inc()
return nil, &discarded, nil
}
}
@ -673,16 +675,16 @@ func (s *Synchronizer) Sync(ctx context.Context,
}
for _, batchData := range rollupData.Batches {
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum))
metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
)
}
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(),

test/zkproof/flows_test.go (+7 -7)

@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
}
// TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
// TxSelector select the transactions for the next Batch
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err)
// BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
require.NoError(t, err)
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(txprocConfig, nil)
txsel.GetL1L2TxSelection(txprocConfig, nil, nil)
require.NoError(t, err)
assert.Equal(t, 1, len(coordIdxs))
assert.Equal(t, 0, len(oL1UserTxs))

txselector/metrics.go (+0 -53)

@ -1,53 +0,0 @@
package txselector
import "github.com/prometheus/client_golang/prometheus"
var (
metricGetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l2_txselecton_total",
Help: "",
},
)
metricGetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l1_l2_txselecton_total",
Help: "",
},
)
metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_coordinator_txs",
Help: "",
},
)
metricSelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_user_txs",
Help: "",
},
)
metricSelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l2_txs",
Help: "",
},
)
metricDiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_discarded_l2_txs",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricGetL2TxSelection)
prometheus.MustRegister(metricGetL1L2TxSelection)
prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
prometheus.MustRegister(metricSelectedL1UserTxs)
prometheus.MustRegister(metricSelectedL2Txs)
prometheus.MustRegister(metricDiscardedL2Txs)
}

txselector/txselector.go (+66 -27)

@ -13,6 +13,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub"
@ -84,7 +85,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
// check if CoordinatorAccount for TokenID is already pending to create
if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID,
if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID,
txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
return nil, positionL1, nil
}
@ -121,11 +122,12 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// but there is a transactions to them and the authorization of account
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx,
func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx,
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL2TxSelection.Inc()
metric.GetL2TxSelection.Inc()
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig,
[]common.L1Tx{}, l1UserFutureTxs)
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
}
@ -139,11 +141,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL1L2TxSelection.Inc()
metric.GetL1L2TxSelection.Inc()
coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs)
return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
}
@ -157,7 +159,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The
// current version is implemented in order to have a functional
@ -221,10 +223,11 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(0)
metricSelectedL2Txs.Set(0)
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metric.SelectedL1CoordinatorTxs.Set(0)
metric.SelectedL2Txs.Set(0)
metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
}
@ -233,7 +236,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
var validTxs, discardedL2Txs []common.PoolL2Tx
l2TxsForgable = sortL2Txs(l2TxsForgable)
accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs,
l2TxsForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
@ -247,8 +250,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
var l1CoordinatorTxs2 []common.L1Tx
accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig,
len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
validTxs, discardedL2Txs)
len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs,
l2TxsNonForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
@ -320,17 +323,18 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metric.SelectedL2Txs.Set(float64(len(validTxs)))
metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx,
l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx,
[]common.PoolL2Tx, []common.PoolL2Tx, error) {
var l1CoordinatorTxs []common.L1Tx
positionL1 := nL1Txs
var accAuths [][]byte
@ -432,7 +436,8 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
validL2Tx, l1CoordinatorTx, accAuth, err :=
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
nL1Txs, l1UserFutureTxs, l1CoordinatorTxs,
positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of
@ -572,18 +577,35 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
// l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
// array.
func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx,
positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx,
*common.AccountCreationAuth, error) {
selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs,
l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) (
*common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) {
// if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
// previous L2Tx in the current process already created an
// L1CoordinatorTx of this type, the DB may still indicate that a new
// L1CoordinatorTx needs to be created, but as it is already created, the
// tx is valid
if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
return &l2Tx, nil, nil, nil
}
// check if the L2Tx receiver account will be created by one of the l1UserFutureTxs
// (in the next batch, the current frozen queue). In that case, the L2Tx
// will be discarded at the current batch, even if there is an
// AccountCreationAuth for the account, as there is a L1UserTx in the
// frozen queue that will create the receiver Account. The L2Tx is
// discarded to avoid the Coordinator creating a new L1CoordinatorTx to
// create the receiver account, which will be also created in the next
// batch from the L1UserFutureTx, ending with the user having 2
// different accounts for the same TokenID. The double account creation
// is supported by the Hermez zkRollup specification, but it was decided
// to mitigate it at the TxSelector level for the explained cases.
if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" +
" receiver account does not exist yet, and there is a L1UserTx that" +
" will create that account in a future batch.")
}
var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
@ -686,7 +708,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
return &l2Tx, l1CoordinatorTx, accAuth, nil
}
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr &&
@ -698,6 +720,23 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
return false
}
func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1UserFutureTxs); i++ {
if l1UserFutureTxs[i].FromEthAddr == addr &&
l1UserFutureTxs[i].TokenID == tokenID &&
l1UserFutureTxs[i].FromBJJ == bjj {
return true
}
if l1UserFutureTxs[i].FromEthAddr == addr &&
l1UserFutureTxs[i].TokenID == tokenID &&
common.EmptyBJJComp == bjj {
return true
}
}
return false
}
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
// Sort by absolute fee with SliceStable, so that txs with same

txselector/txselector_test.go (+140 -31)

@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:1")
l1UserTxs := []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:2")
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:3")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:4")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:5")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:6")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
@ -328,7 +328,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
}
// batch1
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// 1st TransferToEthAddr
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
// initial PoolExit, which now is valid as B has enough Balance
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) {
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// 8 transfers from the same account
@@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
// transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
batchPoolL2 := `
@@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 3
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 4
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// 8 transfers from the same account
// 3 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126)
@@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) {
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and do 8 L2Tx transfers from account A
// batch 2 to create some accounts with positive balance, and do 3 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// batch 2 to create the accounts (from L1UserTxs)
@@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// batch 4. In this Batch, account B has enough balance to send the txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -1038,3 +1038,112 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
require.Equal(t, 3, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
}
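// TestL1UserFutureTxs checks the selection when an L2Tx targets an account
// that does not exist yet but will be created by an L1UserTx already frozen
// for a future batch: the L2Tx is discarded in the current batch and is
// selected once that L1UserTx has been forged.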
func TestL1UserFutureTxs(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{2}
CreateAccountDeposit(0) B: 18
> batchL1 // forge L1User{2}, freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
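// Note: account B is created in a later L1 batch on purpose, so that its
// CreateAccountDeposit is still a "future" (frozen but not yet forged)
// L1UserTx when the L2 transfer towards B is selected.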
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as they will be set by generating the
// PoolL2Txs directly for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
l1UserFutureTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
batchPoolL2 := `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 10 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add AccountCreationAuth for B
_ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and do 1 L2Tx transfer from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
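// l1UserFutureTxs contains the L1UserTxs already frozen in the queue of the
// following batch (here the CreateAccountDeposit of B), so the selector knows
// that the destination account will be created soon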
l1UserFutureTxs =
til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
require.Equal(t, 2, len(l1UserTxs))
require.Equal(t, 1, len(l1UserFutureTxs))
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// no L2Tx is selected due to the L1UserFutureTx; the L2Tx will be processed
// at the next batch, once the L1UserTx that creates account B is processed,
// even though there is an AccountCreationAuth for account B.
assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+
" discarded at the current batch, as the receiver account does"+
" not exist yet, and there is a L1UserTx that will create that"+
" account in a future batch.",
discardedL2Txs[0].Info)
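// mark the selected L2Txs in the pool as forging for the current batch, so
// they are not selected again in the following batches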
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
l1UserFutureTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// the L2Tx is selected now that the L1UserTx creating account B has been processed
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// generate a new L2Tx A-B and check that it is processed
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, nil, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
