Compare commits


13 Commits

Author SHA1 Message Date
arnaubennassar
e52ba3d258 Fix mock server 2021-03-26 13:27:05 +01:00
a_bennassar
2184084408 Merge pull request #659 from hermeznetwork/feature/move-api-subpackages
Move apitypes & stateapiupdater into api dir
2021-03-22 17:02:38 +01:00
arnaucube
206c8e6e8f Move apitypes & stateapiupdater into api dir 2021-03-22 16:51:10 +01:00
Eduard S
c2c74e14f1 Merge pull request #655 from hermeznetwork/feature/api-versioning
Add API versioning
2021-03-22 16:38:12 +01:00
a_bennassar
80e20f3cf1 Merge pull request #658 from hermeznetwork/feature/update-sql-func
Add account_state view
2021-03-22 16:37:46 +01:00
arnaucube
d9741da43b Add API versioning 2021-03-22 16:33:38 +01:00
laisolizq
7b297c77da Add account_state view 2021-03-22 14:43:24 +01:00
Eduard S
d80e3a8988 Merge pull request #648 from hermeznetwork/feature/txsel-forgablepriority
Update TxSel selection to prioritize the forgable
2021-03-22 13:14:53 +01:00
arnaucube
2873ce875a Update TxSel selection to prioritize the forgable
Previously, when len(nonForgableL2Txs) > maxL2Txs and the nonForgableL2Txs
had a higher fee than the forgableL2Txs, neither the forgableTxs nor the
nonForgableTxs were ever forged. Now the TxSelector first forges the
forgableTxs (those that are forgable with the initial state of the
accounts: balances & nonces), and then the nonForgableL2Txs, which may be
unblocked once the forgable ones have been processed.
2021-03-22 13:03:47 +01:00
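
As a concrete illustration of the two-pass behavior described in this commit message, here is a toy, self-contained Go sketch. All types and names in it are hypothetical (it is not hermez-node code); the real implementation lives in the txselector.go diff further down (splitL2ForgableAndNonForgable, sortL2Txs, processL2Txs).

package main

import (
	"fmt"
	"sort"
)

type tx struct {
	from, to string
	amount   int
	fee      int
	nonce    int
}

type state struct {
	balance map[string]int
	nonce   map[string]int
}

func (s *state) forgable(t tx) bool {
	return t.nonce == s.nonce[t.from] && s.balance[t.from] >= t.amount+t.fee
}

func (s *state) apply(t tx) {
	s.balance[t.from] -= t.amount + t.fee
	s.balance[t.to] += t.amount
	s.nonce[t.from]++
}

// selectPass sorts by fee (descending) and applies every tx that is
// forgable at the moment it is visited. SliceStable keeps the submission
// order, and therefore the per-sender nonce order, for equal fees.
func selectPass(s *state, txs []tx, maxTxs int, selected []tx) []tx {
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].fee > txs[j].fee })
	for _, t := range txs {
		if len(selected) >= maxTxs || !s.forgable(t) {
			continue
		}
		s.apply(t)
		selected = append(selected, t)
	}
	return selected
}

func main() {
	s := &state{
		balance: map[string]int{"A": 100, "B": 0},
		nonce:   map[string]int{"A": 0, "B": 0},
	}
	// B->A pays a higher fee but is not forgable yet (B has no funds);
	// A->B pays a lower fee and is forgable right away.
	pending := []tx{
		{from: "B", to: "A", amount: 40, fee: 10, nonce: 0},
		{from: "A", to: "B", amount: 50, fee: 1, nonce: 0},
	}
	var forgable, nonForgable []tx
	for _, t := range pending {
		if s.forgable(t) {
			forgable = append(forgable, t)
		} else {
			nonForgable = append(nonForgable, t)
		}
	}
	// Pass 1: forgable txs. Pass 2: the rest, possibly unblocked by pass 1.
	selected := selectPass(s, forgable, 4, nil)
	selected = selectPass(s, nonForgable, 4, selected)
	fmt.Println(len(selected), "txs selected") // 2: A->B first, then B->A
}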
Eduard S
10cfc91250 Merge pull request #651 from hermeznetwork/feature/total-txs-in-pool
API change names and add poolLoad, add maxFeeUSD
2021-03-22 11:59:09 +01:00
Eduard S
f6765f82bf Merge pull request #654 from hermeznetwork/feature/api-pending-l1s
Add pending L1 txs to API
2021-03-22 11:58:48 +01:00
arnaubennassar
90126a03a2 API change names and add poolLoad, add maxFeeUSD 2021-03-22 11:52:23 +01:00
arnaubennassar
3fcec947b4 Add pending L1 txs to API 2021-03-22 11:50:37 +01:00
33 changed files with 502 additions and 255 deletions

View File

@@ -5,7 +5,7 @@ import (
"strconv"
"testing"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"

View File

@@ -7,7 +7,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
)

View File

@@ -50,38 +50,40 @@ func NewAPI(
hermezAddress: consts.HermezAddress,
}
v1 := server.Group("/v1")
// Add coordinator endpoints
if coordinatorEndpoints {
// Account
server.POST("/account-creation-authorization", a.postAccountCreationAuth)
server.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
// Transaction
server.POST("/transactions-pool", a.postPoolTx)
server.GET("/transactions-pool/:id", a.getPoolTx)
v1.POST("/transactions-pool", a.postPoolTx)
v1.GET("/transactions-pool/:id", a.getPoolTx)
}
// Add explorer endpoints
if explorerEndpoints {
// Account
server.GET("/accounts", a.getAccounts)
server.GET("/accounts/:accountIndex", a.getAccount)
server.GET("/exits", a.getExits)
server.GET("/exits/:batchNum/:accountIndex", a.getExit)
v1.GET("/accounts", a.getAccounts)
v1.GET("/accounts/:accountIndex", a.getAccount)
v1.GET("/exits", a.getExits)
v1.GET("/exits/:batchNum/:accountIndex", a.getExit)
// Transaction
server.GET("/transactions-history", a.getHistoryTxs)
server.GET("/transactions-history/:id", a.getHistoryTx)
v1.GET("/transactions-history", a.getHistoryTxs)
v1.GET("/transactions-history/:id", a.getHistoryTx)
// Status
server.GET("/batches", a.getBatches)
server.GET("/batches/:batchNum", a.getBatch)
server.GET("/full-batches/:batchNum", a.getFullBatch)
server.GET("/slots", a.getSlots)
server.GET("/slots/:slotNum", a.getSlot)
server.GET("/bids", a.getBids)
server.GET("/state", a.getState)
server.GET("/config", a.getConfig)
server.GET("/tokens", a.getTokens)
server.GET("/tokens/:id", a.getToken)
server.GET("/coordinators", a.getCoordinators)
v1.GET("/batches", a.getBatches)
v1.GET("/batches/:batchNum", a.getBatch)
v1.GET("/full-batches/:batchNum", a.getFullBatch)
v1.GET("/slots", a.getSlots)
v1.GET("/slots/:slotNum", a.getSlot)
v1.GET("/bids", a.getBids)
v1.GET("/state", a.getState)
v1.GET("/config", a.getConfig)
v1.GET("/tokens", a.getTokens)
v1.GET("/tokens/:id", a.getToken)
v1.GET("/coordinators", a.getCoordinators)
}
return a, nil
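
With the routes registered on the v1 := server.Group("/v1") router group, every endpoint gains the /v1 prefix, as the test changes below also reflect (apiURL now ends in /v1/). A minimal client-side sketch, assuming the node's API is running locally on port 4010 (the port used by the tests and the swagger localhost server):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Endpoints that previously lived at the root are now served under /v1.
	resp, err := http.Get("http://localhost:4010/v1/tokens")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}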

View File

@@ -19,12 +19,12 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
swagger "github.com/getkin/kin-openapi/openapi3filter"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/stateapiupdater"
"github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/test/txsets"
@@ -40,8 +40,8 @@ type Pendinger interface {
New() Pendinger
}
const apiAddr = ":4010"
const apiURL = "http://localhost" + apiAddr + "/"
const apiPort = "4010"
const apiURL = "http://localhost:" + apiPort + "/v1/"
var SetBlockchain = `
Type: Blockchain
@@ -209,7 +209,7 @@ func TestMain(m *testing.M) {
panic(err)
}
// L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants)
chainID := uint16(0)
@@ -240,6 +240,7 @@ func TestMain(m *testing.M) {
nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10,
MinFeeUSD: 0,
MaxFeeUSD: 10000000000,
}
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err)
@@ -257,7 +258,7 @@ func TestMain(m *testing.M) {
panic(err)
}
// Start server
listener, err := net.Listen("tcp", apiAddr) //nolint:gosec
listener, err := net.Listen("tcp", ":"+apiPort) //nolint:gosec
if err != nil {
panic(err)
}
@@ -615,13 +616,13 @@ func TestTimeout(t *testing.T) {
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
require.NoError(t, err)
// L2DB
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO)
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 1000.0, 24*time.Hour, apiConnConTO)
// API
apiGinTO := gin.Default()
finishWait := make(chan interface{})
startWait := make(chan interface{})
apiGinTO.GET("/wait", func(c *gin.Context) {
apiGinTO.GET("/v1/wait", func(c *gin.Context) {
cancel, err := apiConnConTO.Acquire()
defer cancel()
require.NoError(t, err)
@@ -649,9 +650,9 @@ func TestTimeout(t *testing.T) {
require.NoError(t, err)
client := &http.Client{}
httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
httpReq, err := http.NewRequest("GET", "http://localhost:4444/v1/tokens", nil)
require.NoError(t, err)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/v1/wait", nil)
require.NoError(t, err)
// Request that will get timed out
var wg sync.WaitGroup

View File

@@ -7,7 +7,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"

View File

@@ -4,7 +4,7 @@ import (
"math/big"
"testing"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/stretchr/testify/assert"
@@ -145,8 +145,8 @@ func TestUpdateMetrics(t *testing.T) {
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TokenAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.Wallets, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
}
@@ -210,8 +210,8 @@ func TestGetState(t *testing.T) {
assert.Greater(t, status.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, status.Metrics.BatchFrequency, float64(0))
assert.Greater(t, status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, status.Metrics.TotalAccounts, int64(0))
assert.Greater(t, status.Metrics.TotalBJJs, int64(0))
assert.Greater(t, status.Metrics.TokenAccounts, int64(0))
assert.Greater(t, status.Metrics.Wallets, int64(0))
assert.Greater(t, status.Metrics.AvgTransactionFee, float64(0))
// Recommended fee
// TODO: perform real asserts (not just greater than 0)

View File

@@ -27,7 +27,7 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
config: *config,
consts: *consts,
state: historydb.StateAPI{
NodePublicConfig: historydb.NodePublicConfig{
NodePublicInfo: historydb.NodePublicInfo{
ForgeDelay: config.ForgeDelay,
},
},
@@ -65,7 +65,7 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *Updater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
@@ -84,12 +84,13 @@ func (u *Updater) UpdateMetrics() error {
return nil
}
lastBatchNum := lastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
metrics, poolLoad, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.Metrics = *metrics
u.state.NodePublicInfo.PoolLoad = poolLoad
u.rw.Unlock()
return nil
}
@@ -145,11 +146,17 @@ func (u *Updater) UpdateNetworkInfo(
}
}
}
// Update pending L1s
pendingL1s, err := u.hdb.GetUnforgedL1UserTxsCount()
if err != nil {
return tracerr.Wrap(err)
}
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers
u.state.Network.PendingL1Txs = pendingL1s
u.rw.Unlock()
return nil
}

View File

@@ -59,10 +59,10 @@ externalDocs:
description: Find out more about Hermez network.
url: 'https://hermez.io'
servers:
- description: Hosted mock up
url: https://apimock.hermez.network
- description: Localhost mock Up
url: http://localhost:4010
- description: Testnet API, connected to the Rinkeby deployment
url: https://api.testnet.hermez.io/v1
- description: Localhost, useful for testing/developing the API
url: http://localhost:4010/v1
tags:
- name: Coordinator
description: Endpoints used by the nodes running in coordinator mode. They are used to interact with the network.
@@ -2569,9 +2569,9 @@ components:
description: List of next coordinators to forge.
items:
$ref: '#/components/schemas/NextForger'
NodeConfig:
Node:
type: object
description: Configuration of the coordinator node. Note that this is specific for each coordinator.
description: Configuration and metrics of the coordinator node. Note that this is specific for each coordinator.
properties:
forgeDelay:
type: number
@@ -2581,9 +2581,14 @@ components:
forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value.
example: 193.4
poolLoad:
type: number
description: Number of pending transactions in the pool
example: 23201
additionalProperties: false
required:
- forgeDelay
- poolLoad
State:
type: object
description: Global variables of the network
@@ -2600,8 +2605,8 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee:
$ref: '#/components/schemas/RecommendedFee'
nodeConfig:
$ref: '#/components/schemas/NodeConfig'
node:
$ref: '#/components/schemas/Node'
additionalProperties: false
required:
- network
@@ -2610,7 +2615,7 @@ components:
- auction
- withdrawalDelayer
- recommendedFee
- nodeConfig
- node
StateNetwork:
type: object
description: Global statistics of the network
@@ -2634,6 +2639,10 @@ components:
- example: 2334
nextForgers:
$ref: '#/components/schemas/NextForgers'
pendingL1Transactions:
type: number
description: Number of pending L1 transactions (added in the smart contract queue but not forged).
example: 22
additionalProperties: false
required:
- lastEthereumBlock
@@ -2809,11 +2818,11 @@ components:
type: number
description: Average transactions per second in the last 24 hours.
example: 302.3
totalAccounts:
tokenAccounts:
type: integer
description: Number of created accounts.
example: 90473
totalBJJs:
wallets:
type: integer
description: Number of different registered BJJs.
example: 23067
@@ -2830,8 +2839,8 @@ components:
- transactionsPerBatch
- batchFrequency
- transactionsPerSecond
- totalAccounts
- totalBJJs
- tokenAccounts
- wallets
- avgTransactionFee
- estimatedTimeToForgeL1
PendingItems:
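
For API consumers, the schema changes above alter the JSON shape of GET /v1/state. Below is a minimal client-side decoding sketch covering only the renamed/added fields; the struct is partial, the field names are taken from the schema edits above and from the Go API types later in this diff, and the localhost:4010 URL is only an example:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Partial view of the /v1/state response: only fields touched by this
// change set are listed here.
type stateV1 struct {
	Node struct { // previously "nodeConfig"
		ForgeDelay float64 `json:"forgeDelay"`
		PoolLoad   int64   `json:"poolLoad"` // new
	} `json:"node"`
	Network struct {
		PendingL1Transactions int `json:"pendingL1Transactions"` // new
	} `json:"network"`
	Metrics struct {
		TokenAccounts int64 `json:"tokenAccounts"` // was "totalAccounts"
		Wallets       int64 `json:"wallets"`       // was "totalBJJs"
	} `json:"metrics"`
}

func main() {
	resp, err := http.Get("http://localhost:4010/v1/state")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var s stateV1
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("pool load: %d, pending L1 txs: %d\n",
		s.Node.PoolLoad, s.Network.PendingL1Transactions)
}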

View File

@@ -8,7 +8,7 @@ import (
"testing"
"time"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/test"

View File

@@ -8,7 +8,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr"

View File

@@ -99,6 +99,7 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
SafetyPeriod = 10
MaxTxs = 512
MinFeeUSD = 0.0
MaxFeeUSD = 50.0
TTL = "24h"
PurgeBatchDelay = 10
InvalidateBatchDelay = 20

View File

@@ -318,6 +318,7 @@ func cmdDiscard(c *cli.Context) error {
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)

View File

@@ -144,6 +144,10 @@ type Coordinator struct {
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx can pay in
// order to be accepted into the pool. Txs with a fee greater than
// the maximum will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"`
@@ -376,6 +380,10 @@ type APIServer struct {
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx can pay in
// order to be accepted into the pool. Txs with a fee greater than
// the maximum will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
} `validate:"required"`
}
Debug NodeDebug `validate:"required"`

View File

@@ -105,7 +105,7 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")

View File

@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
}
func newStateDB(t *testing.T) *statedb.LocalStateDB {

View File

@@ -8,7 +8,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr"
@@ -1083,12 +1083,8 @@ func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVaria
}
// GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
var metrics MetricsAPI
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (metrics *MetricsAPI, poolLoad int64, err error) {
metrics = &MetricsAPI{}
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
@@ -1106,7 +1102,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
@@ -1115,7 +1111,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
@@ -1141,7 +1137,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
// Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches)
@@ -1152,7 +1148,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
)
var nL2Txs int
if err := row.Scan(&nL2Txs); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
if nL2Txs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nL2Txs)
@@ -1161,18 +1157,18 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"`
TotalBJJ int64 `meddler:"total_bjj"`
TokenAccounts int64 `meddler:"token_accounts"`
Wallets int64 `meddler:"wallets"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
`SELECT COUNT(*) AS token_accounts, COUNT(DISTINCT(bjj)) AS wallets FROM account;`,
); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
metrics.TotalAccounts = ra.TotalIdx
metrics.TotalBJJs = ra.TotalBJJ
metrics.TokenAccounts = ra.TokenAccounts
metrics.Wallets = ra.Wallets
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
@@ -1184,10 +1180,18 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err)
return nil, 0, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil
// Get amount of txs in the pool
row = hdb.dbRead.QueryRow(
`SELECT COUNT(*) FROM tx_pool WHERE state = $1 AND NOT external_delete;`,
common.PoolL2TxStatePending,
)
if err := row.Scan(&poolLoad); err != nil {
return nil, 0, tracerr.Wrap(err)
}
return metrics, poolLoad, nil
}
// GetStateAPI returns the StateAPI

View File

@@ -1181,7 +1181,7 @@ const (
)
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD, maxFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
@@ -1217,11 +1217,11 @@ func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedF
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountInternal =
math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.ExistingAccount = math.Min(maxFeeUSD,
math.Max(avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccount = math.Min(maxFeeUSD,
math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccountInternal = math.Min(maxFeeUSD,
math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD))
return &recommendedFee, nil
}
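
The repeated math.Min/math.Max pattern above is simply a clamp of each derived fee into the [minFeeUSD, maxFeeUSD] range. As a stand-alone sketch (clampFeeUSD is a hypothetical helper name, not part of the codebase):

package main

import (
	"fmt"
	"math"
)

// clampFeeUSD bounds a computed fee to the configured [min, max] range,
// mirroring math.Min(maxFeeUSD, math.Max(fee, minFeeUSD)) above.
func clampFeeUSD(fee, minFeeUSD, maxFeeUSD float64) float64 {
	return math.Min(maxFeeUSD, math.Max(fee, minFeeUSD))
}

func main() {
	fmt.Println(clampFeeUSD(0.02, 0.1, 50))  // below the floor  -> 0.1
	fmt.Println(clampFeeUSD(3.7, 0.1, 50))   // inside the range -> 3.7
	fmt.Println(clampFeeUSD(900.0, 0.1, 50)) // above the ceiling -> 50
}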

View File

@@ -11,7 +11,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
@@ -1177,7 +1177,7 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
res, _, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
@@ -1186,8 +1186,8 @@ func TestGetMetricsAPI(t *testing.T) {
// There is a -2 as time for first and last batch is not taken into account
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.Wallets)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
@@ -1255,22 +1255,22 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
res, _, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.Wallets)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsInternalAPI(0)
_, _, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err)
}
@@ -1485,7 +1485,7 @@ func TestNodeInfo(t *testing.T) {
addr := ethCommon.HexToAddress("0x1234")
hash := ethCommon.HexToHash("0x5678")
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
NodePublicInfo: NodePublicInfo{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
@@ -1544,7 +1544,7 @@ func TestNodeInfo(t *testing.T) {
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
TokenAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),

View File

@@ -4,7 +4,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
@@ -32,18 +32,20 @@ type NetworkAPI struct {
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
PendingL1Txs int `json:"pendingL1Transactions"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
type NodePublicConfig struct {
// NodePublicInfo is the configuration and metrics of the node that is exposed via API
type NodePublicInfo struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
// PoolLoad amount of transactions in the pool
PoolLoad int64 `json:"poolLoad"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
NodePublicInfo NodePublicInfo `json:"node"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
@@ -63,6 +65,7 @@ type Constants struct {
type NodeConfig struct {
MaxPoolTxs uint32
MinFeeUSD float64
MaxFeeUSD float64
ForgeDelay float64
}

View File

@@ -6,7 +6,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/iden3/go-merkletree"
@@ -314,8 +314,8 @@ type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
TokenAccounts int64 `json:"tokenAccounts"`
Wallets int64 `json:"wallets"`
AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
}

View File

@@ -62,6 +62,10 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
feeUSD, l2db.minFeeUSD))
}
if feeUSD > l2db.maxFeeUSD {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) > maxFeeUSD (%v)",
feeUSD, l2db.maxFeeUSD))
}
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)

View File

@@ -27,6 +27,7 @@ type L2DB struct {
ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
maxFeeUSD float64
apiConnCon *db.APIConnectionController
}
@@ -38,6 +39,7 @@ func NewL2DB(
safetyPeriod common.BatchNum,
maxTxs uint32,
minFeeUSD float64,
maxFeeUSD float64,
TTL time.Duration,
apiConnCon *db.APIConnectionController,
) *L2DB {
@@ -48,6 +50,7 @@ func NewL2DB(
ttl: TTL,
maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
maxFeeUSD: maxFeeUSD,
apiConnCon: apiConnCon,
}
}

View File

@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil)
// Run tests

View File

@@ -6,7 +6,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
)

View File

@@ -677,12 +677,22 @@ CREATE TABLE node_info (
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
CREATE VIEW account_state AS SELECT DISTINCT idx,
first_value(nonce) OVER w AS nonce,
first_value(balance) OVER w AS balance,
first_value(eth_block_num) OVER w AS eth_block_num,
first_value(batch_num) OVER w AS batch_num
FROM account_update
window w AS (partition by idx ORDER BY item_id desc);
-- +migrate Down
-- triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
-- drop views IF EXISTS
DROP VIEW IF EXISTS account_state;
-- functions
DROP FUNCTION IF EXISTS hez_idx;
DROP FUNCTION IF EXISTS set_token_usd_update;
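
The new account_state view exposes, per account idx, the latest nonce, balance, eth_block_num and batch_num from account_update (the most recent item_id wins). A minimal read sketch follows; it assumes a PostgreSQL connection via github.com/lib/pq (hermez-node's HistoryDB runs on PostgreSQL), the connection string is a placeholder, and the account idx is arbitrary:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // assumed Postgres driver
)

func latestAccountState(db *sql.DB, idx int64) error {
	var nonce, ethBlockNum, batchNum int64
	var balance []byte // left as raw bytes: the balance column type is not shown in this diff
	err := db.QueryRow(
		`SELECT nonce, balance, eth_block_num, batch_num
		   FROM account_state WHERE idx = $1;`, idx,
	).Scan(&nonce, &balance, &ethBlockNum, &batchNum)
	if err != nil {
		return err
	}
	fmt.Printf("idx=%d nonce=%d balance=%x block=%d batch=%d\n",
		idx, nonce, balance, ethBlockNum, batchNum)
	return nil
}

func main() {
	// Placeholder connection string; adjust to the actual HistoryDB instance.
	db, err := sql.Open("postgres", "host=localhost user=hermez dbname=hermez sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := latestAccountState(db, 256); err != nil {
		panic(err)
	}
}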

View File

@@ -15,6 +15,7 @@ import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
@@ -27,7 +28,6 @@ import (
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/stateapiupdater"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/test/debugapi"
"github.com/hermeznetwork/hermez-node/txprocessor"
@@ -230,6 +230,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
@@ -253,6 +254,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
MaxFeeUSD: cfg.Coordinator.L2DB.MaxFeeUSD,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration.Seconds(),
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
@@ -522,6 +524,7 @@ func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
0,
apiConnCon,
)

View File

@@ -323,7 +323,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db
test.WipeDB(historyDB.DB())
// Init L2 DB
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
return stateDB, historyDB, l2DB
}

View File

@@ -77,7 +77,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)

View File

@@ -164,10 +164,18 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// implementation that can be used ASAP.
// Steps of this method:
// - getPendingTxs
// - ProcessL1Txs
// - getProfitable (sort by fee & nonce)
// - loop over l2Txs
// - ProcessL1Txs (User txs)
// - getPendingTxs (forgable directly with current state & not forgable
// yet)
// - split between l2TxsForgable & l2TxsNonForgable, where:
// - l2TxsForgable are the txs that are directly forgable with the
// current state
// - l2TxsNonForgable are the txs that are not directly forgable
// with the current state, but that may be forgable once the
// l2TxsForgable ones are processed
// - for l2TxsForgable, and if needed, for l2TxsNonForgable:
// - sort by Fee & Nonce
// - loop over l2Txs (txsel.processL2Txs)
// - Fill tx.TokenID tx.Nonce
// - Check enough Balance on sender
// - Check Nonce
@@ -180,14 +188,9 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// - Distribute AccumulatedFees to CoordIdxs
// - MakeCheckpoint
// get pending l2-tx from tx-pool
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
txselStateDB := txsel.localAccountsDB.StateDB
tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig)
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
// Process L1UserTxs
for i := 0; i < len(l1UserTxs); i++ {
@@ -198,21 +201,139 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
}
}
var l1CoordinatorTxs []common.L1Tx
positionL1 := len(l1UserTxs)
var accAuths [][]byte
l2TxsFromDB, err := txsel.l2db.GetPendingTxs()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
l2TxsForgable, l2TxsNonForgable := splitL2ForgableAndNonForgable(tp, l2TxsFromDB)
// Sort l2TxsRaw (cropping at MaxTx at this point).
// discardedL2Txs contains an array of the L2Txs that have not been
// selected in this Batch.
l2Txs, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx-uint32(len(l1UserTxs)))
for i := range discardedL2Txs {
discardedL2Txs[i].Info =
"Tx not selected due to low absolute fee (does not fit inside the profitable set)"
// in case that length of l2TxsForgable is 0, no need to continue, there
// is no L2Txs to forge at all
if len(l2TxsForgable) == 0 {
var discardedL2Txs []common.PoolL2Tx
for i := 0; i < len(l2TxsNonForgable); i++ {
l2TxsNonForgable[i].Info =
"Tx not selected due impossibility to be forged with the current state"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
var validTxs []common.PoolL2Tx
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(0)
metricSelectedL2Txs.Set(0)
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
}
var accAuths [][]byte
var l1CoordinatorTxs []common.L1Tx
var validTxs, discardedL2Txs []common.PoolL2Tx
l2TxsForgable = sortL2Txs(l2TxsForgable)
accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
l2TxsForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
// if there is space for more txs get also the NonForgable txs, that may
// be unblocked once the Forgable ones are processed
if len(validTxs) < int(selectionConfig.MaxTx)-(len(l1UserTxs)+len(l1CoordinatorTxs)) {
l2TxsNonForgable = sortL2Txs(l2TxsNonForgable)
var accAuths2 [][]byte
var l1CoordinatorTxs2 []common.L1Tx
accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig,
len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accAuths = append(accAuths, accAuths2...)
l1CoordinatorTxs = append(l1CoordinatorTxs, l1CoordinatorTxs2...)
} else {
// if there is no space for NonForgable txs, put them at the
// discardedL2Txs array
for i := 0; i < len(l2TxsNonForgable); i++ {
l2TxsNonForgable[i].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
}
// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
coordIdxsMap[tokenID] = coordIdx
}
var coordIdxs []common.Idx
for _, idx := range coordIdxsMap {
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
var l1CoordinatorTxs []common.L1Tx
positionL1 := nL1Txs
var accAuths [][]byte
// Iterate over l2Txs
// - check Nonces
// - check enough Balance for the Amount+Fee
@@ -221,20 +342,22 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ {
// Check if there is space for more L2Txs in the selection
maxL2Txs := int(selectionConfig.MaxTx) -
len(l1UserTxs) - len(l1CoordinatorTxs)
maxL2Txs := int(selectionConfig.MaxTx) - nL1Txs - len(l1CoordinatorTxs)
if len(validTxs) >= maxL2Txs {
// no more available slots for L2Txs
l2Txs[i].Info =
// no more available slots for L2Txs, so mark this tx
// but also the rest of remaining txs as discarded
for j := i; j < len(l2Txs); j++ {
l2Txs[j].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
discardedL2Txs = append(discardedL2Txs, l2Txs[j])
}
break
}
// get Nonce & TokenID from the Account by l2Tx.FromIdx
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
return nil, nil, nil, nil, tracerr.Wrap(err)
}
l2Txs[i].TokenID = accSender.TokenID
@@ -272,13 +395,13 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
txsel.coordAccountForTokenID(l1CoordinatorTxs,
accSender.TokenID, positionL1)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
return nil, nil, nil, nil, tracerr.Wrap(err)
}
if newL1CoordTx != nil {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
@@ -294,7 +417,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
return nil, nil, nil, nil, tracerr.Wrap(err)
}
}
@@ -309,7 +432,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
validL2Tx, l1CoordinatorTx, accAuth, err :=
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of
@@ -321,8 +444,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
}
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
@@ -351,7 +474,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
return nil, nil, nil, nil, tracerr.Wrap(err)
}
}
if validL2Tx == nil {
@@ -413,7 +536,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil,
return nil, nil, nil, nil,
tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+
"due: %s", tokenID, err))
}
@@ -439,68 +562,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
validTxs = append(validTxs, l2Txs[i])
} // after this loop, no checks to discard txs should be done
// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
coordIdxsMap[tokenID] = coordIdx
}
var coordIdxs []common.Idx
for _, idx := range coordIdxsMap {
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
// return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
return accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
// processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
@@ -636,26 +698,14 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
return false
}
// getL2Profitable returns the profitable selection of L2Txs sorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
[]common.PoolL2Tx) {
// First sort by nonce so that txs from the same account are sorted so
// that they could be applied in succession.
sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
// Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
})
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
}
// sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each
// Account is sorted, but the l2Txs can not be grouped by sender Account
@@ -665,5 +715,29 @@ func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
return l2Txs, discardedL2Txs
return l2Txs
}
func splitL2ForgableAndNonForgable(tp *txprocessor.TxProcessor,
l2Txs []common.PoolL2Tx) ([]common.PoolL2Tx, []common.PoolL2Tx) {
var l2TxsForgable, l2TxsNonForgable []common.PoolL2Tx
for i := 0; i < len(l2Txs); i++ {
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
if l2Txs[i].Nonce != accSender.Nonce {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
enoughBalance, _, _ := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
l2TxsForgable = append(l2TxsForgable, l2Txs[i])
}
return l2TxsForgable, l2TxsNonForgable
}

View File

@@ -26,11 +26,13 @@ import (
)
func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address,
coordUser *til.User) *TxSelector {
coordUser *til.User) (*TxSelector, *historydb.HistoryDB) {
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
@@ -65,7 +67,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
test.WipeDB(txsel.l2db.DB())
return txsel
return txsel, historyDB
}
func addAccCreationAuth(t *testing.T, tc *til.Context, txsel *TxSelector, chainID uint16,
@@ -157,7 +159,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -417,7 +419,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -470,11 +472,6 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
tc.RestartNonces()
// batch3
// NOTE: this batch will result with 1 L2Tx, as the PoolExit tx is not
// possible, as the PoolTransferToEthAddr is not processed yet when
// checking availability of PoolExit. This, in a near-future iteration
// of the TxSelector will return the 2 transactions as valid and
// selected, as the TxSelector will handle this kind of combinations.
batchPoolL2 = `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 50 (126)`
@@ -488,11 +485,11 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs)) // see 'NOTE' at the beginning of 'batch3' of this test
assert.Equal(t, 2, len(discardedL2Txs))
assert.Equal(t, 2, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, expectedTxID2, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, oL2Txs[1].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, discardedL2Txs[1].TxID.String())
assert.Equal(t, common.TxTypeTransferToEthAddr, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
@@ -507,12 +504,8 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
// the Exit that was not accepted at the batch2
assert.Equal(t, expectedTxID1, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, common.TxTypeExit, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
@@ -539,7 +532,7 @@ func TestTransferToBjj(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -670,7 +663,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -752,7 +745,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -865,7 +858,7 @@ func TestProcessL2Selection(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -921,3 +914,127 @@ func TestProcessL2Selection(t *testing.T) {
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// This test recreates the case where there are valid (forgable) txs with
// a low fee and invalid (not yet forgable) txs with a higher fee.
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 0
> batchL1 // Batch1: freeze L1User{3}
> batchL1 // Batch2: forge L1User{3}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, historyDB := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// Insert blocks into DB
for i := range blocks {
err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err)
}
err = historyDB.UpdateTokenValue(common.EmptyAddr, 1000)
require.NoError(t, err)
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 5,
MaxTx: 5,
MaxL1Tx: 3,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// batch 2 to create the accounts (from L1UserTxs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 0, len(oL2Txs))
require.Equal(t, 0, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 3. The A-B txs have a lower fee, but are the only ones possible
// with the current account balances: the B-A tx of amount 40 is
// processed first (the TxSelector sorts by Fee and then by Nonce) and
// will not be included because B does not have enough balance yet.
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) B-A: 40 (130) // B-A txs are only possible once A-B txs are processed
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) A-B: 20 (20)
PoolTransfer(0) A-B: 25 (150)
PoolTransfer(0) A-B: 20 (20)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 3, len(oL2Txs)) // the 3 txs A-B
require.Equal(t, 8, len(discardedL2Txs)) // the 8 txs B-A
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 4. In this Batch, account B has enough balance to send the txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 5, len(oL2Txs))
require.Equal(t, 3, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
}