Compare commits

..

1 Commit

57 changed files with 1571 additions and 3075 deletions

.gitignore vendored
View File

@@ -1 +0,0 @@
bin/

Makefile
View File

@@ -1,135 +0,0 @@
#! /usr/bin/make -f
# Project variables.
PACKAGE := github.com/hermeznetwork/hermez-node
VERSION := $(shell git describe --tags --always)
BUILD := $(shell git rev-parse --short HEAD)
BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
PROJECT_NAME := $(shell basename "$(PWD)")
# Go related variables.
GO_FILES ?= $$(find . -name '*.go' | grep -v vendor)
GOBASE := $(shell pwd)
GOBIN := $(GOBASE)/bin
GOPKG := $(.)
GOENVVARS := GOBIN=$(GOBIN)
GOCMD := $(GOBASE)/cli/node
GOPROOF := $(GOBASE)/test/proofserver/cli
GOBINARY := node
# Project configs.
MODE ?= sync
CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
POSTGRES_PASS ?= yourpasswordhere
# Use linker flags to provide version/build settings.
LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
# PID file will keep the process id of the server.
PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
# Make is verbose in Linux. Make it silent.
MAKEFLAGS += --silent
.PHONY: help
help: Makefile
@echo
@echo " Choose a command run in "$(PROJECT_NAME)":"
@echo
@sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /'
@echo
## test: Run the application check and all tests.
test: govet gocilint test-unit
## test-unit: Run all unit tests.
test-unit:
@echo " > Running unit tests"
$(GOENVVARS) go test -race -p 1 -failfast -timeout 300s -v ./...
## test-api-server: Run the API server using the Go tests.
test-api-server:
@echo " > Running unit tests"
$(GOENVVARS) FAKE_SERVER=yes go test -timeout 0 ./api -p 1 -count 1 -v
## gofmt: Run `go fmt` for all go files.
gofmt:
@echo " > Format all go files"
$(GOENVVARS) gofmt -w ${GO_FILES}
## govet: Run go vet.
govet:
@echo " > Running go vet"
$(GOENVVARS) go vet ./...
## golint: Run default golint.
golint:
@echo " > Running golint"
$(GOENVVARS) golint -set_exit_status ./...
## gocilint: Run Golang CI Lint.
gocilint:
@echo " > Running Golang CI Lint"
$-golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
## exec: Run given command. e.g; make exec run="go test ./..."
exec:
GOBIN=$(GOBIN) $(run)
## clean: Clean build files. Runs `go clean` internally.
clean:
@-rm $(GOBIN)/ 2> /dev/null
@echo " > Cleaning build cache"
$(GOENVVARS) go clean
## build: Build the project.
build: install
@echo " > Building Hermez binary..."
@bash -c "$(MAKE) migration-pack"
$(GOENVVARS) go build $(LDFLAGS) -o $(GOBIN)/$(GOBINARY) $(GOCMD)
@bash -c "$(MAKE) migration-clean"
## install: Install missing dependencies. Runs `go get` internally. e.g; make install get=github.com/foo/bar
install:
@echo " > Checking if there is any missing dependencies..."
$(GOENVVARS) go get $(GOCMD)/... $(get)
## run: Run Hermez node.
run:
@bash -c "$(MAKE) clean build"
@echo " > Running $(PROJECT_NAME)"
@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
## run-proof-mock: Run proof server mock API.
run-proof-mock: stop-proof-mock
@echo " > Running Proof Server Mock"
$(GOENVVARS) go build -o $(GOBIN)/proof $(GOPROOF)
@$(GOBIN)/proof 2>&1 & echo $$! > $(PID_PROOF_MOCK)
@cat $(PID_PROOF_MOCK) | sed "/^/s/^/ \> Proof Server Mock PID: /"
## stop-proof-mock: Stop proof server mock API.
stop-proof-mock:
@-touch $(PID_PROOF_MOCK)
@-kill -s INT `cat $(PID_PROOF_MOCK)` 2> /dev/null || true
@-rm $(PID_PROOF_MOCK) $(GOBIN)/proof 2> /dev/null || true
## migration-pack: Pack the database migrations into the binary.
migration-pack:
@echo " > Packing the migrations..."
@cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
@cd $(GOBASE)/db && packr2 && cd -
## migration-clean: Clean the database migrations pack.
migration-clean:
@echo " > Cleaning the migrations..."
@cd $(GOBASE)/db && packr2 clean && cd -
## run-database-container: Run the Postgres container
run-database-container:
@echo " > Running the postgreSQL DB..."
@-docker run --rm --name hermez-db -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$(POSTGRES_PASS)" -d postgres
## stop-database-container: Stop the Postgres container
stop-database-container:
@echo " > Stopping the postgreSQL DB..."
@-docker stop hermez-db
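
The deleted Makefile above injects the build metadata through Go's `-X` linker flag (the `LDFLAGS` line). A minimal sketch of the `main`-package side of that contract: the variable names `Version`, `Build` and `Date` come from the flags above, while everything else here is illustrative.

```go
package main

import "fmt"

// These defaults are overwritten at link time by
// -ldflags "-X=main.Version=... -X=main.Build=... -X=main.Date=...".
var (
	Version = "dev"
	Build   = "unknown"
	Date    = "unknown"
)

func main() {
	fmt.Printf("%s (build %s, %s)\n", Version, Build, Date)
}
```

Built via `make build`, a command such as `bin/node version` (see the README hunk below) would then report the injected values instead of these defaults.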

View File

@@ -8,75 +8,42 @@ Go implementation of the Hermez node.
The `hermez-node` has been tested with go version 1.14
### Build
Build the binary and check the current version:
```shell
$ make build
$ bin/node version
```
### Run
First you must edit the default/template config file at [cli/node/cfg.buidler.toml](cli/node/cfg.buidler.toml);
there is more information about the config file in [cli/node/README.md](cli/node/README.md)
After setting the config, you can build and run the Hermez Node as a synchronizer:
```shell
$ make run
```
Or build and run as a coordinator, passing the config file from another location:
```shell
$ MODE=coord CONFIG=cli/node/cfg.buidler.toml make run
```
To check the useful make commands:
```shell
$ make help
```
### Unit testing
Running the unit tests requires a connection to a PostgreSQL database. You can
run PostgreSQL with docker easily this way (where `yourpasswordhere` should
start PostgreSQL with docker easily this way (where `yourpasswordhere` should
be your password):
```shell
$ POSTGRES_PASS="yourpasswordhere" make run-database-container
```
POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
```
Afterward, run the tests with the password as env var:
Afterwards, run the tests with the password as env var:
```shell
$ POSTGRES_PASS="yourpasswordhere" make test
```
POSTGRES_PASS=yourpasswordhere go test -p 1 ./...
```
NOTE: `-p 1` forces execution of package test in serial. Otherwise, they may be
executed in parallel, and the test may find unexpected entries in the SQL database
NOTE: `-p 1` forces execution of package test in serial. Otherwise they may be
executed in paralel and the test may find unexpected entries in the SQL databse
because it's shared among all tests.
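
As a concrete illustration of how the Go tests pick up that shared database, here is a hypothetical test sketch modeled on the `TestTimeout` setup further down in this diff; note that the connection helper appears as `ConnectSQLDB` on one side of the change and `InitSQLDB` on the other, so the exact name depends on which commit you are on.

```go
package example

import (
	"os"
	"testing"

	"github.com/hermeznetwork/hermez-node/db"
	"github.com/stretchr/testify/require"
)

func TestUsesSharedDB(t *testing.T) {
	pass := os.Getenv("POSTGRES_PASS")
	// ConnectSQLDB is the helper name on one side of this diff (InitSQLDB on the other).
	database, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	defer func() { require.NoError(t, database.Close()) }()
	// All test packages talk to this one database, which is why -p 1 (serial
	// package execution) matters: packages run in parallel would see each
	// other's rows and wipes.
}
```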
There is an extra temporary option that allows you to run the API server using the
Go tests. It will be removed once the API can be properly initialized with data
from the synchronizer. To use this, run:
There is an extra temporary option that allows you to run the API server using
the Go tests. This will be removed once the API can be properly initialized,
with data from the synchronizer and so on. To use this, run:
```shell
$ POSTGRES_PASS="yourpasswordhere" make test-api-server
```
FAKE_SERVER=yes POSTGRES_PASS=yourpasswordhere go test -timeout 0 ./api -p 1 -count 1 -v`
```
### Lint
All Pull Requests need to pass the configured linter.
To run the linter locally, first, install [golangci-lint](https://golangci-lint.run).
Afterward, you can check the lints with this command:
To run the linter locally, first install [golangci-lint](https://golangci-lint.run). Afterwards you can check the lints with this command:
```shell
$ make gocilint
```
golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
```
## Usage
@@ -87,13 +54,13 @@ See [cli/node/README.md](cli/node/README.md)
### Proof Server
The node in mode coordinator requires a proof server (a server capable of
calculating proofs from the zkInputs). There is a mock proof server CLI
at `test/proofserver/cli` for testing purposes.
The node in mode coordinator requires a proof server (a server that is capable
of calculating proofs from the zkInputs). For testing purposes there is a mock
proof server cli at `test/proofserver/cli`.
Usage of `test/proofserver/cli`:
```shell
```
USAGE:
go run ./test/proofserver/cli OPTIONS
@@ -104,19 +71,11 @@ OPTIONS:
proving time duration (default 2s)
```
Also, the Makefile commands can be used to run and stop the proof server
in the background:
```shell
$ make run-proof-mock
$ make stop-proof-mock
```
### `/tmp` as tmpfs
For every processed batch, the node builds a temporary exit tree in a key-value
DB stored in `/tmp`. It is highly recommended that `/tmp` is mounted as a RAM
file system in production to avoid unnecessary reads and writes to disk. This
file system in production to avoid unecessary reads an writes to disk. This
can be done by mounting `/tmp` as tmpfs; for example, by having this line in
`/etc/fstab`:
```

View File

@@ -5,7 +5,7 @@ import (
"strconv"
"testing"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"

View File

@@ -7,7 +7,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
)

View File

@@ -2,19 +2,40 @@ package api
import (
"errors"
"sync"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr"
)
// TODO: Add correct values to constants
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Status define status of the network
type Status struct {
sync.RWMutex
NodeConfig NodeConfig `json:"nodeConfig"`
Network Network `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// API serves HTTP requests to allow external interaction with the Hermez node
type API struct {
h *historydb.HistoryDB
cg *configAPI
l2 *l2db.L2DB
status Status
chainID uint16
hermezAddress ethCommon.Address
}
@@ -25,6 +46,8 @@ func NewAPI(
server *gin.Engine,
hdb *historydb.HistoryDB,
l2db *l2db.L2DB,
config *Config,
nodeConfig *NodeConfig,
) (*API, error) {
// Check input
// TODO: is stateDB only needed for explorer endpoints or for both?
@@ -34,56 +57,54 @@ func NewAPI(
if explorerEndpoints && hdb == nil {
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
}
consts, err := hdb.GetConstants()
if err != nil {
return nil, err
}
a := &API{
h: hdb,
cg: &configAPI{
RollupConstants: *newRollupConstants(consts.Rollup),
AuctionConstants: consts.Auction,
WDelayerConstants: consts.WDelayer,
RollupConstants: *newRollupConstants(config.RollupConstants),
AuctionConstants: config.AuctionConstants,
WDelayerConstants: config.WDelayerConstants,
},
l2: l2db,
chainID: consts.ChainID,
hermezAddress: consts.HermezAddress,
status: Status{
NodeConfig: *nodeConfig,
},
chainID: config.ChainID,
hermezAddress: config.HermezAddress,
}
v1 := server.Group("/v1")
// Add coordinator endpoints
if coordinatorEndpoints {
// Account
v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
server.POST("/account-creation-authorization", a.postAccountCreationAuth)
server.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
// Transaction
v1.POST("/transactions-pool", a.postPoolTx)
v1.GET("/transactions-pool/:id", a.getPoolTx)
server.POST("/transactions-pool", a.postPoolTx)
server.GET("/transactions-pool/:id", a.getPoolTx)
}
// Add explorer endpoints
if explorerEndpoints {
// Account
v1.GET("/accounts", a.getAccounts)
v1.GET("/accounts/:accountIndex", a.getAccount)
v1.GET("/exits", a.getExits)
v1.GET("/exits/:batchNum/:accountIndex", a.getExit)
server.GET("/accounts", a.getAccounts)
server.GET("/accounts/:accountIndex", a.getAccount)
server.GET("/exits", a.getExits)
server.GET("/exits/:batchNum/:accountIndex", a.getExit)
// Transaction
v1.GET("/transactions-history", a.getHistoryTxs)
v1.GET("/transactions-history/:id", a.getHistoryTx)
server.GET("/transactions-history", a.getHistoryTxs)
server.GET("/transactions-history/:id", a.getHistoryTx)
// Status
v1.GET("/batches", a.getBatches)
v1.GET("/batches/:batchNum", a.getBatch)
v1.GET("/full-batches/:batchNum", a.getFullBatch)
v1.GET("/slots", a.getSlots)
v1.GET("/slots/:slotNum", a.getSlot)
v1.GET("/bids", a.getBids)
v1.GET("/state", a.getState)
v1.GET("/config", a.getConfig)
v1.GET("/tokens", a.getTokens)
v1.GET("/tokens/:id", a.getToken)
v1.GET("/coordinators", a.getCoordinators)
server.GET("/batches", a.getBatches)
server.GET("/batches/:batchNum", a.getBatch)
server.GET("/full-batches/:batchNum", a.getFullBatch)
server.GET("/slots", a.getSlots)
server.GET("/slots/:slotNum", a.getSlot)
server.GET("/bids", a.getBids)
server.GET("/state", a.getState)
server.GET("/config", a.getConfig)
server.GET("/tokens", a.getTokens)
server.GET("/tokens/:id", a.getToken)
server.GET("/coordinators", a.getCoordinators)
}
return a, nil
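
The structural change in this hunk is where the endpoints are registered: directly on the `gin.Engine` on one side, and on a versioned `/v1` route group on the other. A minimal standalone sketch of the group pattern (the handler body and port are illustrative, the port simply matching `apiPort` from the tests):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	// With a versioned group, the same registration serves /v1/state instead
	// of /state; this is the difference between the two sides above.
	v1 := r.Group("/v1")
	v1.GET("/state", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ok": true})
	})
	_ = r.Run(":4010") // illustrative port
}
```

Whether the prefix lives in the route group or in the client-side URL is the same choice reflected in the `apiURL`/`apiAddr` constants in the test hunk below.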

View File

@@ -19,7 +19,6 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
swagger "github.com/getkin/kin-openapi/openapi3filter"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
@@ -40,8 +39,8 @@ type Pendinger interface {
New() Pendinger
}
const apiPort = "4010"
const apiURL = "http://localhost:" + apiPort + "/v1/"
const apiAddr = ":4010"
const apiURL = "http://localhost" + apiAddr + "/"
var SetBlockchain = `
Type: Blockchain
@@ -181,13 +180,12 @@ type testCommon struct {
auctionVars common.AuctionVariables
rollupVars common.RollupVariables
wdelayerVars common.WDelayerVariables
nextForgers []historydb.NextForgerAPI
nextForgers []NextForger
}
var tc testCommon
var config configAPI
var api *API
var stateAPIUpdater *stateapiupdater.Updater
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
// emulating the task of the synchronizer in order to have data to be returned
@@ -208,8 +206,18 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
// StateDB
dir, err := ioutil.TempDir("", "tmpdb")
if err != nil {
panic(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
}()
// L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants)
chainID := uint16(0)
@@ -222,43 +230,22 @@ func TestMain(m *testing.M) {
// API
apiGin := gin.Default()
// Reset DB
test.WipeDB(hdb.DB())
constants := &historydb.Constants{
SCConsts: common.SCConsts{
Rollup: _config.RollupConstants,
Auction: _config.AuctionConstants,
WDelayer: _config.WDelayerConstants,
},
ChainID: chainID,
HermezAddress: _config.HermezAddress,
}
if err := hdb.SetConstants(constants); err != nil {
panic(err)
}
nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10,
MinFeeUSD: 0,
MaxFeeUSD: 10000000000,
}
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err)
}
api, err = NewAPI(
true,
true,
apiGin,
hdb,
l2DB,
&_config,
&NodeConfig{
ForgeDelay: 180,
},
)
if err != nil {
log.Error(err)
panic(err)
}
// Start server
listener, err := net.Listen("tcp", ":"+apiPort) //nolint:gosec
listener, err := net.Listen("tcp", apiAddr) //nolint:gosec
if err != nil {
panic(err)
}
@@ -270,6 +257,9 @@ func TestMain(m *testing.M) {
}
}()
// Reset DB
test.WipeDB(api.h.DB())
// Generate blockchain data with til
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
@@ -316,7 +306,7 @@ func TestMain(m *testing.M) {
USD: &ethUSD,
USDUpdate: &ethNow,
})
err = api.h.UpdateTokenValue(common.EmptyAddr, ethUSD)
err = api.h.UpdateTokenValue(test.EthToken.Symbol, ethUSD)
if err != nil {
panic(err)
}
@@ -343,7 +333,7 @@ func TestMain(m *testing.M) {
token.USD = &value
token.USDUpdate = &now
// Set value in DB
err = api.h.UpdateTokenValue(token.EthAddr, value)
err = api.h.UpdateTokenValue(token.Symbol, value)
if err != nil {
panic(err)
}
@@ -470,19 +460,19 @@ func TestMain(m *testing.M) {
if err = api.h.AddBids(bids); err != nil {
panic(err)
}
bootForger := historydb.NextForgerAPI{
bootForger := NextForger{
Coordinator: historydb.CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
},
}
// Set next forgers: set all as boot coordinator then replace the non boot coordinators
nextForgers := []historydb.NextForgerAPI{}
nextForgers := []NextForger{}
var initBlock int64 = 140
var deltaBlocks int64 = 40
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
fromBlock := initBlock + deltaBlocks*int64(i-1)
bootForger.Period = historydb.Period{
bootForger.Period = Period{
SlotNum: int64(i),
FromBlock: fromBlock,
ToBlock: fromBlock + deltaBlocks - 1,
@@ -522,13 +512,7 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000),
}
stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
// Generate test data, as expected to be received/sended from/to the API
// Generate test data, as expected to be received/sent from/to the API
testCoords := genTestCoordinators(commonCoords)
testBids := genTestBids(commonBlocks, testCoords, bids)
testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
@@ -605,24 +589,27 @@ func TestMain(m *testing.M) {
if err := database.Close(); err != nil {
panic(err)
}
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
os.Exit(result)
}
func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
require.NoError(t, err)
// L2DB
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 1000.0, 24*time.Hour, apiConnConTO)
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO)
// API
apiGinTO := gin.Default()
finishWait := make(chan interface{})
startWait := make(chan interface{})
apiGinTO.GET("/v1/wait", func(c *gin.Context) {
apiGinTO.GET("/wait", func(c *gin.Context) {
cancel, err := apiConnConTO.Acquire()
defer cancel()
require.NoError(t, err)
@@ -640,19 +627,24 @@ func TestTimeout(t *testing.T) {
require.NoError(t, err)
}
}()
_config := getConfigTest(0)
_, err = NewAPI(
true,
true,
apiGinTO,
hdbTO,
l2DBTO,
&_config,
&NodeConfig{
ForgeDelay: 180,
},
)
require.NoError(t, err)
client := &http.Client{}
httpReq, err := http.NewRequest("GET", "http://localhost:4444/v1/tokens", nil)
httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
require.NoError(t, err)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/v1/wait", nil)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
require.NoError(t, err)
// Request that will get timed out
var wg sync.WaitGroup
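
`TestTimeout` above exercises the `NewAPIConnectionController(1, 100*time.Millisecond)` pattern: a capacity of one concurrent request and a 100 ms acquire timeout, so a second in-flight request cannot get a slot in time. A hedged sketch of that acquire/release flow as a handler wrapper; the controller type name is assumed from the constructor, and the wrapper itself plus its 503 response are hypothetical.

```go
package example

import (
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/db"
)

// limited is a hypothetical wrapper showing the Acquire/cancel pattern used
// in the /wait handler above: acquire a slot (or fail after the timeout),
// run the handler, then release the slot.
func limited(conn *db.APIConnectionController, h gin.HandlerFunc) gin.HandlerFunc {
	return func(c *gin.Context) {
		cancel, err := conn.Acquire()
		if err != nil {
			// Hypothetical rejection when no slot frees up within the timeout.
			c.AbortWithStatus(http.StatusServiceUnavailable)
			return
		}
		defer cancel()
		h(c)
	}
}

func newController() *db.APIConnectionController {
	// Same parameters as the test: 1 concurrent connection, 100 ms acquire timeout.
	return db.NewAPIConnectionController(1, 100*time.Millisecond)
}
```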

View File

@@ -7,12 +7,10 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testBatch struct {
@@ -22,7 +20,7 @@ type testBatch struct {
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash"`
Timestamp time.Time `json:"timestamp"`
ForgerAddr ethCommon.Address `json:"forgerAddr"`
CollectedFees apitypes.CollectedFeesAPI `json:"collectedFees"`
CollectedFees map[common.TokenID]string `json:"collectedFees"`
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD"`
StateRoot string `json:"stateRoot"`
NumAccounts int `json:"numAccounts"`
@@ -75,9 +73,9 @@ func genTestBatches(
if !found {
panic("block not found")
}
collectedFees := apitypes.CollectedFeesAPI(make(map[common.TokenID]apitypes.BigIntStr))
collectedFees := make(map[common.TokenID]string)
for k, v := range cBatches[i].CollectedFees {
collectedFees[k] = *apitypes.NewBigIntStr(v)
collectedFees[k] = v.String()
}
forgedTxs := 0
for _, tx := range txs {
@@ -134,7 +132,7 @@ func TestGetBatches(t *testing.T) {
limit := 3
path := fmt.Sprintf("%s?limit=%d", endpoint, limit)
err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
assertBatches(t, tc.batches, fetchedBatches)
// minBatchNum
@@ -143,7 +141,7 @@ func TestGetBatches(t *testing.T) {
minBatchNum := tc.batches[len(tc.batches)/2].BatchNum
path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d", endpoint, minBatchNum, limit)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
minBatchNumBatches := []testBatch{}
for i := 0; i < len(tc.batches); i++ {
if tc.batches[i].BatchNum > minBatchNum {
@@ -158,7 +156,7 @@ func TestGetBatches(t *testing.T) {
maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum
path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d", endpoint, maxBatchNum, limit)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
maxBatchNumBatches := []testBatch{}
for i := 0; i < len(tc.batches); i++ {
if tc.batches[i].BatchNum < maxBatchNum {
@@ -173,7 +171,7 @@ func TestGetBatches(t *testing.T) {
slotNum := tc.batches[len(tc.batches)/2].SlotNum
path = fmt.Sprintf("%s?slotNum=%d&limit=%d", endpoint, slotNum, limit)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
slotNumBatches := []testBatch{}
for i := 0; i < len(tc.batches); i++ {
if tc.batches[i].SlotNum == slotNum {
@@ -188,7 +186,7 @@ func TestGetBatches(t *testing.T) {
forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr
path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d", endpoint, forgerAddr.String(), limit)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
forgerAddrBatches := []testBatch{}
for i := 0; i < len(tc.batches); i++ {
if tc.batches[i].ForgerAddr == forgerAddr {
@@ -202,7 +200,7 @@ func TestGetBatches(t *testing.T) {
limit = 6
path = fmt.Sprintf("%s?limit=%d", endpoint, limit)
err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
flippedBatches := []testBatch{}
for i := len(tc.batches) - 1; i >= 0; i-- {
flippedBatches = append(flippedBatches, tc.batches[i])
@@ -216,7 +214,7 @@ func TestGetBatches(t *testing.T) {
minBatchNum = tc.batches[len(tc.batches)/4].BatchNum
path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d", endpoint, minBatchNum, maxBatchNum, limit)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
minMaxBatchNumBatches := []testBatch{}
for i := 0; i < len(tc.batches); i++ {
if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum {
@@ -229,25 +227,25 @@ func TestGetBatches(t *testing.T) {
fetchedBatches = []testBatch{}
path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
require.NoError(t, err)
assert.NoError(t, err)
assertBatches(t, []testBatch{}, fetchedBatches)
// 400
// Invalid minBatchNum
path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2)
err = doBadReq("GET", path, nil, 400)
require.NoError(t, err)
assert.NoError(t, err)
// Invalid forgerAddr
path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001")
err = doBadReq("GET", path, nil, 400)
require.NoError(t, err)
assert.NoError(t, err)
}
func TestGetBatch(t *testing.T) {
endpoint := apiURL + "batches/"
for _, batch := range tc.batches {
fetchedBatch := testBatch{}
require.NoError(
assert.NoError(
t, doGoodReq(
"GET",
endpoint+strconv.Itoa(int(batch.BatchNum)),
@@ -257,16 +255,16 @@ func TestGetBatch(t *testing.T) {
assertBatch(t, batch, fetchedBatch)
}
// 400
require.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
// 404
require.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
}
func TestGetFullBatch(t *testing.T) {
endpoint := apiURL + "full-batches/"
for _, fullBatch := range tc.fullBatches {
fetchedFullBatch := testFullBatch{}
require.NoError(
assert.NoError(
t, doGoodReq(
"GET",
endpoint+strconv.Itoa(int(fullBatch.Batch.BatchNum)),
@@ -277,9 +275,9 @@ func TestGetFullBatch(t *testing.T) {
assertTxs(t, fullBatch.Txs, fetchedFullBatch.Txs)
}
// 400
require.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
// 404
require.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
}
func assertBatches(t *testing.T, expected, actual []testBatch) {

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure"

View File

@@ -99,9 +99,7 @@ func TestGetSlot(t *testing.T) {
nil, &fetchedSlot,
),
)
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
assertSlot(t, emptySlot, fetchedSlot)
// Invalid slotNum
@@ -129,10 +127,8 @@ func TestGetSlots(t *testing.T) {
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
assert.NoError(t, err)
allSlots := tc.slots
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
allSlots = append(allSlots, emptySlot)
}
assertSlots(t, allSlots, fetchedSlots)

View File

@@ -1,16 +1,320 @@
package api
import (
"database/sql"
"fmt"
"math"
"math/big"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr"
)
// Network define status of the network
type Network struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *historydb.BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
}
// NodeConfig is the configuration of the node that is exposed via API
type NodeConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// NextForger is a representation of the information of a coordinator and the period in which it will forge
type NextForger struct {
Coordinator historydb.CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// Period is a representation of a period
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
func (a *API) getState(c *gin.Context) {
stateAPI, err := a.h.GetStateAPI()
// TODO: There are no events for the buckets information, so now this information will be 0
a.status.RLock()
status := a.status //nolint
a.status.RUnlock()
c.JSON(http.StatusOK, status) //nolint
}
// SC Vars
// SetRollupVariables set Status.Rollup variables
func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
a.status.Lock()
var rollupVAPI historydb.RollupVariablesAPI
rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
for i, bucket := range rollupVariables.Buckets {
var apiBucket historydb.BucketParamsAPI
apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
rollupVAPI.Buckets[i] = apiBucket
}
rollupVAPI.SafeMode = rollupVariables.SafeMode
a.status.Rollup = rollupVAPI
a.status.Unlock()
}
// SetWDelayerVariables set Status.WithdrawalDelayer variables
func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
a.status.Lock()
a.status.WithdrawalDelayer = wDelayerVariables
a.status.Unlock()
}
// SetAuctionVariables set Status.Auction variables
func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
a.status.Lock()
var auctionAPI historydb.AuctionVariablesAPI
auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
auctionAPI.DonationAddress = auctionVariables.DonationAddress
auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
auctionAPI.Outbidding = auctionVariables.Outbidding
auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionAPI.AllocationRatio[i] = ratio
}
a.status.Auction = auctionAPI
a.status.Unlock()
}
// Network
// UpdateNetworkInfoBlock update Status.Network block related information
func (a *API) UpdateNetworkInfoBlock(
lastEthBlock, lastSyncBlock common.Block,
) {
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
}
// UpdateNetworkInfo update Status.Network information
func (a *API) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
a.status.Network.LastBatch = lastBatch
a.status.Network.CurrentSlot = currentSlot
a.status.Network.NextForgers = nextForgers
// Update buckets withdrawals
bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
if tracerr.Unwrap(err) == sql.ErrNoRows {
bucketsUpdate = nil
} else if err != nil {
return tracerr.Wrap(err)
}
for i, bucketParams := range a.status.Rollup.Buckets {
for _, bucketUpdate := range bucketsUpdate {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
a.status.Rollup.Buckets[i] = bucketParams
break
}
}
}
a.status.Unlock()
return nil
}
// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
var slots [6]*big.Int
for i, slot := range defaultSlotSetBid {
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
if !ok {
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
}
slots[i] = bigInt
}
return slots, nil
}
// getNextForgers returns next forgers
func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForger{}
// Get min bid info
var minBidInfo []historydb.MinBidInfo
if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
if err != nil {
retBadReq(err, c)
return
return nil, tracerr.Wrap(err)
}
c.JSON(http.StatusOK, stateAPI)
minBidInfo = []historydb.MinBidInfo{{
DefaultSlotSetBid: bigIntSlots,
DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
nextForger := NextForger{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = historydb.CoordinatorAPI{
Forger: a.status.Auction.BootCoordinator,
URL: a.status.Auction.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// Metrics
// UpdateMetrics update Status.Metrics information
func (a *API) UpdateMetrics() error {
a.status.RLock()
if a.status.Network.LastBatch == nil {
a.status.RUnlock()
return nil
}
batchNum := a.status.Network.LastBatch.BatchNum
a.status.RUnlock()
metrics, err := a.h.GetMetricsAPI(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Metrics = *metrics
a.status.Unlock()
return nil
}
// Recommended fee
// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
if err != nil {
return tracerr.Wrap(err)
}
var minFeeUSD float64
if a.l2 != nil {
minFeeUSD = a.l2.MinFeeUSD()
}
a.status.Lock()
a.status.RecommendedFee.ExistingAccount =
math.Max(feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.Unlock()
return nil
}
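
`getNextForgers` above derives each slot's block range and timestamps from `AuctionConstants` plus an assumed 15-second block time. A small self-contained example of that arithmetic, using made-up constants but the same formulas:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values only; the real ones come from AuctionConstants
	// (BlocksPerSlot, GenesisBlockNum) and the synchronizer's last block.
	const (
		blocksPerSlot   = int64(40)
		genesisBlockNum = int64(100)
		secondsPerBlock = int64(15) // same assumption getNextForgers makes
	)
	lastBlockNum := int64(140)
	lastBlockTime := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)

	slot := int64(2)
	fromBlock := slot*blocksPerSlot + genesisBlockNum       // 2*40 + 100 = 180
	toBlock := (slot+1)*blocksPerSlot + genesisBlockNum - 1 // 3*40 + 100 - 1 = 219
	fromTS := lastBlockTime.Add(time.Duration(secondsPerBlock*(fromBlock-lastBlockNum)) * time.Second)
	toTS := lastBlockTime.Add(time.Duration(secondsPerBlock*(toBlock-lastBlockNum)) * time.Second)

	fmt.Println(fromBlock, toBlock) // 180 219
	fmt.Println(fromTS, toTS)       // 10m0s and 19m45s after the last synced block
}
```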

View File

@@ -4,7 +4,7 @@ import (
"math/big"
"testing"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/stretchr/testify/assert"
@@ -13,7 +13,7 @@ import (
type testStatus struct {
Network testNetwork `json:"network"`
Metrics historydb.MetricsAPI `json:"metrics"`
Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@@ -25,15 +25,14 @@ type testNetwork struct {
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
NextForgers []NextForger `json:"nextForgers"`
}
func TestSetRollupVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
rollupVars := &common.RollupVariables{}
assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true)
api.SetRollupVariables(tc.rollupVars)
assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true)
}
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -52,19 +51,17 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
}
func TestSetWDelayerVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
wdelayerVars := &common.WDelayerVariables{}
assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
api.SetWDelayerVariables(tc.wdelayerVars)
assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
}
func TestSetAuctionVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
auctionVars := &common.AuctionVariables{}
assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
api.SetAuctionVariables(tc.auctionVars)
assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
}
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -88,6 +85,11 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
}
func TestUpdateNetworkInfo(t *testing.T) {
status := &Network{}
assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1)
@@ -116,17 +118,14 @@ func TestUpdateNetworkInfo(t *testing.T) {
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
require.NoError(t, err)
err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
}
func TestUpdateMetrics(t *testing.T) {
@@ -134,62 +133,51 @@ func TestUpdateMetrics(t *testing.T) {
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
err = stateAPIUpdater.UpdateMetrics()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TokenAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.Wallets, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
err = api.UpdateMetrics()
assert.NoError(t, err)
assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
}
func TestUpdateRecommendedFee(t *testing.T) {
err := stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
err := api.UpdateRecommendedFee()
assert.NoError(t, err)
var minFeeUSD float64
if api.l2 != nil {
minFeeUSD = api.l2.MinFeeUSD()
}
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
ni.StateAPI.RecommendedFee.ExistingAccount*
historydb.CreateAccountExtraFeePercentage)
assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountInternal,
ni.StateAPI.RecommendedFee.ExistingAccount*
historydb.CreateAccountInternalExtraFeePercentage)
assert.Greater(t, api.status.RecommendedFee.ExistingAccount, minFeeUSD)
assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
Rollup: &tc.rollupVars,
Auction: &tc.auctionVars,
WDelayer: &tc.wdelayerVars,
})
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
err = stateAPIUpdater.UpdateMetrics()
require.NoError(t, err)
err = stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
api.SetRollupVariables(tc.rollupVars)
api.SetWDelayerVariables(tc.wdelayerVars)
api.SetAuctionVariables(tc.auctionVars)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
err = api.UpdateMetrics()
assert.NoError(t, err)
err = api.UpdateRecommendedFee()
assert.NoError(t, err)
endpoint := apiURL + "state"
var status testStatus
require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
// SC vars
// UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -210,21 +198,19 @@ func TestGetState(t *testing.T) {
assert.Greater(t, status.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, status.Metrics.BatchFrequency, float64(0))
assert.Greater(t, status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, status.Metrics.TokenAccounts, int64(0))
assert.Greater(t, status.Metrics.Wallets, int64(0))
assert.Greater(t, status.Metrics.TotalAccounts, int64(0))
assert.Greater(t, status.Metrics.TotalBJJs, int64(0))
assert.Greater(t, status.Metrics.AvgTransactionFee, float64(0))
// Recommended fee
// TODO: perform real asserts (not just greater than 0)
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
assert.Equal(t, status.RecommendedFee.CreatesAccount,
status.RecommendedFee.ExistingAccount*
historydb.CreateAccountExtraFeePercentage)
assert.Equal(t, status.RecommendedFee.CreatesAccountInternal,
status.RecommendedFee.ExistingAccount*
historydb.CreateAccountInternalExtraFeePercentage)
status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
func assertNextForgers(t *testing.T, expected, actual []NextForger) {
assert.Equal(t, len(expected), len(actual))
for i := range expected {
// ignore timestamps and other metadata

View File

@@ -1,162 +0,0 @@
package stateapiupdater
import (
"database/sql"
"sync"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr"
)
// Updater is a utility object to facilitate updating the StateAPI
type Updater struct {
hdb *historydb.HistoryDB
state historydb.StateAPI
config historydb.NodeConfig
vars common.SCVariablesPtr
consts historydb.Constants
rw sync.RWMutex
}
// NewUpdater creates a new Updater
func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *Updater {
u := Updater{
hdb: hdb,
config: *config,
consts: *consts,
state: historydb.StateAPI{
NodePublicInfo: historydb.NodePublicInfo{
ForgeDelay: config.ForgeDelay,
},
},
}
u.SetSCVars(vars.AsPtr())
return &u
}
// Store the State in the HistoryDB
func (u *Updater) Store() error {
u.rw.RLock()
defer u.rw.RUnlock()
return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
}
// SetSCVars sets the smart contract vars (only updates those that are not nil)
func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
u.rw.Lock()
defer u.rw.Unlock()
if vars.Rollup != nil {
u.vars.Rollup = vars.Rollup
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
u.state.Rollup = *rollupVars
}
if vars.Auction != nil {
u.vars.Auction = vars.Auction
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
u.state.Auction = *auctionVars
}
if vars.WDelayer != nil {
u.vars.WDelayer = vars.WDelayer
u.state.WithdrawalDelayer = *u.vars.WDelayer
}
}
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *Updater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
}
// UpdateMetrics update Status.Metrics information
func (u *Updater) UpdateMetrics() error {
u.rw.RLock()
lastBatch := u.state.Network.LastBatch
u.rw.RUnlock()
if lastBatch == nil {
return nil
}
lastBatchNum := lastBatch.BatchNum
metrics, poolLoad, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.Metrics = *metrics
u.state.NodePublicInfo.PoolLoad = poolLoad
u.rw.Unlock()
return nil
}
// UpdateNetworkInfoBlock update Status.Network block related information
func (u *Updater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
u.rw.Lock()
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.rw.Unlock()
}
// UpdateNetworkInfo update Status.Network information
func (u *Updater) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
// Get last batch in API format
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
u.rw.RLock()
auctionVars := u.vars.Auction
u.rw.RUnlock()
// Get next forgers
lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
if err == sql.ErrNoRows {
bucketUpdates = nil
} else if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
// Update NodeInfo struct
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
u.state.Rollup.Buckets[i] = bucketParams
break
}
}
}
// Update pending L1s
pendingL1s, err := u.hdb.GetUnforgedL1UserTxsCount()
if err != nil {
return tracerr.Wrap(err)
}
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers
u.state.Network.PendingL1Txs = pendingL1s
u.rw.Unlock()
return nil
}
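
On the side of the diff where the `stateapiupdater` package exists, the expected call sequence (also visible in the state tests above) is: refresh the cached network info, metrics and recommended fee, then `Store()` the snapshot so `GET /state` can serve it from the DB. A hedged sketch, with the wiring reduced to a hypothetical helper:

```go
package example

import (
	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
	"github.com/hermeznetwork/hermez-node/common"
)

// refreshStateAPI is a hypothetical helper; it only strings together the
// Updater methods shown in the file above in the order the tests use them.
func refreshStateAPI(u *stateapiupdater.Updater,
	lastEthBlock, lastSyncBlock common.Block,
	lastBatchNum common.BatchNum, currentSlot int64) error {
	if err := u.UpdateNetworkInfo(lastEthBlock, lastSyncBlock, lastBatchNum, currentSlot); err != nil {
		return err
	}
	if err := u.UpdateMetrics(); err != nil {
		return err
	}
	if err := u.UpdateRecommendedFee(); err != nil {
		return err
	}
	// Persist the refreshed StateAPI so the HTTP handler can read it back.
	return u.Store()
}
```

Compare this with the other side of the diff, where the equivalent `UpdateNetworkInfo`/`UpdateMetrics`/`UpdateRecommendedFee` methods live directly on the `API` type and mutate `a.status` under its mutex instead of storing the snapshot in the DB.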

View File

@@ -59,10 +59,10 @@ externalDocs:
description: Find out more about Hermez network.
url: 'https://hermez.io'
servers:
- description: Testnet api, connected to Rinkeby deployment
url: https://api.testnet.hermez.io/v1
- description: Localhost useful for testing/developing the API
url: http://localhost:4010/v1
- description: Hosted mock up
url: https://apimock.hermez.network
- description: Localhost mock Up
url: http://localhost:4010
tags:
- name: Coordinator
description: Endpoints used by the nodes running in coordinator mode. They are used to interact with the network.
@@ -2569,9 +2569,9 @@ components:
description: List of next coordinators to forge.
items:
$ref: '#/components/schemas/NextForger'
Node:
NodeConfig:
type: object
description: Configuration and metrics of the coordinator node. Note that this is specific for each coordinator.
description: Configuration of the coordinator node. Note that this is specific for each coordinator.
properties:
forgeDelay:
type: number
@@ -2581,14 +2581,9 @@ components:
forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value.
example: 193.4
poolLoad:
type: number
description: Number of pending transactions in the pool
example: 23201
additionalProperties: false
required:
- forgeDelay
- poolLoad
State:
type: object
description: Global variables of the network
@@ -2605,8 +2600,8 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee:
$ref: '#/components/schemas/RecommendedFee'
node:
$ref: '#/components/schemas/Node'
nodeConfig:
$ref: '#/components/schemas/NodeConfig'
additionalProperties: false
required:
- network
@@ -2615,7 +2610,7 @@ components:
- auction
- withdrawalDelayer
- recommendedFee
- node
- nodeConfig
StateNetwork:
type: object
description: Global statistics of the network
@@ -2639,10 +2634,6 @@ components:
- example: 2334
nextForgers:
$ref: '#/components/schemas/NextForgers'
pendingL1Transactions:
type: number
description: Number of pending L1 transactions (added in the smart contract queue but not forged).
example: 22
additionalProperties: false
required:
- lastEthereumBlock
@@ -2818,11 +2809,11 @@ components:
type: number
description: Average transactions per second in the last 24 hours.
example: 302.3
tokenAccounts:
totalAccounts:
type: integer
description: Number of created accounts.
example: 90473
wallets:
totalBJJs:
type: integer
description: Number of different registered BJJs.
example: 23067
@@ -2839,8 +2830,8 @@ components:
- transactionsPerBatch
- batchFrequency
- transactionsPerSecond
- tokenAccounts
- wallets
- totalAccounts
- totalBJJs
- avgTransactionFee
- estimatedTimeToForgeL1
PendingItems:

View File

@@ -8,7 +8,7 @@ import (
"testing"
"time"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/test"

View File

@@ -8,7 +8,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr"
@@ -179,7 +179,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
// Get public key
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(fmt.Errorf("Error getting from account: %w", err))
return tracerr.Wrap(err)
}
// Validate TokenID
if poolTx.TokenID != account.TokenID {

View File

@@ -4,6 +4,7 @@ import (
"database/sql/driver"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
@@ -18,10 +19,7 @@ import (
// BigIntStr is used to scan/value *big.Int directly into strings from/to sql DBs.
// It assumes that *big.Int are inserted/fetched to/from the DB using the BigIntMeddler meddler
// defined at github.com/hermeznetwork/hermez-node/db. Since *big.Int is
// stored as DECIMAL in SQL, there's no need to implement Scan()/Value()
// because DECIMALS are encoded/decoded as strings by the sql driver, and
// BigIntStr is already a string.
// defined at github.com/hermeznetwork/hermez-node/db
type BigIntStr string
// NewBigIntStr creates a *BigIntStr from a *big.Int.
@@ -34,6 +32,34 @@ func NewBigIntStr(bigInt *big.Int) *BigIntStr {
return &bigIntStr
}
// Scan implements Scanner for database/sql
func (b *BigIntStr) Scan(src interface{}) error {
srcBytes, ok := src.([]byte)
if !ok {
return tracerr.Wrap(fmt.Errorf("can't scan %T into apitypes.BigIntStr", src))
}
// bytes to *big.Int
bigInt := new(big.Int).SetBytes(srcBytes)
// *big.Int to BigIntStr
bigIntStr := NewBigIntStr(bigInt)
if bigIntStr == nil {
return nil
}
*b = *bigIntStr
return nil
}
// Value implements valuer for database/sql
func (b BigIntStr) Value() (driver.Value, error) {
// string to *big.Int
bigInt, ok := new(big.Int).SetString(string(b), 10)
if !ok || bigInt == nil {
return nil, tracerr.Wrap(errors.New("invalid representation of a *big.Int"))
}
// *big.Int to bytes
return bigInt.Bytes(), nil
}
// StrBigInt is used to unmarshal BigIntStr directly into an alias of big.Int
type StrBigInt big.Int
@@ -47,16 +73,22 @@ func (s *StrBigInt) UnmarshalText(text []byte) error {
return nil
}
// CollectedFeesAPI is send common.batch.CollectedFee through the API
type CollectedFeesAPI map[common.TokenID]BigIntStr
// CollectedFees is used to retrieve common.batch.CollectedFee from the DB
type CollectedFees map[common.TokenID]BigIntStr
// NewCollectedFeesAPI creates a new CollectedFeesAPI from a *big.Int map
func NewCollectedFeesAPI(m map[common.TokenID]*big.Int) CollectedFeesAPI {
c := CollectedFeesAPI(make(map[common.TokenID]BigIntStr))
for k, v := range m {
c[k] = *NewBigIntStr(v)
// UnmarshalJSON unmarshals a json representation of map[common.TokenID]*big.Int
func (c *CollectedFees) UnmarshalJSON(text []byte) error {
bigIntMap := make(map[common.TokenID]*big.Int)
if err := json.Unmarshal(text, &bigIntMap); err != nil {
return tracerr.Wrap(err)
}
return c
*c = CollectedFees(make(map[common.TokenID]BigIntStr))
for k, v := range bigIntMap {
bStr := NewBigIntStr(v)
(CollectedFees(*c)[k]) = *bStr
}
// *c = CollectedFees(bStrMap)
return nil
}
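A hedged example of the JSON shape that `CollectedFees.UnmarshalJSON` accepts; the token IDs and amounts below are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hermeznetwork/hermez-node/apitypes"
)

func main() {
	// JSON form of batch.CollectedFee: TokenID keys, big-integer values.
	blob := []byte(`{"0": 1000000000000000000, "3": 42}`)

	var fees apitypes.CollectedFees
	if err := json.Unmarshal(blob, &fees); err != nil {
		panic(err)
	}
	for tokenID, amount := range fees {
		fmt.Printf("token %d -> %s\n", tokenID, amount)
	}
}
```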
// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.

View File

@@ -8,7 +8,7 @@ The `hermez-node` has been tested with go version 1.14
## Usage
```shell
```
NAME:
hermez-node - A new cli application
@@ -16,18 +16,18 @@ USAGE:
node [global options] command [command options] [arguments...]
VERSION:
v0.1.0-6-gd8a50c5
0.1.0-alpha
COMMANDS:
version Show the application version
importkey Import ethereum private key
genbjj Generate a new BabyJubJub key
wipesql Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state
wipesql Wipe the SQL DB (HistoryDB and L2DB), leaving the DB in a clean state
run Run the hermez-node in the indicated mode
discard Discard blocks up to a specified block number
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--mode MODE Set node MODE (can be "sync" or "coord")
--cfg FILE Node configuration FILE
--help, -h show help (default: false)
--version, -v print the version (default: false)
```
@@ -75,7 +75,7 @@ when running the coordinator in sync mode
Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```shell
```
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```
@@ -83,7 +83,7 @@ Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.
Now build the node executable:
```shell
```
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
@@ -98,40 +98,35 @@ run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer:
```shell
./node run --mode sync --cfg cfg.buidler.toml
```
./node --mode sync --cfg cfg.buidler.toml run
```
Run the node in mode coordinator:
```shell
./node run --mode coord --cfg cfg.buidler.toml
```
./node --mode coord --cfg cfg.buidler.toml run
```
Import an ethereum private key into the keystore:
```shell
./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
Generate a new BabyJubJub key pair:
```shell
./node genbjj
```
Check the binary version:
```shell
./node version
./node --mode coord --cfg cfg.buidler.toml genbjj
```
Wipe the entire SQL database (this will destroy all synchronized and pool
data):
```shell
./node wipesql --mode coord --cfg cfg.buidler.toml
```
./node --mode coord --cfg cfg.buidler.toml wipesql
```
Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```shell
./node discard --mode coord --cfg cfg.buidler.toml --block 8061330
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
```

View File

@@ -1,24 +0,0 @@
[API]
Address = "localhost:8386"
Explorer = true
MaxSQLConnections = 10
SQLConnectionTimeout = "2s"
[PostgreSQL]
PortWrite = 5432
HostWrite = "localhost"
UserWrite = "hermez"
PasswordWrite = "yourpasswordhere"
NameWrite = "hermez"
[Coordinator.L2DB]
SafetyPeriod = 10
MaxTxs = 512
TTL = "24h"
PurgeBatchDelay = 10
InvalidateBatchDelay = 20
PurgeBlockDelay = 10
InvalidateBlockDelay = 20
[Coordinator.API]
Coordinator = true

View File

@@ -8,31 +8,10 @@ SQLConnectionTimeout = "2s"
[PriceUpdater]
Interval = "10s"
URLBitfinexV2 = "https://api-pub.bitfinex.com/v2/"
URLCoinGeckoV3 = "https://api.coingecko.com/api/v3/"
# Available update methods:
# - coingeckoV3 (recommended): get price by SC addr using coingecko API
# - bitfinexV2: get price by token symbol using bitfinex API
# - static (recommended for blacklisting tokens): use the given StaticValue to set the price (if not provided, 0 will be used)
# - ignore: don't update the price; leave it as it is in the DB
DefaultUpdateMethod = "coingeckoV3" # Update method used for all the tokens registered on the network, and not listed in [[PriceUpdater.TokensConfig]]
[[PriceUpdater.TokensConfig]]
UpdateMethod = "bitfinexV2"
Symbol = "USDT"
Addr = "0xdac17f958d2ee523a2206206994597c13d831ec7"
[[PriceUpdater.TokensConfig]]
UpdateMethod = "coingeckoV3"
Symbol = "ETH"
Addr = "0x0000000000000000000000000000000000000000"
[[PriceUpdater.TokensConfig]]
UpdateMethod = "static"
Symbol = "UNI"
Addr = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"
StaticValue = 30.12
[[PriceUpdater.TokensConfig]]
UpdateMethod = "ignore"
Symbol = "SUSHI"
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
URL = "https://api-pub.bitfinex.com/v2/"
Type = "bitfinexV2"
# URL = "https://api.coingecko.com/api/v3/"
# Type = "coingeckoV3"
[Debug]
APIAddress = "localhost:12345"
@@ -86,8 +65,6 @@ SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
PurgeByExtDelInterval = "1m"
MustForgeAtSlotDeadline = true
IgnoreSlotCommitment = false
[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -99,7 +76,6 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
SafetyPeriod = 10
MaxTxs = 512
MinFeeUSD = 0.0
MaxFeeUSD = 50.0
TTL = "24h"
PurgeBatchDelay = 10
InvalidateBatchDelay = 20

View File

@@ -34,22 +34,6 @@ const (
modeCoord = "coord"
)
var (
// Version represents the program based on the git tag
Version = "v0.1.0"
// Build represents the program based on the git commit
Build = "dev"
// Date represents the date of application was built
Date = ""
)
func cmdVersion(c *cli.Context) error {
fmt.Printf("Version = \"%v\"\n", Version)
fmt.Printf("Build = \"%v\"\n", Build)
fmt.Printf("Date = \"%v\"\n", Date)
return nil
}
func cmdGenBJJ(c *cli.Context) error {
sk := babyjub.NewRandPrivKey()
skBuf := [32]byte(sk)
@@ -212,7 +196,17 @@ func cmdWipeSQL(c *cli.Context) error {
return nil
}
func waitSigInt() {
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
stopCh := make(chan interface{})
// catch ^C to send the stop signal
@@ -233,40 +227,11 @@ func waitSigInt() {
}
}()
<-stopCh
}
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
waitSigInt()
node.Stop()
return nil
}
func cmdServeAPI(c *cli.Context) error {
cfg, err := parseCliAPIServer(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
}
srv.Start()
waitSigInt()
srv.Stop()
return nil
}
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
@@ -318,7 +283,6 @@ func cmdDiscard(c *cli.Context) error {
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
@@ -355,59 +319,20 @@ func getConfig(c *cli.Context) (*Config, error) {
var cfg Config
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
if nodeCfgPath == "" {
return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
}
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.node, err = config.LoadNode(nodeCfgPath, false)
cfg.node, err = config.LoadNode(nodeCfgPath)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.node, err = config.LoadNode(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
default:
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
}
return &cfg, nil
}
// ConfigAPIServer is the configuration of the api server execution
type ConfigAPIServer struct {
mode node.Mode
server *config.APIServer
}
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
cfg, err := getConfigAPIServer(c)
if err != nil {
if err := cli.ShowAppHelp(c); err != nil {
panic(err)
}
return nil, tracerr.Wrap(err)
}
return cfg, nil
}
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
var cfg ConfigAPIServer
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
cfg.node, err = config.LoadCoordinator(nodeCfgPath)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -421,8 +346,8 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
func main() {
app := cli.NewApp()
app.Name = "hermez-node"
app.Version = Version
flags := []cli.Flag{
app.Version = "0.1.0-alpha"
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: flagMode,
Usage: fmt.Sprintf("Set node `MODE` (can be \"%v\" or \"%v\")", modeSync, modeCoord),
@@ -436,23 +361,17 @@ func main() {
}
app.Commands = []*cli.Command{
{
Name: "version",
Aliases: []string{},
Usage: "Show the application version and build",
Action: cmdVersion,
},
{
Name: "importkey",
Aliases: []string{},
Usage: "Import ethereum private key",
Action: cmdImportKey,
Flags: append(flags,
Flags: []cli.Flag{
&cli.StringFlag{
Name: flagSK,
Usage: "ethereum `PRIVATE_KEY` in hex",
Required: true,
}),
}},
},
{
Name: "genbjj",
@@ -466,37 +385,30 @@ func main() {
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " +
"leaving the DB in a clean state",
Action: cmdWipeSQL,
Flags: append(flags,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: flagYes,
Usage: "automatic yes to the prompt",
Required: false,
}),
}},
},
{
Name: "run",
Aliases: []string{},
Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun,
Flags: flags,
},
{
Name: "serveapi",
Aliases: []string{},
Usage: "Serve the API only",
Action: cmdServeAPI,
},
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: append(flags,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}),
}},
},
}
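A self-contained sketch of the flag layout used here, where `--mode`/`--cfg`-style options are app-level flags read from inside a command's action. The `example` app, flag name, and command are hypothetical; only the urfave/cli v2 pattern mirrors the code above.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := cli.NewApp()
	app.Name = "example"
	// Global flags are passed before the command: `example --mode sync run`.
	app.Flags = []cli.Flag{
		&cli.StringFlag{Name: "mode", Usage: "node mode"},
	}
	app.Commands = []*cli.Command{{
		Name: "run",
		Action: func(c *cli.Context) error {
			// Global flags are visible from the command's context.
			fmt.Println("mode:", c.String("mode"))
			return nil
		},
	}}
	if err := app.Run(os.Args); err != nil {
		fmt.Println(err)
	}
}
```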

View File

@@ -11,15 +11,15 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub"
)
const (
// AccountCreationAuthMsg is the message that is signed to authorize a
// Hermez account creation
AccountCreationAuthMsg = "Account creation"
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez
// account creation
const AccountCreationAuthMsg = "Account creation"
// EIP712Version is the used version of the EIP-712
EIP712Version = "1"
const EIP712Version = "1"
// EIP712Provider defines the Provider for the EIP-712
EIP712Provider = "Hermez Network"
)
const EIP712Provider = "Hermez Network"
var (
// EmptyEthSignature is an ethereum signature of all zeroes

View File

@@ -1,33 +0,0 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
// original SCVariables
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}

View File

@@ -24,7 +24,7 @@ var FeeFactorLsh60 [256]*big.Int
type RecommendedFee struct {
ExistingAccount float64 `json:"existingAccount"`
CreatesAccount float64 `json:"createAccount"`
CreatesAccountInternal float64 `json:"createAccountInternal"`
CreatesAccountAndRegister float64 `json:"createAccountInternal"`
}
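A small illustration of the struct above: the Go field name differs between the two sides of the diff, but both keep the `createAccountInternal` JSON key, so the wire format is unchanged. The fee values below are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the RecommendedFee shape above; only the JSON tags matter for the API.
type RecommendedFee struct {
	ExistingAccount           float64 `json:"existingAccount"`
	CreatesAccount            float64 `json:"createAccount"`
	CreatesAccountAndRegister float64 `json:"createAccountInternal"`
}

func main() {
	fee := RecommendedFee{
		ExistingAccount:           0.10,
		CreatesAccount:            0.25,
		CreatesAccountAndRegister: 0.25,
	}
	b, _ := json.Marshal(fee)
	fmt.Println(string(b)) // {"existingAccount":0.1,"createAccount":0.25,"createAccountInternal":0.25}
}
```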
// FeeSelector is used to select a percentage from the FeePlan.

View File

@@ -9,7 +9,6 @@ import (
"github.com/BurntSushi/toml"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub"
"gopkg.in/go-playground/validator.v9"
@@ -45,13 +44,6 @@ type ForgeBatchGasCost struct {
L2Tx uint64 `validate:"required"`
}
// CoordinatorAPI specifies the configuration parameters of the API in mode
// coordinator
type CoordinatorAPI struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
}
// Coordinator is the coordinator specific configuration.
type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging
@@ -109,20 +101,6 @@ type Coordinator struct {
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// MustForgeAtSlotDeadline enables the coordinator to forge slots if
// the empty slots reach the slot deadline.
MustForgeAtSlotDeadline bool
// IgnoreSlotCommitment disables forcing the coordinator to forge a
// slot immediately when the slot is not committed. If set to false,
// the coordinator will immediately forge a batch at the beginning of a
// slot if it's the slot winner.
IgnoreSlotCommitment bool
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
// batch per slot, only if there are included txs in that batch, or
// pending l1UserTxs in the smart contract. Setting this parameter
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
// and `IgnoreSlotCommitment`.
ForgeOncePerSlotIfTxs bool
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"`
@@ -144,10 +122,6 @@ type Coordinator struct {
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with greater than
// maximum fee will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"`
@@ -223,7 +197,10 @@ type Coordinator struct {
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"`
API CoordinatorAPI `validate:"required"`
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
Debug struct {
// BatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline
@@ -238,10 +215,26 @@ type Coordinator struct {
}
}
// PostgreSQL holds the PostgreSQL configuration parameters. It's possible to use
// differentiated SQL connections for read/write. If the read configuration is
// not provided, the write one is used for both reads and writes
type PostgreSQL struct {
// Node is the hermez node configuration.
type Node struct {
PriceUpdater struct {
// Interval between price updater calls
Interval Duration `valudate:"required"`
// URL of the token prices provider
URL string `valudate:"required"`
// Type of the API of the token prices provider
Type string `valudate:"required"`
} `validate:"required"`
StateDB struct {
// Path where the synchronizer StateDB is stored
Path string `validate:"required"`
// Keep is the number of checkpoints to keep
Keep int `validate:"required"`
} `validate:"required"`
// It's possible to use differentiated SQL connections for read/write.
// If the read configuration is not provided, the write one is used
// for both reads and writes
PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
@@ -262,42 +255,7 @@ type PostgreSQL struct {
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
}
// NodeDebug specifies debug configuration parameters
type NodeDebug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
// Node is the hermez node configuration.
type Node struct {
PriceUpdater struct {
// Interval between price updater calls
Interval Duration `validate:"required"`
// URLBitfinexV2 is the URL of bitfinex V2 API
URLBitfinexV2 string `validate:"required"`
// URLCoinGeckoV3 is the URL of coingecko V3 API
URLCoinGeckoV3 string `validate:"required"`
// DefaultUpdateMethod to get token prices
DefaultUpdateMethod priceupdater.UpdateMethodType `validate:"required"`
// TokensConfig to specify how each token get it's price updated
TokensConfig []priceupdater.TokenConfig
} `validate:"required"`
StateDB struct {
// Path where the synchronizer StateDB is stored
Path string `validate:"required"`
// Keep is the number of checkpoints to keep
Keep int `validate:"required"`
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server
URL string `validate:"required"`
@@ -328,7 +286,6 @@ type Node struct {
// TokenHEZ address
TokenHEZName string `validate:"required"`
} `validate:"required"`
// API specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string
@@ -346,49 +303,20 @@ type Node struct {
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug NodeDebug `validate:"required"`
Debug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
Coordinator Coordinator `validate:"-"`
}
// APIServer is the api server configuration parameters
type APIServer struct {
// NodeAPI specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string `validate:"required"`
// Explorer enables the Explorer API endpoints
Explorer bool
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Coordinator struct {
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
L2DB struct {
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with greater than
// maximum fee will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
} `validate:"required"`
}
Debug NodeDebug `validate:"required"`
}
// Load loads a generic config.
func Load(path string, cfg interface{}) error {
bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -402,8 +330,8 @@ func Load(path string, cfg interface{}) error {
return nil
}
// LoadNode loads the Node configuration from path.
func LoadNode(path string, coordinator bool) (*Node, error) {
// LoadCoordinator loads the Coordinator configuration from path.
func LoadCoordinator(path string) (*Node, error) {
var cfg Node
if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -412,28 +340,21 @@ func LoadNode(path string, coordinator bool) (*Node, error) {
if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil
}
// LoadAPIServer loads the APIServer configuration from path.
func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
var cfg APIServer
// LoadNode loads the Node configuration from path.
func LoadNode(path string) (*Node, error) {
var cfg Node
if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
}
validate := validator.New()
if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil
}
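A short usage sketch against the loader signature shown above; the config file name is illustrative, and the fields referenced are the ones from the PriceUpdater section earlier in this diff.

```go
package main

import (
	"log"

	"github.com/hermeznetwork/hermez-node/config"
)

func main() {
	// LoadNode now takes only the path (see the signature above).
	cfg, err := config.LoadNode("cfg.buidler.toml")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("price updater: %s every %v from %s",
		cfg.PriceUpdater.Type, cfg.PriceUpdater.Interval, cfg.PriceUpdater.URL)
}
```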

View File

@@ -85,7 +85,7 @@ type BatchInfo struct {
PublicInputs []*big.Int
L1Batch bool
VerifierIdx uint8
L1UserTxs []common.L1Tx
L1UserTxsExtra []common.L1Tx
L1CoordTxs []common.L1Tx
L1CoordinatorTxsAuths [][]byte
L2Txs []common.L2Tx

View File

@@ -25,7 +25,9 @@ import (
var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errSkipBatchByPolicy = fmt.Errorf("skip batch by policy")
errForgeNoTxsBeforeDelay = fmt.Errorf(
"no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
)
const (
@@ -82,20 +84,6 @@ type Config struct {
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// MustForgeAtSlotDeadline enables the coordinator to forge slots if
// the empty slots reach the slot deadline.
MustForgeAtSlotDeadline bool
// IgnoreSlotCommitment disables forcing the coordinator to forge a
// slot immediately when the slot is not committed. If set to false,
// the coordinator will immediately forge a batch at the beginning of
// a slot if it's the slot winner.
IgnoreSlotCommitment bool
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
// batch per slot, only if there are included txs in that batch, or
// pending l1UserTxs in the smart contract. Setting this parameter
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
// and `IgnoreSlotCommitment`.
ForgeOncePerSlotIfTxs bool
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval time.Duration
@@ -157,8 +145,8 @@ type Coordinator struct {
pipelineNum int // Pipeline sequential number. The first pipeline is 1
pipelineFromBatch fromBatch // batch from which we started the pipeline
provers []prover.Client
consts common.SCConsts
vars common.SCVariables
consts synchronizer.SCConsts
vars synchronizer.SCVariables
stats synchronizer.Stats
started bool
@@ -198,8 +186,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client,
ethClient eth.ClientInterface,
scConsts *common.SCConsts,
initSCVars *common.SCVariables,
scConsts *synchronizer.SCConsts,
initSCVars *synchronizer.SCVariables,
) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -288,13 +276,13 @@ type MsgSyncBlock struct {
Batches []common.BatchData
// Vars contains each Smart Contract variables if they are updated, or
// nil if they haven't changed.
Vars common.SCVariablesPtr
Vars synchronizer.SCVariablesPtr
}
// MsgSyncReorg indicates a reorg
type MsgSyncReorg struct {
Stats synchronizer.Stats
Vars common.SCVariablesPtr
Vars synchronizer.SCVariablesPtr
}
// MsgStopPipeline indicates a signal to reset the pipeline
@@ -313,7 +301,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
}
}
func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
@@ -325,13 +313,12 @@ func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
}
}
func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&c.vars, vars)
}
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64,
mustForgeAtDeadline bool) bool {
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
@@ -356,7 +343,7 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
"block", blockNum)
anyoneForge = true
}
if slot.Forger == addr || (anyoneForge && mustForgeAtDeadline) {
if slot.Forger == addr || anyoneForge {
return true
}
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
@@ -366,14 +353,14 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum, c.cfg.MustForgeAtSlotDeadline)
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum, c.cfg.MustForgeAtSlotDeadline)
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {

View File

@@ -105,7 +105,7 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
@@ -167,7 +167,6 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
EthClientAttemptsDelay: 100 * time.Millisecond,
TxManagerCheckInterval: 300 * time.Millisecond,
DebugBatchPath: debugBatchPath,
MustForgeAtSlotDeadline: true,
Purger: PurgerCfg{
PurgeBatchDelay: 10,
PurgeBlockDelay: 10,
@@ -189,12 +188,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
&prover.MockClient{Delay: 400 * time.Millisecond},
}
scConsts := &common.SCConsts{
scConsts := &synchronizer.SCConsts{
Rollup: *ethClientSetup.RollupConstants,
Auction: *ethClientSetup.AuctionConstants,
WDelayer: *ethClientSetup.WDelayerConstants,
}
initSCVars := &common.SCVariables{
initSCVars := &synchronizer.SCVariables{
Rollup: *ethClientSetup.RollupVariables,
Auction: *ethClientSetup.AuctionVariables,
WDelayer: *ethClientSetup.WDelayerVariables,
@@ -392,10 +391,6 @@ func TestCoordCanForge(t *testing.T) {
assert.Equal(t, true, coord.canForge())
assert.Equal(t, true, bootCoord.canForge())
// Anyone can forge but the node MustForgeAtSlotDeadline as set as false
coord.cfg.MustForgeAtSlotDeadline = false
assert.Equal(t, false, coord.canForge())
// Slot 3. coordinator bid, so the winner is the coordinator
stats.Eth.LastBlock.Num = ethClientSetup.AuctionConstants.GenesisBlockNum +
3*int64(ethClientSetup.AuctionConstants.BlocksPerSlot)
@@ -534,7 +529,7 @@ func TestCoordinatorStress(t *testing.T) {
coord.SendMsg(ctx, MsgSyncBlock{
Stats: *stats,
Batches: blockData.Rollup.Batches,
Vars: common.SCVariablesPtr{
Vars: synchronizer.SCVariablesPtr{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,

View File

@@ -22,7 +22,7 @@ import (
type statsVars struct {
Stats synchronizer.Stats
Vars common.SCVariablesPtr
Vars synchronizer.SCVariablesPtr
}
type state struct {
@@ -36,7 +36,7 @@ type state struct {
type Pipeline struct {
num int
cfg Config
consts common.SCConsts
consts synchronizer.SCConsts
// state
state state
@@ -57,7 +57,7 @@ type Pipeline struct {
purger *Purger
stats synchronizer.Stats
vars common.SCVariables
vars synchronizer.SCVariables
statsVarsCh chan statsVars
ctx context.Context
@@ -90,7 +90,7 @@ func NewPipeline(ctx context.Context,
coord *Coordinator,
txManager *TxManager,
provers []prover.Client,
scConsts *common.SCConsts,
scConsts *synchronizer.SCConsts,
) (*Pipeline, error) {
proversPool := NewProversPool(len(provers))
proversPoolSize := 0
@@ -125,7 +125,7 @@ func NewPipeline(ctx context.Context,
// SetSyncStatsVars is a thread-safe method to set the synchronizer Stats
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariablesPtr) {
vars *synchronizer.SCVariablesPtr) {
select {
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done():
@@ -134,7 +134,7 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *common.SCVariables) error {
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.state = state{
batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
@@ -195,7 +195,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
return nil
}
func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&p.vars, vars)
}
@@ -224,9 +224,8 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// 2. Forge the batch internally (make a selection of txs and prepare
// all the smart contract arguments)
var skipReason *string
p.mutexL2DBUpdateDelete.Lock()
batchInfo, skipReason, err = p.forgeBatch(batchNum)
batchInfo, err = p.forgeBatch(batchNum)
p.mutexL2DBUpdateDelete.Unlock()
if ctx.Err() != nil {
return nil, ctx.Err()
@@ -235,13 +234,13 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else {
log.Errorw("forgeBatch", "err", err)
}
return nil, tracerr.Wrap(err)
} else if skipReason != nil {
log.Debugw("skipping batch", "batch", batchNum, "reason", *skipReason)
return nil, tracerr.Wrap(errSkipBatchByPolicy)
}
// 3. Send the ZKInputs to the proof server
@@ -257,7 +256,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *common.SCVariables) error {
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
if p.started {
log.Fatal("Pipeline already started")
}
@@ -296,7 +295,8 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
if p.ctx.Err() != nil {
continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errSkipBatchByPolicy {
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
continue
} else if err != nil {
p.setErrAtBatchNum(batchNum)
@@ -389,109 +389,17 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
return nil
}
// slotCommitted returns true if the current slot has already been committed
func (p *Pipeline) slotCommitted() bool {
// Synchronizer has synchronized a batch in the current slot (setting
// CurrentSlot.ForgerCommitment) or the pipeline has already
// internally-forged a batch in the current slot
return p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged
}
// forgePolicySkipPreSelection is called before doing a tx selection in a batch to
// determine by policy if we should forge the batch or not. Returns true and
// the reason when the forging of the batch must be skipped.
func (p *Pipeline) forgePolicySkipPreSelection(now time.Time) (bool, string) {
// Check if the slot is not yet fulfilled
slotCommitted := p.slotCommitted()
if p.cfg.ForgeOncePerSlotIfTxs {
if slotCommitted {
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed"
}
return false, ""
}
// Determine if we must commit the slot
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
return false, ""
}
// If we haven't reached the ForgeDelay, skip forging the batch
if now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return true, "we haven't reached the forge delay"
}
return false, ""
}
// forgePolicySkipPostSelection is called after doing a tx selection in a batch to
// determine by policy if we should forge the batch or not. Returns true and
// the reason when the forging of the batch must be skipped.
func (p *Pipeline) forgePolicySkipPostSelection(now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) (bool, string, error) {
// Check if the slot is not yet fulfilled
slotCommitted := p.slotCommitted()
pendingTxs := true
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in an open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
if err != nil {
return false, "", err
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
pendingTxs = false
}
} else {
pendingTxs = false
}
}
if p.cfg.ForgeOncePerSlotIfTxs {
if slotCommitted {
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed",
nil
}
if pendingTxs {
return false, "", nil
}
return true, "cfg.ForgeOncePerSlotIfTxs = true and no pending txs",
nil
}
// Determine if we must commit the slot
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
return false, "", nil
}
// check if there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay
if now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
if !pendingTxs {
return true, "no txs to forge and we haven't reached the forge no txs delay",
nil
}
}
return false, "", nil
}
// forgeBatch forges the batchNum batch.
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
skipReason *string, err error) {
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
// remove transactions from the pool that have been there for too long
_, err = p.purger.InvalidateMaybe(p.l2DB, p.txSelector.LocalAccountsDB(),
p.stats.Sync.LastBlock.Num, int64(batchNum))
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
_, err = p.purger.PurgeMaybe(p.l2DB, p.stats.Sync.LastBlock.Num, int64(batchNum))
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
// Structure to accumulate data and metadata of the batch
now := time.Now()
@@ -501,48 +409,79 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
var poolL2Txs []common.PoolL2Tx
var discardedL2Txs []common.PoolL2Tx
var l1UserTxs, l1CoordTxs []common.L1Tx
var l1UserTxsExtra, l1CoordTxs []common.L1Tx
var auths [][]byte
var coordIdxs []common.Idx
if skip, reason := p.forgePolicySkipPreSelection(now); skip {
return nil, &reason, nil
// Check if the slot is not yet fulfilled
slotCommitted := false
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, tracerr.Wrap(errForgeBeforeDelay)
}
// 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, nil, tracerr.Wrap(errLastL1BatchNotSynced)
return nil, tracerr.Wrap(errLastL1BatchNotSynced)
}
// 2a: L1+L2 txs
_l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, l1UserTxs)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
} else {
// 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
l1UserTxs = nil
l1UserTxsExtra = nil
}
if skip, reason, err := p.forgePolicySkipPostSelection(now,
l1UserTxs, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
return nil, nil, tracerr.Wrap(err)
} else if skip {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, nil, tracerr.Wrap(err)
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in an open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
}
return nil, &reason, tracerr.Wrap(err)
}
if batchInfo.L1Batch {
@@ -551,7 +490,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
}
// 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxs = l1UserTxs
batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs
batchInfo.L1CoordinatorTxsAuths = auths
batchInfo.CoordIdxs = coordIdxs
@@ -559,10 +498,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs),
batchInfo.BatchNum); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
// Invalidate transactions that become invalid because of
@@ -571,21 +510,21 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
// all the nonces smaller than the current one)
err = p.l2DB.InvalidateOldNonces(idxsNonceFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
// 4. Call BatchBuilder with TxSelector output
configBatch := &batchbuilder.ConfigBatch{
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxs,
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxsExtra,
l1CoordTxs, poolL2Txs)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
batchInfo.L2Txs = l2Txs
@@ -597,7 +536,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil, nil
return batchInfo, nil
}
// waitServerProof gets the generated zkProof & sends it to the SmartContract
@@ -642,7 +581,7 @@ func prepareForgeBatchArgs(batchInfo *BatchInfo) *eth.RollupForgeBatchArgs {
NewLastIdx: int64(zki.Metadata.NewLastIdxRaw),
NewStRoot: zki.Metadata.NewStateRootRaw.BigInt(),
NewExitRoot: zki.Metadata.NewExitRootRaw.BigInt(),
L1UserTxs: batchInfo.L1UserTxs,
L1UserTxs: batchInfo.L1UserTxsExtra,
L1CoordinatorTxs: batchInfo.L1CoordTxs,
L1CoordinatorTxsAuths: batchInfo.L1CoordinatorTxsAuths,
L2TxsData: batchInfo.L2Txs,

View File

@@ -206,7 +206,11 @@ PoolTransfer(0) User2-User3: 300 (126)
require.NoError(t, err)
}
err = pipeline.reset(batchNum, syncStats, syncSCVars)
err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
Rollup: *syncSCVars.Rollup,
Auction: *syncSCVars.Auction,
WDelayer: *syncSCVars.WDelayer,
})
require.NoError(t, err)
// Sanity check
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()
@@ -224,12 +228,12 @@ PoolTransfer(0) User2-User3: 300 (126)
batchNum++
batchInfo, _, err := pipeline.forgeBatch(batchNum)
batchInfo, err := pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 3, len(batchInfo.L2Txs))
batchNum++
batchInfo, _, err = pipeline.forgeBatch(batchNum)
batchInfo, err = pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 0, len(batchInfo.L2Txs))
}

View File

@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
}
func newStateDB(t *testing.T) *statedb.LocalStateDB {

View File

@@ -31,10 +31,10 @@ type TxManager struct {
batchCh chan *BatchInfo
chainID *big.Int
account accounts.Account
consts common.SCConsts
consts synchronizer.SCConsts
stats synchronizer.Stats
vars common.SCVariables
vars synchronizer.SCVariables
statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum
@@ -55,7 +55,7 @@ type TxManager struct {
// NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (
*TxManager, error) {
chainID, err := ethClient.EthChainID()
if err != nil {
@@ -104,7 +104,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
// SetSyncStatsVars is a thread-safe method to set the synchronizer Stats
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariablesPtr) {
vars *synchronizer.SCVariablesPtr) {
select {
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done():
@@ -120,7 +120,7 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
}
}
func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&t.vars, vars)
}
@@ -147,7 +147,7 @@ func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.Tr
auth.Value = big.NewInt(0) // in wei
gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
uint64(len(batchInfo.L1UserTxs))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
auth.GasLimit = gasLimit
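To make the gas-limit formula above concrete, a tiny arithmetic sketch with made-up per-item costs and batch sizes:

```go
package main

import "fmt"

func main() {
	// Hypothetical ForgeBatchGasCost values and batch contents.
	var (
		fixed     uint64 = 300000
		l1UserTx  uint64 = 10000
		l1CoordTx uint64 = 8000
		l2Tx      uint64 = 5000

		nL1User  uint64 = 4
		nL1Coord uint64 = 1
		nL2      uint64 = 120
	)
	gasLimit := fixed + nL1User*l1UserTx + nL1Coord*l1CoordTx + nL2*l2Tx
	fmt.Println(gasLimit) // 948000
}
```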
@@ -608,7 +608,7 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
func (t *TxManager) canForgeAt(blockNum int64) bool {
return canForge(&t.consts.Auction, &t.vars.Auction,
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
t.cfg.ForgerAddress, blockNum, t.cfg.MustForgeAtSlotDeadline)
t.cfg.ForgerAddress, blockNum)
}
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {

View File

@@ -1,14 +1,10 @@
package historydb
import (
"database/sql"
"errors"
"fmt"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr"
@@ -36,18 +32,9 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
// GetBatchInternalAPI returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{}
if err := meddler.QueryRow(
d, batch,
return batch, tracerr.Wrap(meddler.QueryRow(
hdb.dbRead, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -55,11 +42,7 @@ func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*Batc
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE batch_num = $1;`, batchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
batch.CollectedFeesAPI = apitypes.NewCollectedFeesAPI(batch.CollectedFeesDB)
return batch, nil
))
}
// GetBatchesAPI returns the batches applying the given filters
@@ -160,9 +143,6 @@ func (hdb *HistoryDB) GetBatchesAPI(
if len(batches) == 0 {
return batches, 0, nil
}
for i := range batches {
batches[i].CollectedFeesAPI = apitypes.NewCollectedFeesAPI(batches[i].CollectedFeesDB)
}
return batches, batches[0].TotalItems - uint64(len(batches)), nil
}
@@ -200,14 +180,6 @@ func (hdb *HistoryDB) GetBestBidsAPI(
return nil, 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
}
func (hdb *HistoryDB) getBestBidsAPI(
d meddler.DB,
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator
@@ -242,7 +214,7 @@ func (hdb *HistoryDB) getBestBidsAPI(
}
query = hdb.dbRead.Rebind(queryStr)
bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// log.Debug(query)
@@ -725,6 +697,25 @@ func (hdb *HistoryDB) GetExitsAPI(
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
}
// GetBucketUpdatesAPI retrieves latest values for each bucket
func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
var bucketUpdates []*BucketUpdateAPI
err = meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address,
@@ -809,6 +800,29 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
return auctionVars, tracerr.Wrap(err)
}
// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems)
if err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}
// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
@@ -927,6 +941,125 @@ func (hdb *HistoryDB) GetAccountsAPI(
accounts[0].TotalItems - uint64(len(accounts)), nil
}
// GetMetricsAPI returns metrics
func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT
COALESCE (MIN(batch.batch_num), 0) as batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
// Avoid dividing by 0
if seconds == 0 {
seconds++
}
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
} else {
metrics.TransactionsPerBatch = float64(0)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else {
metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
metrics.AvgTransactionFee = 0
}
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0)
AS estimated_time_to_forge_l1 FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
metricsTotals.FirstBatchNum, lastBatchNum,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
return metrics, nil
}
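A back-of-the-envelope sketch of how the figures above are derived from the aggregate queries, using hypothetical 24-hour totals:

```go
package main

import "fmt"

func main() {
	// Made-up totals for a 24h window.
	var (
		totalTxs      = 1800.0
		totalBatches  = 60.0
		totalFeesUSD  = 270.0
		seconds       = 24 * 3600.0
		firstBatchNum = 100.0
		lastBatchNum  = 159.0
	)
	fmt.Println("tx/s:", totalTxs/seconds)                            // ~0.0208
	fmt.Println("tx/batch:", totalTxs/(lastBatchNum-firstBatchNum+1)) // 30
	fmt.Println("batch frequency (s):", seconds/totalBatches)         // 1440
	fmt.Println("avg fee (USD):", totalFeesUSD/totalTxs)              // 0.15
}
```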
// GetAvgTxFeeAPI returns average transaction fee of the last 1h
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
if err != nil {
return 0, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return 0, tracerr.Wrap(err)
}
var avgTransactionFee float64
if metricsTotals.TotalTransactions > 0 {
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
avgTransactionFee = 0
}
return avgTransactionFee, nil
}
// GetCommonAccountAPI returns the account associated to an account idx
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
cancel, err := hdb.apiConnCon.Acquire()
@@ -942,265 +1075,3 @@ func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, erro
)
return account, tracerr.Wrap(err)
}
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
}
func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
d, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// GetNodeInfoAPI returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.GetNodeInfo()
}
// GetBucketUpdatesInternalAPI returns the latest bucket updates
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetNextForgersInternalAPI returns the next forgers
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
auctionConsts *common.AuctionConstants,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForgerAPI{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum - 1
nextForger := NextForgerAPI{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
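For illustration, a hedged sketch of the slot-to-block window arithmetic used above; the constants (40 blocks per slot, genesis at block 0) are invented, only the formulas and the ~15 s/block extrapolation come from the function:
// slotPeriodExample reproduces the Period computation for a single slot
// using hypothetical auction constants.
func slotPeriodExample(slot int64, lastBlock common.Block) Period {
	const blocksPerSlot, genesisBlockNum, secondsPerBlock int64 = 40, 0, 15
	fromBlock := slot*blocksPerSlot + genesisBlockNum
	toBlock := (slot+1)*blocksPerSlot + genesisBlockNum - 1
	return Period{
		SlotNum:   slot,
		FromBlock: fromBlock,
		ToBlock:   toBlock,
		// Timestamps are extrapolated from the last synced block.
		FromTimestamp: lastBlock.Timestamp.Add(time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num)) * time.Second),
		ToTimestamp:   lastBlock.Timestamp.Add(time.Duration(secondsPerBlock*(toBlock-lastBlock.Num)) * time.Second),
	}
}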
// GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (metrics *MetricsAPI, poolLoad int64, err error) {
metrics = &MetricsAPI{}
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: lastBatchNum,
}
if err := meddler.QueryRow(
hdb.dbRead, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++
}
metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch
nBatches := p.ToBatchNum - p.FromBatchNum + 1
if nBatches == 0 { // Avoid dividing by 0
nBatches++
}
if (p.ToBatchNum - p.FromBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else {
metrics.TransactionsPerBatch = 0
}
// Get total fee of that period
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches)
// Set avg transaction fee (only L2 txs have fee)
row = hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2 AND NOT is_l1;`,
p.FromBatchNum, p.ToBatchNum,
)
var nL2Txs int
if err := row.Scan(&nL2Txs); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if nL2Txs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nL2Txs)
} else {
metrics.AvgTransactionFee = 0
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TokenAccounts int64 `meddler:"token_accounts"`
Wallets int64 `meddler:"wallets"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS token_accounts, COUNT(DISTINCT(bjj)) AS wallets FROM account;`,
); err != nil {
return nil, 0, tracerr.Wrap(err)
}
metrics.TokenAccounts = ra.TokenAccounts
metrics.Wallets = ra.Wallets
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, 0, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
// Get amount of txs in the pool
row = hdb.dbRead.QueryRow(
`SELECT COUNT(*) FROM tx_pool WHERE state = $1 AND NOT external_delete;`,
common.PoolL2TxStatePending,
)
if err := row.Scan(&poolLoad); err != nil {
return nil, 0, tracerr.Wrap(err)
}
return metrics, poolLoad, nil
}
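A short worked sketch of the ratios computed above, with invented counts; the zero guards mirror the function:
// metricsRatiosExample: a hypothetical 24h window with 600 txs spread over
// 50 batches spanning 7200 seconds.
func metricsRatiosExample() (txsPerSecond, txsPerBatch, batchFrequency float64) {
	nTxs, nBatches, seconds := 600.0, 50.0, 7200.0
	return nTxs / seconds, nTxs / nBatches, seconds / nBatches // ≈0.083 tx/s, 12 tx/batch, 144 s/batch
}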
// GetStateAPI returns the StateAPI
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getStateAPI(hdb.dbRead)
}


@@ -456,10 +456,13 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
// UpdateTokenValue updates the USD value of a token. Value is the price in
// USD of a normalized token (1 token = 10^decimals units)
func (hdb *HistoryDB) UpdateTokenValue(tokenAddr ethCommon.Address, value float64) error {
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
// Sanitize symbol
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
_, err := hdb.dbWrite.Exec(
"UPDATE token SET usd = $1 WHERE eth_addr = $2;",
value, tokenAddr,
"UPDATE token SET usd = $1 WHERE symbol = $2;",
value, tokenSymbol,
)
return tracerr.Wrap(err)
}
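A hedged usage sketch, assuming a *HistoryDB handle and a token registered with the symbol "DAI" (both hypothetical); the price passed in is per whole, normalized token, as the comment above states:
// updateTokenPriceExample stores a hypothetical 1.002 USD price for the
// token whose symbol is "DAI"; with 18 decimals this is the price of
// 10^18 base units.
func updateTokenPriceExample(hdb *HistoryDB) error {
	return hdb.UpdateTokenValue("DAI", 1.002)
}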
@@ -693,11 +696,11 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
hdb.dbRead, &txs,
hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE 0 END) AS effective_amount,
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE 0 END) AS effective_deposit_amount,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE '\x' END) AS effective_deposit_amount,
tx.eth_block_num, tx.type, tx.batch_num
FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE ORDER BY item_id;`,
)
@@ -839,18 +842,6 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
minBidInfo := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;`
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 {
return nil
@@ -1149,6 +1140,17 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(txn.Commit())
}
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
hdb.dbRead, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// AddAuctionVars inserts auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
@@ -1159,7 +1161,7 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(
hdb.dbRead, &tokens,
"SELECT * FROM token ORDER BY token_id ASC",
"SELECT * FROM TOKEN",
); err != nil {
return nil, tracerr.Wrap(err)
}
@@ -1168,60 +1170,3 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
}
const (
// CreateAccountExtraFeePercentage is the multiplication factor over
// the average fee for CreateAccount that is applied to obtain the
// recommended fee for CreateAccount
CreateAccountExtraFeePercentage float64 = 2.5
// CreateAccountInternalExtraFeePercentage is the multiplication factor
// over the average fee for CreateAccountInternal that is applied to
// obtain the recommended fee for CreateAccountInternal
CreateAccountInternalExtraFeePercentage float64 = 2.0
)
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD, maxFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of batches and accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Compute the recommended fees from the average transaction fee
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount = math.Min(maxFeeUSD,
math.Max(avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccount = math.Min(maxFeeUSD,
math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccountInternal = math.Min(maxFeeUSD,
math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD))
return &recommendedFee, nil
}
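A worked sketch of the clamping above, assuming the constants and imports of this file are in scope, with invented bounds (minFeeUSD = 0.1, maxFeeUSD = 10) and an average fee of 0.05 USD:
// recommendedFeeExample: the existing-account fee is clamped up to the
// minimum, while the create-account fee becomes max(2.5*0.05, 0.1) = 0.125.
func recommendedFeeExample() (existing, creates float64) {
	avgFee, minFeeUSD, maxFeeUSD := 0.05, 0.1, 10.0
	existing = math.Min(maxFeeUSD, math.Max(avgFee, minFeeUSD))                                // 0.1
	creates = math.Min(maxFeeUSD, math.Max(CreateAccountExtraFeePercentage*avgFee, minFeeUSD)) // 0.125
	return existing, creates
}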


@@ -11,7 +11,6 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log"
@@ -167,7 +166,7 @@ func TestBatches(t *testing.T) {
if i%2 != 0 {
// Set value to the token
value := (float64(i) + 5) * 5.389329
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
}
}
@@ -277,7 +276,7 @@ func TestTokens(t *testing.T) {
// Update token value
for i, token := range tokens {
value := 1.01 * float64(i)
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
}
// Fetch tokens
fetchedTokens, err = historyDB.GetTokensTest()
@@ -303,7 +302,7 @@ func TestTokensUTF8(t *testing.T) {
// Generate fake tokens
const nTokens = 5
tokens, ethToken := test.GenTokens(nTokens, blocks)
nonUTFTokens := make([]common.Token, len(tokens))
nonUTFTokens := make([]common.Token, len(tokens)+1)
// Force token.name and token.symbol to be non UTF-8 Strings
for i, token := range tokens {
token.Name = fmt.Sprint("NON-UTF8-NAME-\xc5-", i)
@@ -333,7 +332,7 @@ func TestTokensUTF8(t *testing.T) {
// Update token value
for i, token := range nonUTFTokens {
value := 1.01 * float64(i)
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
}
// Fetch tokens
fetchedTokens, err = historyDB.GetTokensTest()
@@ -1177,7 +1176,7 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err)
}
res, _, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
@@ -1186,8 +1185,8 @@ func TestGetMetricsAPI(t *testing.T) {
// There is a -2 as time for first and last batch is not taken into account
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.Wallets)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
@@ -1255,22 +1254,28 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
assert.NoError(t, err)
}
res, _, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.Wallets)
assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, _, err := historyDBWithACC.GetMetricsInternalAPI(0)
_, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err)
}
@@ -1459,128 +1464,3 @@ func setTestBlocks(from, to int64) []common.Block {
}
return blocks
}
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetStateInternalAPI(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
var f64 float64 = 1.2
var i64 int64 = 8888
addr := ethCommon.HexToAddress("0x1234")
hash := ethCommon.HexToHash("0x5678")
stateAPI := &StateAPI{
NodePublicInfo: NodePublicInfo{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
LastBatch: &BatchAPI{
ItemID: 123,
BatchNum: 456,
EthBlockNum: 789,
EthBlockHash: hash,
Timestamp: time.Now(),
ForgerAddr: addr,
// CollectedFeesDB: map[common.TokenID]*big.Int{
// 0: big.NewInt(11111),
// 1: big.NewInt(21111),
// 2: big.NewInt(31111),
// },
CollectedFeesAPI: apitypes.CollectedFeesAPI(map[common.TokenID]apitypes.BigIntStr{
0: apitypes.BigIntStr("11111"),
1: apitypes.BigIntStr("21111"),
2: apitypes.BigIntStr("31111"),
}),
TotalFeesUSD: &f64,
StateRoot: apitypes.BigIntStr("1234"),
NumAccounts: 11,
ExitRoot: apitypes.BigIntStr("5678"),
ForgeL1TxsNum: &i64,
SlotNum: 44,
ForgedTxs: 23,
TotalItems: 0,
FirstItem: 0,
LastItem: 0,
},
CurrentSlot: 22,
NextForgers: []NextForgerAPI{
{
Coordinator: CoordinatorAPI{
ItemID: 111,
Bidder: addr,
Forger: addr,
EthBlockNum: 566,
URL: "asd",
TotalItems: 0,
FirstItem: 0,
LastItem: 0,
},
Period: Period{
SlotNum: 33,
FromBlock: 55,
ToBlock: 66,
FromTimestamp: time.Now(),
ToTimestamp: time.Now(),
},
},
},
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TokenAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
WithdrawalDelayer: *clientSetup.WDelayerVariables,
RecommendedFee: common.RecommendedFee{
ExistingAccount: 0.15,
},
}
err = historyDB.SetStateInternalAPI(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
MaxPoolTxs: 123,
MinFeeUSD: 0.5,
}
err = historyDB.SetNodeConfig(nodeConfig)
require.NoError(t, err)
dbConstants, err := historyDB.GetConstants()
require.NoError(t, err)
assert.Equal(t, constants, dbConstants)
dbNodeConfig, err := historyDB.GetNodeConfig()
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
require.NoError(t, err)
assert.Equal(t, stateAPI.Network.LastBatch.Timestamp.Unix(),
dbStateAPI.Network.LastBatch.Timestamp.Unix())
dbStateAPI.Network.LastBatch.Timestamp = stateAPI.Network.LastBatch.Timestamp
assert.Equal(t, stateAPI.Network.NextForgers[0].Period.FromTimestamp.Unix(),
dbStateAPI.Network.NextForgers[0].Period.FromTimestamp.Unix())
dbStateAPI.Network.NextForgers[0].Period.FromTimestamp = stateAPI.Network.NextForgers[0].Period.FromTimestamp
assert.Equal(t, stateAPI.Network.NextForgers[0].Period.ToTimestamp.Unix(),
dbStateAPI.Network.NextForgers[0].Period.ToTimestamp.Unix())
dbStateAPI.Network.NextForgers[0].Period.ToTimestamp = stateAPI.Network.NextForgers[0].Period.ToTimestamp
assert.Equal(t, stateAPI, dbStateAPI)
}


@@ -1,173 +0,0 @@
package historydb
import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
// Period represents a time period in ethereum
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
// NextForgerAPI represents the next forger exposed via the API
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// NetworkAPI is the network state exposed via the API
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
PendingL1Txs int `json:"pendingL1Transactions"`
}
// NodePublicInfo is the configuration and metrics of the node that are exposed via the API
type NodePublicInfo struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
// PoolLoad amount of transactions in the pool
PoolLoad int64 `json:"poolLoad"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
NodePublicInfo NodePublicInfo `json:"node"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// Constants contains network constants
type Constants struct {
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
// NodeConfig contains the node config exposed in the API
type NodeConfig struct {
MaxPoolTxs uint32
MinFeeUSD float64
MaxFeeUSD float64
ForgeDelay float64
}
// NodeInfo contains information about the node used when serving the API
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
StateAPI *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
// GetNodeInfo returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
ni := &NodeInfo{}
err := meddler.QueryRow(
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
)
return ni, tracerr.Wrap(err)
}
// GetConstants returns the Constants
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT constants FROM node_info WHERE item_id = 1;",
)
return nodeInfo.Constants, tracerr.Wrap(err)
}
// SetConstants sets the Constants
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
_constants := struct {
Constants *Constants `meddler:"constants,json"`
}{constants}
values, err := meddler.Default.Values(&_constants, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetStateInternalAPI returns the StateAPI
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.StateAPI, tracerr.Wrap(err)
}
// SetStateInternalAPI sets the StateAPI
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
if stateAPI.Network.LastBatch != nil {
stateAPI.Network.LastBatch.CollectedFeesAPI =
apitypes.NewCollectedFeesAPI(stateAPI.Network.LastBatch.CollectedFeesDB)
}
_stateAPI := struct {
StateAPI *StateAPI `meddler:"state,json"`
}{stateAPI}
values, err := meddler.Default.Values(&_stateAPI, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetNodeConfig returns the NodeConfig
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT config FROM node_info WHERE item_id = 1;",
)
return nodeInfo.NodeConfig, tracerr.Wrap(err)
}
// SetNodeConfig sets the NodeConfig
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
_nodeConfig := struct {
NodeConfig *NodeConfig `meddler:"config,json"`
}{nodeConfig}
values, err := meddler.Default.Values(&_nodeConfig, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
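A hedged usage sketch of the single-row pattern above (item_id = 1 is always present, so every setter is an UPDATE); the handle and values are hypothetical:
// storeNodeConfigExample writes and re-reads the node configuration through
// the single node_info row.
func storeNodeConfigExample(hdb *HistoryDB) (*NodeConfig, error) {
	cfg := &NodeConfig{MaxPoolTxs: 1000, MinFeeUSD: 0.1}
	if err := hdb.SetNodeConfig(cfg); err != nil {
		return nil, err
	}
	return hdb.GetNodeConfig()
}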


@@ -6,7 +6,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/iden3/go-merkletree"
@@ -295,8 +295,7 @@ type BatchAPI struct {
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash" meddler:"hash"`
Timestamp time.Time `json:"timestamp" meddler:"timestamp,utctime"`
ForgerAddr ethCommon.Address `json:"forgerAddr" meddler:"forger_addr"`
CollectedFeesDB map[common.TokenID]*big.Int `json:"-" meddler:"fees_collected,json"`
CollectedFeesAPI apitypes.CollectedFeesAPI `json:"collectedFees" meddler:"-"`
CollectedFees apitypes.CollectedFees `json:"collectedFees" meddler:"fees_collected,json"`
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
StateRoot apitypes.BigIntStr `json:"stateRoot" meddler:"state_root"`
NumAccounts int `json:"numAccounts" meddler:"num_accounts"`
@@ -309,17 +308,28 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"`
}
// MetricsAPI defines the metrics of the network
type MetricsAPI struct {
// Metrics defines the metrics of the network
type Metrics struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TokenAccounts int64 `json:"tokenAccounts"`
Wallets int64 `json:"wallets"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
}
// MetricsTotals is used to get intermediate information from the HistoryDB
// to calculate the data to be stored into the Metrics struct
type MetricsTotals struct {
TotalTransactions uint64 `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
TotalBatches int64 `meddler:"total_batches"`
TotalFeesUSD float64 `meddler:"total_fees"`
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
}
// BidAPI is a representation of a bid with additional information
// required by the API
type BidAPI struct {
@@ -370,27 +380,6 @@ type RollupVariablesAPI struct {
SafeMode bool `json:"safeMode" meddler:"safe_mode"`
}
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
rollupVars := RollupVariablesAPI{
EthBlockNum: rollupVariables.EthBlockNum,
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
WithdrawalDelay: rollupVariables.WithdrawalDelay,
SafeMode: rollupVariables.SafeMode,
}
for i, bucket := range rollupVariables.Buckets {
rollupVars.Buckets[i] = BucketParamsAPI{
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
}
}
return &rollupVars
}
// AuctionVariablesAPI are the variables of the Auction Smart Contract
type AuctionVariablesAPI struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -415,28 +404,3 @@ type AuctionVariablesAPI struct {
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
}
// NewAuctionVariablesAPI creates an AuctionVariablesAPI from common.AuctionVariables
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
auctionVars := AuctionVariablesAPI{
EthBlockNum: auctionVariables.EthBlockNum,
DonationAddress: auctionVariables.DonationAddress,
BootCoordinator: auctionVariables.BootCoordinator,
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
Outbidding: auctionVariables.Outbidding,
SlotDeadline: auctionVariables.SlotDeadline,
}
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionVars.AllocationRatio[i] = ratio
}
return &auctionVars
}


@@ -62,10 +62,6 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
feeUSD, l2db.minFeeUSD))
}
if feeUSD > l2db.maxFeeUSD {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) > maxFeeUSD (%v)",
feeUSD, l2db.maxFeeUSD))
}
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
@@ -84,7 +80,7 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
q := fmt.Sprintf(
`INSERT INTO tx_pool (%s)
SELECT %s
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v AND NOT external_delete) < $%v;`,
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
namesPart, valuesPart,
len(values)+1, len(values)+2) //nolint:gomnd
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
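A simplified sketch of the capped-insert pattern built above, with hypothetical column names; the real statement is assembled dynamically from meddler metadata as shown:
// cappedInsertExample inserts a pending tx only while the pending pool is
// below maxTxs, enforcing the cap and the insert in a single statement.
const cappedInsertExample = `
INSERT INTO tx_pool (tx_id, state)
SELECT $1, $2
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $2) < $3;`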


@@ -27,7 +27,6 @@ type L2DB struct {
ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
maxFeeUSD float64
apiConnCon *db.APIConnectionController
}
@@ -39,7 +38,6 @@ func NewL2DB(
safetyPeriod common.BatchNum,
maxTxs uint32,
minFeeUSD float64,
maxFeeUSD float64,
TTL time.Duration,
apiConnCon *db.APIConnectionController,
) *L2DB {
@@ -50,7 +48,6 @@ func NewL2DB(
ttl: TTL,
maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
maxFeeUSD: maxFeeUSD,
apiConnCon: apiConnCon,
}
}
@@ -207,7 +204,7 @@ func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx
err := meddler.QueryAll(
l2db.dbRead, &txs,
selectPoolTxCommon+"WHERE state = $1 AND NOT external_delete;",
selectPoolTxCommon+"WHERE state = $1",
common.PoolL2TxStatePending,
)
return db.SlicePtrsToSlice(txs).([]common.PoolL2Tx), tracerr.Wrap(err)


@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, nil)
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil)
// Run tests
@@ -121,7 +121,7 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
}
tokens[token.TokenID] = readToken
// Set value to the tokens
err := historyDB.UpdateTokenValue(readToken.EthAddr, *readToken.USD)
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD)
if err != nil {
return tracerr.Wrap(err)
}


@@ -6,7 +6,7 @@ import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub"
)


@@ -1,11 +1,5 @@
-- +migrate Up
-- NOTE: We use "DECIMAL(78,0)" to encode go *big.Int types. All the *big.Int
-- that we deal with represent a value in the SNARK field, which is an integer
-- of 256 bits. `log(2**256, 10) = 77.06`: that is, a 256 bit number can have
-- at most 78 digits, so we use this value to specify the precision in the
-- PostgreSQL DECIMAL guaranteeing that we will never lose precision.
-- History
CREATE TABLE block (
eth_block_num BIGINT PRIMARY KEY,
@@ -28,10 +22,10 @@ CREATE TABLE batch (
forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
fees_collected BYTEA NOT NULL,
fee_idxs_coordinator BYTEA NOT NULL,
state_root DECIMAL(78,0) NOT NULL,
state_root BYTEA NOT NULL,
num_accounts BIGINT NOT NULL,
last_idx BIGINT NOT NULL,
exit_root DECIMAL(78,0) NOT NULL,
exit_root BYTEA NOT NULL,
forge_l1_txs_num BIGINT,
slot_num BIGINT NOT NULL,
total_fees_usd NUMERIC
@@ -40,7 +34,7 @@ CREATE TABLE batch (
CREATE TABLE bid (
item_id SERIAL PRIMARY KEY,
slot_num BIGINT NOT NULL,
bid_value DECIMAL(78,0) NOT NULL,
bid_value BYTEA NOT NULL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
bidder_addr BYTEA NOT NULL -- fake foreign key for coordinator
);
@@ -112,7 +106,7 @@ CREATE TABLE account_update (
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance DECIMAL(78,0) NOT NULL
balance BYTEA NOT NULL
);
CREATE TABLE exit_tree (
@@ -120,7 +114,7 @@ CREATE TABLE exit_tree (
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE,
merkle_proof BYTEA NOT NULL,
balance DECIMAL(78,0) NOT NULL,
balance BYTEA NOT NULL,
instant_withdrawn BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
delayed_withdraw_request BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
owner BYTEA,
@@ -170,7 +164,7 @@ CREATE TABLE tx (
to_idx BIGINT NOT NULL,
to_eth_addr BYTEA,
to_bjj BYTEA,
amount DECIMAL(78,0) NOT NULL,
amount BYTEA NOT NULL,
amount_success BOOLEAN NOT NULL DEFAULT true,
amount_f NUMERIC NOT NULL,
token_id INT NOT NULL REFERENCES token (token_id),
@@ -180,7 +174,7 @@ CREATE TABLE tx (
-- L1
to_forge_l1_txs_num BIGINT,
user_origin BOOLEAN,
deposit_amount DECIMAL(78,0),
deposit_amount BYTEA,
deposit_amount_success BOOLEAN NOT NULL DEFAULT true,
deposit_amount_f NUMERIC,
deposit_amount_usd NUMERIC,
@@ -550,7 +544,7 @@ FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs();
CREATE TABLE rollup_vars (
eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
fee_add_token DECIMAL(78,0) NOT NULL,
fee_add_token BYTEA NOT NULL,
forge_l1_timeout BIGINT NOT NULL,
withdrawal_delay BIGINT NOT NULL,
buckets BYTEA NOT NULL,
@@ -562,7 +556,7 @@ CREATE TABLE bucket_update (
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
num_bucket BIGINT NOT NULL,
block_stamp BIGINT NOT NULL,
withdrawals DECIMAL(78,0) NOT NULL
withdrawals BYTEA NOT NULL
);
CREATE TABLE token_exchange (
@@ -578,7 +572,7 @@ CREATE TABLE escape_hatch_withdrawal (
who_addr BYTEA NOT NULL,
to_addr BYTEA NOT NULL,
token_addr BYTEA NOT NULL,
amount DECIMAL(78,0) NOT NULL
amount BYTEA NOT NULL
);
CREATE TABLE auction_vars (
@@ -616,7 +610,7 @@ CREATE TABLE tx_pool (
effective_to_eth_addr BYTEA,
effective_to_bjj BYTEA,
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
amount DECIMAL(78,0) NOT NULL,
amount BYTEA NOT NULL,
amount_f NUMERIC NOT NULL,
fee SMALLINT NOT NULL,
nonce BIGINT NOT NULL,
@@ -630,7 +624,7 @@ CREATE TABLE tx_pool (
rq_to_eth_addr BYTEA,
rq_to_bjj BYTEA,
rq_token_id INT,
rq_amount DECIMAL(78,0),
rq_amount BYTEA,
rq_fee SMALLINT,
rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL,
@@ -667,32 +661,12 @@ CREATE TABLE account_creation_auth (
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
);
CREATE TABLE node_info (
item_id SERIAL PRIMARY KEY,
state BYTEA, -- object returned by GET /state
config BYTEA, -- Node config
-- max_pool_txs BIGINT, -- L2DB config
-- min_fee NUMERIC, -- L2DB config
constants BYTEA -- info of the network that is constant
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
CREATE VIEW account_state AS SELECT DISTINCT idx,
first_value(nonce) OVER w AS nonce,
first_value(balance) OVER w AS balance,
first_value(eth_block_num) OVER w AS eth_block_num,
first_value(batch_num) OVER w AS batch_num
FROM account_update
window w AS (partition by idx ORDER BY item_id desc);
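-- A hedged read sketch against the account_state view defined above: the
-- window already keeps only the newest account_update row per idx, so the
-- latest balance of one account (idx 256 is an invented example) is a plain
-- filter:
-- SELECT idx, nonce, balance, batch_num FROM account_state WHERE idx = 256;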
-- +migrate Down
-- triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
-- drop views IF EXISTS
DROP VIEW IF EXISTS account_state;
-- functions
DROP FUNCTION IF EXISTS hez_idx;
DROP FUNCTION IF EXISTS set_token_usd_update;
@@ -701,7 +675,6 @@ DROP FUNCTION IF EXISTS set_tx;
DROP FUNCTION IF EXISTS forge_l1_user_txs;
DROP FUNCTION IF EXISTS set_pool_tx;
-- drop tables IF EXISTS
DROP TABLE IF EXISTS node_info;
DROP TABLE IF EXISTS account_creation_auth;
DROP TABLE IF EXISTS tx_pool;
DROP TABLE IF EXISTS auction_vars;


@@ -13,9 +13,6 @@ import (
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
//nolint:errcheck // driver for postgres DB
_ "github.com/lib/pq"
migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
"golang.org/x/sync/semaphore"
@@ -168,11 +165,7 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
}
field := fieldPtr.(**big.Int)
var ok bool
*field, ok = new(big.Int).SetString(*ptr, 10)
if !ok {
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", *ptr))
}
*field = new(big.Int).SetBytes([]byte(*ptr))
return nil
}
@@ -180,7 +173,7 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) {
field := fieldPtr.(*big.Int)
return field.String(), nil
return field.Bytes(), nil
}
// BigIntNullMeddler encodes or decodes the field value to or from JSON
@@ -205,12 +198,7 @@ func (b BigIntNullMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
if ptr == nil {
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
}
var ok bool
*field, ok = new(big.Int).SetString(string(ptr), 10)
if !ok {
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", string(ptr)))
}
*field = new(big.Int).SetBytes(ptr)
return nil
}
@@ -220,7 +208,7 @@ func (b BigIntNullMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}
if field == nil {
return nil, nil
}
return field.String(), nil
return field.Bytes(), nil
}
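A minimal sketch of the byte round-trip these meddlers now perform, using only the standard library; note that big.Int.Bytes drops the sign and that an empty value decodes to 0, which is why '\x' reads back as zero in GetAllL1UserTxs:
// bigIntBytesRoundTrip mirrors PreWrite/PostRead above: the value is stored
// as its big-endian bytes and reconstructed with SetBytes.
func bigIntBytesRoundTrip(v *big.Int) *big.Int {
	stored := v.Bytes()                  // what PreWrite hands to the driver
	return new(big.Int).SetBytes(stored) // what PostRead rebuilds
}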
// SliceToSlicePtrs converts any []Foo to []*Foo


@@ -1,13 +1,9 @@
package db
import (
"math/big"
"os"
"testing"
"github.com/russross/meddler"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type foo struct {
@@ -37,42 +33,3 @@ func TestSlicePtrsToSlice(t *testing.T) {
assert.Equal(t, *a[i], b[i])
}
}
func TestBigInt(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
db, err := InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
defer func() {
_, err := db.Exec("DROP TABLE IF EXISTS test_big_int;")
require.NoError(t, err)
err = db.Close()
require.NoError(t, err)
}()
_, err = db.Exec("DROP TABLE IF EXISTS test_big_int;")
require.NoError(t, err)
_, err = db.Exec(`CREATE TABLE test_big_int (
item_id SERIAL PRIMARY KEY,
value1 DECIMAL(78, 0) NOT NULL,
value2 DECIMAL(78, 0),
value3 DECIMAL(78, 0)
);`)
require.NoError(t, err)
type Entry struct {
ItemID int `meddler:"item_id"`
Value1 *big.Int `meddler:"value1,bigint"`
Value2 *big.Int `meddler:"value2,bigintnull"`
Value3 *big.Int `meddler:"value3,bigintnull"`
}
entry := Entry{ItemID: 1, Value1: big.NewInt(1234567890), Value2: big.NewInt(9876543210), Value3: nil}
err = meddler.Insert(db, "test_big_int", &entry)
require.NoError(t, err)
var dbEntry Entry
err = meddler.QueryRow(db, &dbEntry, "SELECT * FROM test_big_int WHERE item_id = 1;")
require.NoError(t, err)
assert.Equal(t, entry, dbEntry)
}


@@ -15,7 +15,6 @@ import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
@@ -55,7 +54,6 @@ const (
// Node is the Hermez Node
type Node struct {
nodeAPI *NodeAPI
stateAPIUpdater *stateapiupdater.Updater
debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
// Coordinator
@@ -69,7 +67,6 @@ type Node struct {
mode Mode
sqlConnRead *sqlx.DB
sqlConnWrite *sqlx.DB
historyDB *historydb.HistoryDB
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
@@ -230,7 +227,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
@@ -245,36 +241,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
initSCVars := sync.SCVars()
scConsts := common.SCConsts{
scConsts := synchronizer.SCConsts{
Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(),
}
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
MaxFeeUSD: cfg.Coordinator.L2DB.MaxFeeUSD,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration.Seconds(),
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator
if mode == ModeCoordinator {
// Unlock FeeAccount EthAddr in the keystore to generate the
@@ -367,9 +339,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
MustForgeAtSlotDeadline: cfg.Coordinator.MustForgeAtSlotDeadline,
IgnoreSlotCommitment: cfg.Coordinator.IgnoreSlotCommitment,
ForgeOncePerSlotIfTxs: cfg.Coordinator.ForgeOncePerSlotIfTxs,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
@@ -398,7 +367,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs,
client,
&scConsts,
initSCVars,
&synchronizer.SCVariables{
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -430,29 +403,35 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
coord, cfg.API.Explorer,
server,
historyDB,
stateDB,
l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
},
cfg.Coordinator.ForgeDelay.Duration,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
}
var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" {
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, stateDB, sync)
}
priceUpdater, err := priceupdater.NewPriceUpdater(
cfg.PriceUpdater.DefaultUpdateMethod,
cfg.PriceUpdater.TokensConfig,
historyDB,
cfg.PriceUpdater.URLBitfinexV2,
cfg.PriceUpdater.URLCoinGeckoV3,
)
priceUpdater, err := priceupdater.NewPriceUpdater(cfg.PriceUpdater.URL,
priceupdater.APIType(cfg.PriceUpdater.Type), historyDB)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &Node{
stateAPIUpdater: stateAPIUpdater,
nodeAPI: nodeAPI,
debugAPI: debugAPI,
priceUpdater: priceUpdater,
@@ -462,129 +441,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
mode: mode,
sqlConnRead: dbRead,
sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx,
cancel: cancel,
}, nil
}
// APIServer is a server that only runs the API
type APIServer struct {
nodeAPI *NodeAPI
mode Mode
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
}
// NewAPIServer creates a new APIServer
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
apiConnCon := dbUtils.NewAPIConnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
0,
apiConnCon,
)
}
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = cfg.Coordinator.API.Coordinator
}
nodeAPI, err := NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
l2DB,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &APIServer{
nodeAPI: nodeAPI,
mode: mode,
ctx: ctx,
cancel: cancel,
}, nil
}
// Start the APIServer
func (s *APIServer) Start() {
log.Infow("Starting api server...", "mode", s.mode)
log.Info("Starting NodeAPI...")
s.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
s.wg.Done()
}()
if err := s.nodeAPI.Run(s.ctx); err != nil {
if s.ctx.Err() != nil {
return
}
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Stop the APIServer
func (s *APIServer) Stop() {
log.Infow("Stopping NodeAPI...")
s.cancel()
s.wg.Wait()
}
// NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint
api *api.API
@@ -604,7 +465,10 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine,
hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB,
config *api.Config,
forgeDelay time.Duration,
) (*NodeAPI, error) {
engine := gin.Default()
engine.NoRoute(handleNoRoute)
@@ -614,6 +478,10 @@ func NewNodeAPI(
engine,
hdb,
l2db,
config,
&api.NodeConfig{
ForgeDelay: forgeDelay.Seconds(),
},
)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -659,50 +527,58 @@ func (a *NodeAPI) Run(ctx context.Context) error {
}
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariablesPtr, batches []common.BatchData) error {
vars synchronizer.SCVariablesPtr, batches []common.BatchData) {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats,
Vars: *vars,
Vars: vars,
Batches: batches,
})
}
n.stateAPIUpdater.SetSCVars(vars)
if n.nodeAPI != nil {
if vars.Rollup != nil {
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
}
if vars.Auction != nil {
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
}
if vars.WDelayer != nil {
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
}
if stats.Synced() {
if err := n.stateAPIUpdater.UpdateNetworkInfo(
if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
} else {
n.stateAPIUpdater.UpdateNetworkInfoBlock(
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariables) error {
vars synchronizer.SCVariablesPtr) {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: *vars.AsPtr(),
Vars: vars,
})
}
n.stateAPIUpdater.SetSCVars(vars.AsPtr())
n.stateAPIUpdater.UpdateNetworkInfoBlock(
if n.nodeAPI != nil {
vars := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
@@ -718,20 +594,16 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars()
if err := n.handleReorg(ctx, stats, vars); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
n.handleReorg(ctx, stats, vars)
return nil, time.Duration(0), nil
} else if blockData != nil {
// case: new block
vars := common.SCVariablesPtr{
vars := synchronizer.SCVariablesPtr{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,
}
if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
return &blockData.Block, time.Duration(0), nil
} else {
// case: no block
@@ -750,9 +622,7 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized
stats := n.sync.Stats()
vars := n.sync.SCVars()
if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
log.Fatalw("Node.handleNewBlock", "err", err)
}
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
n.wg.Add(1)
go func() {
@@ -839,25 +709,18 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1)
go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
}
for {
select {
case <-n.ctx.Done():
log.Info("ApiStateUpdater.UpdateMetrics loop done")
log.Info("API.UpdateMetrics loop done")
n.wg.Done()
return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
}
}
}
@@ -866,25 +729,18 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1)
go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err)
}
for {
select {
case <-n.ctx.Done():
log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
log.Info("API.UpdateRecommendedFee loop done")
n.wg.Done()
return
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err)
}
}
}


@@ -20,107 +20,57 @@ const (
defaultIdleConnTimeout = 2 * time.Second
)
// UpdateMethodType defines the token price update mechanism
type UpdateMethodType string
// APIType defines the token exchange API
type APIType string
const (
// UpdateMethodTypeBitFinexV2 is the http API used by bitfinex V2
UpdateMethodTypeBitFinexV2 UpdateMethodType = "bitfinexV2"
// UpdateMethodTypeCoingeckoV3 is the http API used by coingecko V3
UpdateMethodTypeCoingeckoV3 UpdateMethodType = "coingeckoV3"
// UpdateMethodTypeStatic is the value given by the configuration
UpdateMethodTypeStatic UpdateMethodType = "static"
// UpdateMethodTypeIgnore indicates that the value should not be updated; to set
// the value to 0 it is better to use UpdateMethodTypeStatic
UpdateMethodTypeIgnore UpdateMethodType = "ignore"
// APITypeBitFinexV2 is the http API used by bitfinex V2
APITypeBitFinexV2 APIType = "bitfinexV2"
// APITypeCoingeckoV3 is the http API used by coingecko V3
APITypeCoingeckoV3 APIType = "coingeckoV3"
)
func (t *UpdateMethodType) valid() bool {
func (t *APIType) valid() bool {
switch *t {
case UpdateMethodTypeBitFinexV2:
case APITypeBitFinexV2:
return true
case UpdateMethodTypeCoingeckoV3:
return true
case UpdateMethodTypeStatic:
return true
case UpdateMethodTypeIgnore:
case APITypeCoingeckoV3:
return true
default:
return false
}
}
// TokenConfig specifies how a single token gets its price updated
type TokenConfig struct {
UpdateMethod UpdateMethodType
StaticValue float64 // required by UpdateMethodTypeStatic
Symbol string
Addr ethCommon.Address
}
func (t *TokenConfig) valid() bool {
if (t.Addr == common.EmptyAddr && t.Symbol != "ETH") ||
(t.Symbol == "" && t.UpdateMethod == UpdateMethodTypeBitFinexV2) {
return false
}
return t.UpdateMethod.valid()
}
// PriceUpdater definition
type PriceUpdater struct {
db *historydb.HistoryDB
defaultUpdateMethod UpdateMethodType
tokensList []historydb.TokenSymbolAndAddr
tokensConfig map[ethCommon.Address]TokenConfig
clientCoingeckoV3 *sling.Sling
clientBitfinexV2 *sling.Sling
apiURL string
apiType APIType
tokens []historydb.TokenSymbolAndAddr
}
// NewPriceUpdater is the constructor for the updater
func NewPriceUpdater(
defaultUpdateMethodType UpdateMethodType,
tokensConfig []TokenConfig,
db *historydb.HistoryDB,
bitfinexV2URL, coingeckoV3URL string,
) (*PriceUpdater, error) {
// Validate params
if !defaultUpdateMethodType.valid() || defaultUpdateMethodType == UpdateMethodTypeStatic {
return nil, tracerr.Wrap(
fmt.Errorf("Invalid defaultUpdateMethodType: %v", defaultUpdateMethodType),
)
func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater,
error) {
if !apiType.valid() {
return nil, tracerr.Wrap(fmt.Errorf("Invalid apiType: %v", apiType))
}
tokensConfigMap := make(map[ethCommon.Address]TokenConfig)
for _, t := range tokensConfig {
if !t.valid() {
return nil, tracerr.Wrap(fmt.Errorf("Invalid tokensConfig, wrong entry: %+v", t))
}
tokensConfigMap[t.Addr] = t
}
// Init
tr := &http.Transport{
MaxIdleConns: defaultMaxIdleConns,
IdleConnTimeout: defaultIdleConnTimeout,
DisableCompression: true,
}
httpClient := &http.Client{Transport: tr}
return &PriceUpdater{
db: db,
defaultUpdateMethod: defaultUpdateMethodType,
tokensList: []historydb.TokenSymbolAndAddr{},
tokensConfig: tokensConfigMap,
clientCoingeckoV3: sling.New().Base(coingeckoV3URL).Client(httpClient),
clientBitfinexV2: sling.New().Base(bitfinexV2URL).Client(httpClient),
apiURL: apiURL,
apiType: apiType,
tokens: []historydb.TokenSymbolAndAddr{},
}, nil
}
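For context, a minimal usage sketch of the simplified constructor above (not part of the diff): the import paths, the Coingecko base URL and the one-minute refresh interval are illustrative assumptions; only NewPriceUpdater, UpdateTokenList and UpdatePrices come from the code shown here.
package example
import (
	"context"
	"log"
	"time"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/hermez-node/priceupdater"
)
// pollPrices refreshes the token list and prices on a fixed interval until ctx ends.
func pollPrices(ctx context.Context, db *historydb.HistoryDB) error {
	pu, err := priceupdater.NewPriceUpdater(
		"https://api.coingecko.com/api/v3/", priceupdater.APITypeCoingeckoV3, db)
	if err != nil {
		return err
	}
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := pu.UpdateTokenList(); err != nil {
				log.Println("UpdateTokenList:", err)
				continue
			}
			pu.UpdatePrices(ctx)
		}
	}
}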
func (p *PriceUpdater) getTokenPriceBitfinex(ctx context.Context, tokenSymbol string) (float64, error) {
func getTokenPriceBitfinex(ctx context.Context, client *sling.Sling,
tokenSymbol string) (float64, error) {
state := [10]float64{}
url := "ticker/t" + tokenSymbol + "USD"
req, err := p.clientBitfinexV2.New().Get(url).Request()
req, err := client.New().Get("ticker/t" + tokenSymbol + "USD").Request()
if err != nil {
return 0, tracerr.Wrap(err)
}
res, err := p.clientBitfinexV2.Do(req.WithContext(ctx), &state, nil)
res, err := client.Do(req.WithContext(ctx), &state, nil)
if err != nil {
return 0, tracerr.Wrap(err)
}
@@ -130,7 +80,8 @@ func (p *PriceUpdater) getTokenPriceBitfinex(ctx context.Context, tokenSymbol st
return state[6], nil
}
func (p *PriceUpdater) getTokenPriceCoingecko(ctx context.Context, tokenAddr ethCommon.Address) (float64, error) {
func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling,
tokenAddr ethCommon.Address) (float64, error) {
responseObject := make(map[string]map[string]float64)
var url string
var id string
@@ -142,11 +93,11 @@ func (p *PriceUpdater) getTokenPriceCoingecko(ctx context.Context, tokenAddr eth
url = "simple/token_price/ethereum?contract_addresses=" +
id + "&vs_currencies=usd"
}
req, err := p.clientCoingeckoV3.New().Get(url).Request()
req, err := client.New().Get(url).Request()
if err != nil {
return 0, tracerr.Wrap(err)
}
res, err := p.clientCoingeckoV3.Do(req.WithContext(ctx), &responseObject, nil)
res, err := client.Do(req.WithContext(ctx), &responseObject, nil)
if err != nil {
return 0, tracerr.Wrap(err)
}
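For illustration only (not part of the diff): the Coingecko branch above builds a URL of the form simple/token_price/ethereum?contract_addresses=<addr>&vs_currencies=usd and decodes the body into map[string]map[string]float64, so the price is presumably read via the contract address and the "usd" key. A tiny standalone sketch of that decoding, with a made-up payload:
package example
import (
	"encoding/json"
	"fmt"
)
func decodeExample() error {
	// Made-up response body; the address and the price are example values.
	payload := []byte(`{"0x6b175474e89094c44da98b954eedeac495271d0f": {"usd": 1.0}}`)
	responseObject := make(map[string]map[string]float64)
	if err := json.Unmarshal(payload, &responseObject); err != nil {
		return err
	}
	fmt.Println(responseObject["0x6b175474e89094c44da98b954eedeac495271d0f"]["usd"]) // 1
	return nil
}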
@@ -163,50 +114,43 @@ func (p *PriceUpdater) getTokenPriceCoingecko(ctx context.Context, tokenAddr eth
// UpdatePrices is triggered by the Coordinator, and internally will update the
// token prices in the db
func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
for _, token := range p.tokensConfig {
tr := &http.Transport{
MaxIdleConns: defaultMaxIdleConns,
IdleConnTimeout: defaultIdleConnTimeout,
DisableCompression: true,
}
httpClient := &http.Client{Transport: tr}
client := sling.New().Base(p.apiURL).Client(httpClient)
for _, token := range p.tokens {
var tokenPrice float64
var err error
switch token.UpdateMethod {
case UpdateMethodTypeBitFinexV2:
tokenPrice, err = p.getTokenPriceBitfinex(ctx, token.Symbol)
case UpdateMethodTypeCoingeckoV3:
tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr)
case UpdateMethodTypeStatic:
tokenPrice = token.StaticValue
case UpdateMethodTypeIgnore:
continue
switch p.apiType {
case APITypeBitFinexV2:
tokenPrice, err = getTokenPriceBitfinex(ctx, client, token.Symbol)
case APITypeCoingeckoV3:
tokenPrice, err = getTokenPriceCoingecko(ctx, client, token.Addr)
}
if ctx.Err() != nil {
return
}
if err != nil {
log.Warnw("token price not updated (get error)",
"err", err, "token", token.Symbol, "updateMethod", token.UpdateMethod)
"err", err, "token", token.Symbol, "apiType", p.apiType)
}
if err = p.db.UpdateTokenValue(token.Addr, tokenPrice); err != nil {
if err = p.db.UpdateTokenValue(token.Symbol, tokenPrice); err != nil {
log.Errorw("token price not updated (db error)",
"err", err, "token", token.Symbol, "updateMethod", token.UpdateMethod)
"err", err, "token", token.Symbol, "apiType", p.apiType)
}
}
}
// UpdateTokenList gets the registered token symbols from HistoryDB
func (p *PriceUpdater) UpdateTokenList() error {
dbTokens, err := p.db.GetTokenSymbolsAndAddrs()
tokens, err := p.db.GetTokenSymbolsAndAddrs()
if err != nil {
return tracerr.Wrap(err)
}
// For each token from the DB
for _, dbToken := range dbTokens {
// If the token doesn't exist in the config list,
// add it with the default update method
if _, ok := p.tokensConfig[dbToken.Addr]; !ok {
p.tokensConfig[dbToken.Addr] = TokenConfig{
UpdateMethod: p.defaultUpdateMethod,
Symbol: dbToken.Symbol,
Addr: dbToken.Addr,
}
}
}
p.tokens = tokens
return nil
}


@@ -16,9 +16,7 @@ import (
var historyDB *historydb.HistoryDB
const usdtAddr = "0xdac17f958d2ee523a2206206994597c13d831ec7"
func TestPriceUpdaterBitfinex(t *testing.T) {
func TestMain(m *testing.M) {
// Init DB
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
@@ -31,113 +29,60 @@ func TestPriceUpdaterBitfinex(t *testing.T) {
// Populate DB
// Gen blocks and add them to DB
blocks := test.GenBlocks(1, 2)
require.NoError(t, historyDB.AddBlocks(blocks))
err = historyDB.AddBlocks(blocks)
if err != nil {
panic(err)
}
// Gen tokens and add them to DB
tokens := []common.Token{
{
tokens := []common.Token{}
tokens = append(tokens, common.Token{
TokenID: 1,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x1"),
EthAddr: ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f"),
Name: "DAI",
Symbol: "DAI",
Decimals: 18,
}, // Used to test get by SC addr
{
TokenID: 2,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress(usdtAddr),
Name: "Tether",
Symbol: "USDT",
Decimals: 18,
}, // Used to test get by token symbol
{
TokenID: 3,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x2"),
Name: "FOO",
Symbol: "FOO",
Decimals: 18,
}, // Used to test ignore
{
TokenID: 4,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x3"),
Name: "BAR",
Symbol: "BAR",
Decimals: 18,
}, // Used to test static
{
TokenID: 5,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"),
Name: "Uniswap",
Symbol: "UNI",
Decimals: 18,
}, // Used to test default
}
require.NoError(t, historyDB.AddTokens(tokens)) // ETH token exist in DB by default
// Update token price used to test ignore
ignoreValue := 44.44
require.NoError(t, historyDB.UpdateTokenValue(tokens[2].EthAddr, ignoreValue))
// Prepare token config
staticValue := 0.12345
tc := []TokenConfig{
// ETH and UNI tokens use default method
{ // DAI uses SC addr
UpdateMethod: UpdateMethodTypeBitFinexV2,
Addr: ethCommon.HexToAddress("0x1"),
Symbol: "DAI",
},
{ // USDT uses symbol
UpdateMethod: UpdateMethodTypeCoingeckoV3,
Addr: ethCommon.HexToAddress(usdtAddr),
},
{ // FOO uses ignore
UpdateMethod: UpdateMethodTypeIgnore,
Addr: ethCommon.HexToAddress("0x2"),
},
{ // BAR uses static
UpdateMethod: UpdateMethodTypeStatic,
Addr: ethCommon.HexToAddress("0x3"),
StaticValue: staticValue,
},
})
err = historyDB.AddTokens(tokens)
if err != nil {
panic(err)
}
bitfinexV2URL := "https://api-pub.bitfinex.com/v2/"
coingeckoV3URL := "https://api.coingecko.com/api/v3/"
result := m.Run()
os.Exit(result)
}
func TestPriceUpdaterBitfinex(t *testing.T) {
// Init price updater
pu, err := NewPriceUpdater(
UpdateMethodTypeCoingeckoV3,
tc,
historyDB,
bitfinexV2URL,
coingeckoV3URL,
)
pu, err := NewPriceUpdater("https://api-pub.bitfinex.com/v2/", APITypeBitFinexV2, historyDB)
require.NoError(t, err)
// Update token list
require.NoError(t, pu.UpdateTokenList())
assert.NoError(t, pu.UpdateTokenList())
// Update prices
pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
// Check results: get tokens from DB
func TestPriceUpdaterCoingecko(t *testing.T) {
// Init price updater
pu, err := NewPriceUpdater("https://api.coingecko.com/api/v3/", APITypeCoingeckoV3, historyDB)
require.NoError(t, err)
// Update token list
assert.NoError(t, pu.UpdateTokenList())
// Update prices
pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
func assertTokenHasPriceAndClean(t *testing.T) {
// Check that prices have been updated
fetchedTokens, err := historyDB.GetTokensTest()
require.NoError(t, err)
// Check that tokens that are updated via API have value:
// ETH
require.NotNil(t, fetchedTokens[0].USDUpdate)
assert.Greater(t, *fetchedTokens[0].USD, 0.0)
// DAI
require.NotNil(t, fetchedTokens[1].USDUpdate)
assert.Greater(t, *fetchedTokens[1].USD, 0.0)
// USDT
require.NotNil(t, fetchedTokens[2].USDUpdate)
assert.Greater(t, *fetchedTokens[2].USD, 0.0)
// UNI
require.NotNil(t, fetchedTokens[5].USDUpdate)
assert.Greater(t, *fetchedTokens[5].USD, 0.0)
// Check ignored token
assert.Equal(t, ignoreValue, *fetchedTokens[3].USD)
// Check static value
assert.Equal(t, staticValue, *fetchedTokens[4].USD)
// TokenID 0 (ETH) is always on the DB
assert.Equal(t, 2, len(fetchedTokens))
for _, token := range fetchedTokens {
require.NotNil(t, token.USD)
require.NotNil(t, token.USDUpdate)
assert.Greater(t, *token.USD, 0.0)
}
}


@@ -183,6 +183,28 @@ type StartBlockNums struct {
WDelayer int64
}
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup common.RollupVariables `validate:"required"`
Auction common.AuctionVariables `validate:"required"`
WDelayer common.WDelayerVariables `validate:"required"`
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *common.RollupVariables `validate:"required"`
Auction *common.AuctionVariables `validate:"required"`
WDelayer *common.WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup common.RollupConstants
Auction common.AuctionConstants
WDelayer common.WDelayerConstants
}
// Config is the Synchronizer configuration
type Config struct {
StatsRefreshPeriod time.Duration
@@ -192,14 +214,14 @@ type Config struct {
// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient eth.ClientInterface
consts common.SCConsts
consts SCConsts
historyDB *historydb.HistoryDB
l2DB *l2db.L2DB
stateDB *statedb.StateDB
cfg Config
initVars common.SCVariables
initVars SCVariables
startBlockNum int64
vars common.SCVariables
vars SCVariables
stats *StatsHolder
resetStateFailed bool
}
@@ -222,7 +244,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err))
}
consts := common.SCConsts{
consts := SCConsts{
Rollup: *rollupConstants,
Auction: *auctionConstants,
WDelayer: *wDelayerConstants,
@@ -288,11 +310,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
}
// SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() *common.SCVariables {
return &common.SCVariables{
Rollup: *s.vars.Rollup.Copy(),
Auction: *s.vars.Auction.Copy(),
WDelayer: *s.vars.WDelayer.Copy(),
func (s *Synchronizer) SCVars() SCVariablesPtr {
return SCVariablesPtr{
Rollup: s.vars.Rollup.Copy(),
Auction: s.vars.Auction.Copy(),
WDelayer: s.vars.WDelayer.Copy(),
}
}
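A small sketch of how a caller might go back from the pointer form to the value form (illustrative only; it assumes the SCVariables and SCVariablesPtr definitions introduced above and relies on SCVars returning copies, so dereferencing is safe):
// toSCVariables dereferences an SCVariablesPtr (as returned by SCVars) into a
// plain SCVariables value.
func toSCVariables(p SCVariablesPtr) SCVariables {
	return SCVariables{
		Rollup:   *p.Rollup,
		Auction:  *p.Auction,
		WDelayer: *p.WDelayer,
	}
}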
@@ -335,7 +357,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
// We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum)
syncLastBlockNum := s.stats.Sync.LastBlock.Num
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
if reset {
// Using this query only to know if there is a batch in this slot
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
@@ -345,7 +367,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
hasBatch = false
} else {
hasBatch = true
syncLastBlockNum = dbFirstBatchBlockNum
firstBatchBlockNum = dbFirstBatchBlockNum
}
slot.ForgerCommitment = false
} else if slotNum > slot.SlotNum {
@@ -354,14 +376,16 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
}
slot.SlotNum = slotNum
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
if hasBatch && s.consts.Auction.RelativeBlock(syncLastBlockNum) < int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true
}
// If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil {
return tracerr.Wrap(err)
}
if hasBatch &&
s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true
}
// TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK
@@ -703,7 +727,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
}
func getInitialVariables(ethClient eth.ClientInterface,
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
@@ -719,7 +743,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables()
return &common.SCVariables{
return &SCVariables{
Rollup: *rollupVars,
Auction: *auctionVars,
WDelayer: *wDelayerVars,


@@ -323,7 +323,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db
test.WipeDB(historyDB.DB())
// Init L2 DB
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
return stateDB, historyDB, l2DB
}
@@ -378,9 +378,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
vars := s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err)
@@ -541,9 +541,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
dbExits, err := s.historyDB.GetAllExits()
require.NoError(t, err)
@@ -673,9 +673,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced())
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
// At this point, the DB only has data up to block 1
dbBlock, err := s.historyDB.GetLastBlock()
@@ -712,9 +712,9 @@ func TestSyncGeneral(t *testing.T) {
}
vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
}
dbBlock, err = s.historyDB.GetLastBlock()


@@ -212,17 +212,6 @@ PoolTransferToBJJ(1) A-C: 3 (1)
// SetBlockchainMinimumFlow0 contains a set of transactions with a minimal flow
var SetBlockchainMinimumFlow0 = `
Type: Blockchain
// Idxs:
// 256: A(0)
// 257: C(1)
// 258: A(1)
// 259: B(0)
// 260: D(0)
// 261: Coord(1)
// 262: Coord(0)
// 263: B(1)
// 264: C(0)
// 265: F(0)
AddToken(1)
@@ -266,11 +255,10 @@ CreateAccountDeposit(0) D: 800
// C(0): 0
// Coordinator creates needed accounts to receive Fees
// Coordinator creates needed 'To' accounts for the L2Txs
// sorted in the way that the TxSelector creates them
CreateAccountCoordinator(1) Coord
CreateAccountCoordinator(1) B
CreateAccountCoordinator(0) Coord
// Coordinator creates needed 'To' accounts for the L2Txs
CreateAccountCoordinator(1) B
CreateAccountCoordinator(0) C


@@ -77,7 +77,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
@@ -186,7 +186,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t,
"4392049343656836675348565048374261353937130287163762821533580216441778455298",
"3844339393304253264418296322137281996442345663805792718218845145754742722151",
bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
@@ -215,7 +215,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t,
"8905191229562583213069132470917469035834300549892959854483573322676101624713",
"2537294203394018451170116789946369404362093672592091326351037700505720139801",
bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
@@ -242,7 +242,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t,
"20593679664586247774284790801579542411781976279024409415159440382607791042723",
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
@@ -264,7 +264,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
// same root as the previous batch, as the L1CoordinatorTxs created by the
// Til set are not created by the TxSelector in this test
assert.Equal(t,
"20593679664586247774284790801579542411781976279024409415159440382607791042723",
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),


@@ -732,13 +732,13 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
if tp.s.Type() == statedb.TypeSynchronizer {
// this in TypeSynchronizer should never be reached
// this case should never be reached with the StateDB in Synchronizer mode
log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
return nil, nil, false,
tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
}
// case when tx.Type == common.TxTypeTransferToEthAddr or
// common.TxTypeTransferToBJJ:
// case when tx.Type== common.TxTypeTransferToEthAddr or common.TxTypeTransferToBJJ
accSender, err := tp.s.GetAccount(tx.FromIdx)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)


@@ -218,7 +218,7 @@ func TestProcessTxsBalances(t *testing.T) {
assert.NoError(t, err)
chainID := uint16(0)
// generate test transactions from test.SetBlockchainMinimumFlow0 code
// generate test transactions from test.SetBlockchain0 code
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(txsets.SetBlockchainMinimumFlow0)
require.NoError(t, err)
@@ -288,7 +288,7 @@ func TestProcessTxsBalances(t *testing.T) {
"9061858435528794221929846392270405504056106238451760714188625065949729889651",
tp.s.MT.Root().BigInt().String())
coordIdxs := []common.Idx{261, 263}
coordIdxs := []common.Idx{261, 262}
log.Debug("block:0 batch:7")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
@@ -303,7 +303,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "C", 0, "100")
checkBalance(t, tc, sdb, "D", 0, "800")
assert.Equal(t,
"4392049343656836675348565048374261353937130287163762821533580216441778455298",
"3844339393304253264418296322137281996442345663805792718218845145754742722151",
tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:8")
@@ -321,7 +321,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "C", 1, "100")
checkBalance(t, tc, sdb, "D", 0, "800")
assert.Equal(t,
"8905191229562583213069132470917469035834300549892959854483573322676101624713",
"2537294203394018451170116789946369404362093672592091326351037700505720139801",
tp.s.MT.Root().BigInt().String())
coordIdxs = []common.Idx{262}
@@ -330,8 +330,8 @@ func TestProcessTxsBalances(t *testing.T) {
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "75")
checkBalance(t, tc, sdb, "Coord", 1, "30")
checkBalance(t, tc, sdb, "Coord", 0, "35")
checkBalance(t, tc, sdb, "A", 0, "730")
checkBalance(t, tc, sdb, "A", 1, "280")
checkBalance(t, tc, sdb, "B", 0, "380")
@@ -340,7 +340,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "C", 1, "100")
checkBalance(t, tc, sdb, "D", 0, "470")
assert.Equal(t,
"12063160053709941400160547588624831667157042937323422396363359123696668555050",
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
tp.s.MT.Root().BigInt().String())
coordIdxs = []common.Idx{}
@@ -350,7 +350,7 @@ func TestProcessTxsBalances(t *testing.T) {
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
assert.Equal(t,
"20375835796927052406196249140510136992262283055544831070430919054949353249481",
"21058792089669864857092637997959333050678445584244682889041632034478049099916",
tp.s.MT.Root().BigInt().String())
// use Set of PoolL2 txs
@@ -359,8 +359,8 @@ func TestProcessTxsBalances(t *testing.T) {
_, err = tp.ProcessTxs(coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
require.NoError(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "75")
checkBalance(t, tc, sdb, "Coord", 1, "30")
checkBalance(t, tc, sdb, "Coord", 0, "35")
checkBalance(t, tc, sdb, "A", 0, "510")
checkBalance(t, tc, sdb, "A", 1, "170")
checkBalance(t, tc, sdb, "B", 0, "480")

File diff suppressed because one or more lines are too long


@@ -148,49 +148,26 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
discardedL2Txs, tracerr.Wrap(err)
}
// getL1L2TxSelection returns the selection of L1 + L2 txs.
// It returns: the CoordinatorIdxs used to receive the fees of the selected
// L2Txs; an array of byte arrays with the signatures of the
// AccountCreationAuthorization of the accounts of the users created by the
// Coordinator with L1CoordinatorTxs, for those accounts that do not exist yet
// but that have transactions addressed to them and an existing account
// creation authorization; and the L1UserTxs, L1CoordinatorTxs and PoolL2Txs
// that will be included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The
// current version is implemented in order to have a functional
// implementation that can be used ASAP.
// implementation that can be used asap.
//
// WIP.1: this method uses a 'cherry-pick' of internal calls of the
// StateDB, a refactor of the StateDB to reorganize it internally is
// planned once the main functionalities are covered, with that
// refactor the TxSelector will be updated also.
// Steps of this method:
// - ProcessL1Txs (User txs)
// - getPendingTxs (forgable directly with current state & not forgable
// yet)
// - split between l2TxsForgable & l2TxsNonForgable, where:
// - l2TxsForgable are the txs that are directly forgable with the
// current state
// - l2TxsNonForgable are the txs that are not directly forgable
// with the current state, but that may be forgable once the
// l2TxsForgable ones are processed
// - for l2TxsForgable, and if needed, for l2TxsNonForgable:
// - sort by Fee & Nonce
// - loop over l2Txs (txsel.processL2Txs)
// - Fill tx.TokenID tx.Nonce
// - Check enough Balance on sender
// - Check Nonce
// - Create CoordAccount L1CoordTx for TokenID if needed
// - & ProcessL1Tx of L1CoordTx
// - Check validity of receiver Account for ToEthAddr / ToBJJ
// - Create UserAccount L1CoordTx if needed (and possible)
// - If everything is fine, store l2Tx to validTxs & update NoncesMap
// - Prepare coordIdxsMap & AccumulatedFees
// - Distribute AccumulatedFees to CoordIdxs
// - MakeCheckpoint
// get pending l2-tx from tx-pool
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
txselStateDB := txsel.localAccountsDB.StateDB
tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig)
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
// Process L1UserTxs
for i := 0; i < len(l1UserTxs); i++ {
@@ -201,189 +178,32 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
}
}
l2TxsFromDB, err := txsel.l2db.GetPendingTxs()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
l2TxsForgable, l2TxsNonForgable := splitL2ForgableAndNonForgable(tp, l2TxsFromDB)
// in case the length of l2TxsForgable is 0, there is no need to continue,
// as there are no L2Txs to forge at all
if len(l2TxsForgable) == 0 {
var discardedL2Txs []common.PoolL2Tx
for i := 0; i < len(l2TxsNonForgable); i++ {
l2TxsNonForgable[i].Info =
"Tx not selected due impossibility to be forged with the current state"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(0)
metricSelectedL2Txs.Set(0)
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
}
var accAuths [][]byte
var l1CoordinatorTxs []common.L1Tx
var validTxs, discardedL2Txs []common.PoolL2Tx
l2TxsForgable = sortL2Txs(l2TxsForgable)
accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
l2TxsForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
// if there is space for more txs, also get the NonForgable txs, which may
// be unblocked once the Forgable ones are processed
if len(validTxs) < int(selectionConfig.MaxTx)-(len(l1UserTxs)+len(l1CoordinatorTxs)) {
l2TxsNonForgable = sortL2Txs(l2TxsNonForgable)
var accAuths2 [][]byte
var l1CoordinatorTxs2 []common.L1Tx
accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig,
len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accAuths = append(accAuths, accAuths2...)
l1CoordinatorTxs = append(l1CoordinatorTxs, l1CoordinatorTxs2...)
} else {
// if there is no space for the NonForgable txs, put them in the
// discardedL2Txs array
for i := 0; i < len(l2TxsNonForgable); i++ {
l2TxsNonForgable[i].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
}
// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// db.ErrNotFound should not happen here, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
coordIdxsMap[tokenID] = coordIdx
}
var coordIdxs []common.Idx
for _, idx := range coordIdxsMap {
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
var l1CoordinatorTxs []common.L1Tx
positionL1 := nL1Txs
positionL1 := len(l1UserTxs)
var accAuths [][]byte
// Iterate over l2Txs
// - check Nonces
// - check enough Balance for the Amount+Fee
// - if needed, create new L1CoordinatorTxs for non-existing ToIdx
// - keep used accAuths
// - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ {
// Check if there is space for more L2Txs in the selection
maxL2Txs := int(selectionConfig.MaxTx) - nL1Txs - len(l1CoordinatorTxs)
if len(validTxs) >= maxL2Txs {
// no more available slots for L2Txs, so mark this tx
// but also the rest of remaining txs as discarded
for j := i; j < len(l2Txs); j++ {
l2Txs[j].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2Txs[j])
}
break
// Sort l2TxsRaw (cropping at MaxTx at this point).
// discardedL2Txs contains an array of the L2Txs that have not been
// selected in this Batch.
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx)
for i := range discardedL2Txs {
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee (does not fit inside the profitable set)"
}
// get Nonce & TokenID from the Account by l2Tx.FromIdx
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
noncesMap := make(map[common.Idx]common.Nonce)
var l2Txs []common.PoolL2Tx
// iterate over l2Txs
// - if tx.TokenID does not exist at CoordsIdxDB
// - create new L1CoordinatorTx creating a CoordAccount, for
// Coordinator to receive the fee of the new TokenID
for i := 0; i < len(l2Txs0); i++ {
accSender, err := tp.StateDB().GetAccount(l2Txs0[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, tracerr.Wrap(err)
}
l2Txs[i].TokenID = accSender.TokenID
// Check enough Balance on sender
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// Check if Nonce is correct
if l2Txs[i].Nonce != accSender.Nonce {
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, accSender.Nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
l2Txs0[i].TokenID = accSender.TokenID
// populate the noncesMap used at the next iteration
noncesMap[l2Txs0[i].FromIdx] = accSender.Nonce
// if TokenID does not exist yet, create new L1CoordinatorTx to
// create the CoordinatorAccount for that TokenID, to receive
@@ -395,30 +215,55 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
txsel.coordAccountForTokenID(l1CoordinatorTxs,
accSender.TokenID, positionL1)
if err != nil {
return nil, nil, nil, nil, tracerr.Wrap(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
if newL1CoordTx != nil {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// if there is no space for the L1CoordinatorTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue
}
// increase positionL1
positionL1++
l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
accAuths = append(accAuths, txsel.coordAccount.AccountCreationAuth)
// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx)
if err != nil {
return nil, nil, nil, nil, tracerr.Wrap(err)
}
l2Txs = append(l2Txs, l2Txs0[i])
}
var validTxs []common.PoolL2Tx
// iterate over l2TxsRaw
// - check Nonces
// - check enough Balance for the Amount+Fee
// - if needed, create new L1CoordinatorTxs for non-existing ToIdx
// - keep used accAuths
// - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ {
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// check if Nonce is correct
nonce := noncesMap[l2Txs[i].FromIdx]
if l2Txs[i].Nonce != nonce {
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// If tx.ToIdx>=256, tx.ToIdx should exist in localAccountsDB,
@@ -432,7 +277,7 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
validL2Tx, l1CoordinatorTx, accAuth, err :=
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of
@@ -442,19 +287,7 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
if l1CoordinatorTx != nil && validL2Tx != nil {
if l1CoordinatorTx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
@@ -470,16 +303,9 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}
// process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx)
if err != nil {
return nil, nil, nil, nil, tracerr.Wrap(err)
}
}
if validL2Tx == nil {
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx)
}
} else if l2Txs[i].ToIdx >= common.IdxUserThreshold {
receiverAcc, err := txsel.localAccountsDB.GetAccount(l2Txs[i].ToIdx)
@@ -526,43 +352,115 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
continue
}
}
// Account found in the DB, include the l2Tx in the selection
validTxs = append(validTxs, l2Txs[i])
} else if l2Txs[i].ToIdx == common.Idx(1) {
// valid txs (of Exit type)
validTxs = append(validTxs, l2Txs[i])
}
noncesMap[l2Txs[i].FromIdx]++
}
// get CoordIdxsMap for the TokenID of the current l2Txs[i]
// get TokenID from tx.Sender account
// Process L1CoordinatorTxs
for i := 0; i < len(l1CoordinatorTxs); i++ {
_, _, _, _, err := tp.ProcessL1Tx(nil, &l1CoordinatorTxs[i])
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// db.ErrNotFound should not happen here, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil,
tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+
"due: %s", tokenID, err))
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
// prepare temp coordIdxsMap & AccumulatedFees for the call to
// ProcessL2Tx
coordIdxsMap := map[common.TokenID]common.Idx{tokenID: coordIdx}
// tp.AccumulatedFees = make(map[common.Idx]*big.Int)
if _, ok := tp.AccumulatedFees[coordIdx]; !ok {
tp.AccumulatedFees[coordIdx] = big.NewInt(0)
coordIdxsMap[tokenID] = coordIdx
}
_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &l2Txs[i])
var coordIdxs []common.Idx
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
for _, idx := range coordIdxsMap {
tp.AccumulatedFees[idx] = big.NewInt(0)
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// get most profitable L2-tx
maxL2Txs := int(selectionConfig.MaxTx) -
len(l1UserTxs) - len(l1CoordinatorTxs)
selectedL2Txs := validTxs
if len(validTxs) > maxL2Txs {
selectedL2Txs = selectedL2Txs[:maxL2Txs]
}
var finalL2Txs []common.PoolL2Tx
for i := 0; i < len(selectedL2Txs); i++ {
_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &selectedL2Txs[i])
if err != nil {
log.Debugw("txselector.getL1L2TxSelection at ProcessL2Tx", "err", err)
// the error can be due to invalid tx data, or to other
// cases (such as a StateDB error). In this initial
// version of the TxSelector, we discard the L2Tx and
// log the error, assuming that this will be iterated on in
// the near future.
log.Error(err)
// Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s",
err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
noncesMap[selectedL2Txs[i].FromIdx]--
continue
}
finalL2Txs = append(finalL2Txs, selectedL2Txs[i])
}
validTxs = append(validTxs, l2Txs[i])
} // after this loop, no checks to discard txs should be done
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
return accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
}
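To make the return contract above concrete, a hedged sketch of a caller inside the txselector package (the coordinator side is simplified; selectionConfig and l1UserTxs are assumed to be prepared elsewhere, and only the call signature and the return order come from this file):
// forgeSelection shows the order of the values returned by GetL1L2TxSelection.
func forgeSelection(txsel *TxSelector, cfg txprocessor.Config,
	l1UserTxs []common.L1Tx) error {
	coordIdxs, accAuths, l1User, l1Coord, l2Txs, discarded, err :=
		txsel.GetL1L2TxSelection(cfg, l1UserTxs)
	if err != nil {
		return tracerr.Wrap(err)
	}
	log.Debugw("selection done",
		"coordIdxs", coordIdxs, "accAuths", len(accAuths),
		"l1User", len(l1User), "l1Coord", len(l1Coord),
		"l2", len(l2Txs), "discarded", len(discarded))
	return nil
}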
// processTxToEthAddrBJJ processes the common.PoolL2Tx in the case where
@@ -674,10 +572,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
Type: common.TxTypeCreateAccountDeposit,
}
}
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1UserTxs {
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs {
// L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
@@ -698,14 +593,26 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
return false
}
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
// getL2Profitable returns the profitable selection of L2Txs, sorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
[]common.PoolL2Tx) {
// First sort by nonce so that txs from the same account are sorted so
// that they could be applied in succession.
sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
// Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
})
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
}
// sort l2Txs by Nonce. This can be done in many different ways; what is
// needed is to output the l2Txs so that, for each Account, its l2Txs are
// in Nonce order, without requiring the l2Txs to be grouped by sender Account
@@ -715,29 +622,5 @@ func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
return l2Txs
}
func splitL2ForgableAndNonForgable(tp *txprocessor.TxProcessor,
l2Txs []common.PoolL2Tx) ([]common.PoolL2Tx, []common.PoolL2Tx) {
var l2TxsForgable, l2TxsNonForgable []common.PoolL2Tx
for i := 0; i < len(l2Txs); i++ {
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
if l2Txs[i].Nonce != accSender.Nonce {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
enoughBalance, _, _ := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
l2TxsForgable = append(l2TxsForgable, l2Txs[i])
}
return l2TxsForgable, l2TxsNonForgable
return l2Txs, discardedL2Txs
}
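As a standalone illustration (not from the diff) of the two-pass ordering used in getL2Profitable above: sort by Nonce first, then stable-sort by fee, so that equal-fee txs keep their per-account Nonce order. The tx struct and values here are made up for the example:
package example
import (
	"fmt"
	"sort"
)
type exampleTx struct {
	From  string
	Nonce int
	Fee   float64
}
func sortExample() {
	txs := []exampleTx{{"A", 1, 5}, {"A", 0, 5}, {"B", 0, 9}}
	// First pass: nonce order within each account.
	sort.Slice(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })
	// Second pass: highest fee first; SliceStable preserves the nonce order
	// for txs with the same fee.
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].Fee > txs[j].Fee })
	fmt.Println(txs) // [{B 0 9} {A 0 5} {A 1 5}]
}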


@@ -26,13 +26,11 @@ import (
)
func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address,
coordUser *til.User) (*TxSelector, *historydb.HistoryDB) {
coordUser *til.User) *TxSelector {
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
@@ -67,7 +65,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
test.WipeDB(txsel.l2db.DB())
return txsel, historyDB
return txsel
}
func addAccCreationAuth(t *testing.T, tc *til.Context, txsel *TxSelector, chainID uint16,
@@ -159,7 +157,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -278,23 +276,22 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[0].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[2])
assert.Equal(t, accAuthSig0, accAuths[1])
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[1])
assert.Equal(t, accAuthSig0, accAuths[2])
assert.Equal(t, accAuthSig1, accAuths[3])
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 4, len(oL1CoordTxs))
assert.Equal(t, 2, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
assert.Equal(t, common.BatchNum(7), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalance(t, tc, txsel, "Coord", 1, "20") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "Coord", 0, "10") // CoordIdx for TokenID=1
checkBalanceByIdx(t, txsel, 261, "20") // CoordIdx for TokenID=1
checkBalanceByIdx(t, txsel, 262, "10") // CoordIdx for TokenID=0
checkBalance(t, tc, txsel, "A", 0, "600")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "290")
@@ -327,20 +324,19 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[2].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 4, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
assert.Equal(t, common.BatchNum(8), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalance(t, tc, txsel, "Coord", 1, "30") // CoordIdx for TokenID=1
checkBalance(t, tc, txsel, "Coord", 0, "35") // CoordIdx for TokenID=1
checkBalanceByIdx(t, txsel, 261, "30")
checkBalanceByIdx(t, txsel, 262, "35")
checkBalance(t, tc, txsel, "A", 0, "430")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "390")
@@ -374,7 +370,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, []common.Idx{263}, coordIdxs)
assert.Equal(t, []common.Idx{262}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
assert.Equal(t, 4, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -383,7 +379,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.Equal(t, common.BatchNum(9), txsel.localAccountsDB.CurrentBatch())
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
checkBalanceByIdx(t, txsel, 261, "30")
checkBalanceByIdx(t, txsel, 263, "75")
checkBalanceByIdx(t, txsel, 262, "75")
checkBalance(t, tc, txsel, "A", 0, "730")
checkBalance(t, tc, txsel, "A", 1, "280")
checkBalance(t, tc, txsel, "B", 0, "380")
@@ -419,7 +415,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -472,6 +468,11 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
tc.RestartNonces()
// batch3
// NOTE: this batch will result in 1 L2Tx, as the PoolExit tx is not
// possible, because the PoolTransferToEthAddr has not been processed yet
// when checking the availability of the PoolExit. In a near-future
// iteration of the TxSelector, the 2 transactions will be returned as
// valid and selected, as the TxSelector will handle this kind of
// combination.
batchPoolL2 = `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 50 (126)`
@@ -485,11 +486,11 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 2, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, 1, len(oL2Txs)) // see 'NOTE' at the beginning of 'batch3' of this test
assert.Equal(t, 2, len(discardedL2Txs))
assert.Equal(t, expectedTxID2, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, oL2Txs[1].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, discardedL2Txs[1].TxID.String())
assert.Equal(t, common.TxTypeTransferToEthAddr, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
@@ -504,8 +505,12 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
// the Exit that was not accepted at the batch2
assert.Equal(t, expectedTxID1, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, common.TxTypeExit, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
@@ -532,7 +537,7 @@ func TestTransferToBjj(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -575,6 +580,7 @@ func TestTransferToBjj(t *testing.T) {
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
@@ -663,7 +669,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -706,8 +712,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and do 8 L2Tx
// transfers from account A
// batch 2 to create some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
@@ -715,7 +720,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 7, len(oL2Txs))
assert.Equal(t, 4, len(discardedL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
@@ -745,7 +750,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -798,8 +803,8 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 0, len(oL2Txs))
require.Equal(t, 10, len(discardedL2Txs))
require.Equal(t, 2, len(oL2Txs))
require.Equal(t, 8, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
@@ -814,7 +819,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 3, len(oL1CoordTxs))
require.Equal(t, 8, len(oL2Txs))
require.Equal(t, 6, len(oL2Txs))
require.Equal(t, 2, len(discardedL2Txs))
require.Equal(t, 3, len(accAuths))
@@ -838,203 +843,3 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestProcessL2Selection(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 18
CreateAccountDeposit(0) B: 0
> batchL1 // freeze L1User{3}
> batchL1 // forge L1User{3}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as they will be set by directly generating
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// 3 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126)
PoolTransfer(0) A-B: 10 (126) // not enough funds
PoolTransfer(0) A-B: 5 (126) // enough funds, but Nonce=2 is no longer current once the 2nd tx is discarded
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 3, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create the accounts with the deposited balances, and process the 3 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// only the 1st L2Tx should be accepted, as:
// - the 2nd will not be selected because it does not have enough funds
// - the 3rd will not be selected because it has Nonce=2 while the account
// Nonce==1 (due to the 2nd tx not being accepted)
// (a simplified sketch of this balance & nonce check follows after this test)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 2, len(discardedL2Txs))
assert.Equal(t, common.Nonce(0), oL2Txs[0].Nonce)
assert.Equal(t, common.Nonce(1), discardedL2Txs[0].Nonce)
assert.Equal(t, common.Nonce(2), discardedL2Txs[1].Nonce)
assert.Equal(t, "Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: 7, Amount+Fee: 11", discardedL2Txs[0].Info)
assert.Equal(t, "Tx not selected due to not current Nonce. Tx.Nonce: 2, "+
"Account.Nonce: 1", discardedL2Txs[1].Info)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
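To make the check described in the comments of TestProcessL2Selection above easier to follow, here is a minimal, self-contained Go sketch of a balance-and-nonce filter over pool txs. All names in it (account, poolTx, selectL2Txs) are hypothetical; this is an illustration of the rule the test asserts, not the hermez-node txselector code. Under those assumptions it reproduces the 1 selected / 2 discarded outcome from the test.
package main

import "fmt"

// account is a hypothetical, simplified state entry: just a balance and a nonce.
type account struct {
	Balance uint64
	Nonce   uint64
}

// poolTx is a hypothetical, simplified pool transaction.
type poolTx struct {
	From   string
	Amount uint64
	Fee    uint64
	Nonce  uint64
	Info   string // reason for discarding, if any
}

// selectL2Txs keeps a tx only if the sender can pay Amount+Fee and the tx
// nonce matches the account nonce; otherwise it records why it was discarded.
func selectL2Txs(accs map[string]*account, txs []poolTx) (selected, discarded []poolTx) {
	for _, tx := range txs {
		acc := accs[tx.From]
		cost := tx.Amount + tx.Fee
		switch {
		case acc.Balance < cost:
			tx.Info = fmt.Sprintf("not enough balance: have %d, need %d", acc.Balance, cost)
			discarded = append(discarded, tx)
		case tx.Nonce != acc.Nonce:
			tx.Info = fmt.Sprintf("not current nonce: tx %d, account %d", tx.Nonce, acc.Nonce)
			discarded = append(discarded, tx)
		default:
			acc.Balance -= cost
			acc.Nonce++
			selected = append(selected, tx)
		}
	}
	return selected, discarded
}

func main() {
	// Mirrors the test: A starts with 18; a fee of 1 stands in for the (126) fee selector.
	accs := map[string]*account{"A": {Balance: 18, Nonce: 0}}
	txs := []poolTx{
		{From: "A", Amount: 10, Fee: 1, Nonce: 0}, // selected
		{From: "A", Amount: 10, Fee: 1, Nonce: 1}, // discarded: balance left is 7, needs 11
		{From: "A", Amount: 5, Fee: 1, Nonce: 2},  // discarded: account nonce is still 1
	}
	sel, disc := selectL2Txs(accs, txs)
	fmt.Println(len(sel), len(disc)) // prints: 1 2
}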
func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// This test recreates the case where there are valid txs with a low fee and
// invalid txs (not enough balance at the sender) with a high fee
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 0
> batchL1 // Batch1: freeze L1User{3}
> batchL1 // Batch2: forge L1User{3}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, historyDB := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// Insert blocks into DB
for i := range blocks {
err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err)
}
err = historyDB.UpdateTokenValue(common.EmptyAddr, 1000)
require.NoError(t, err)
// restart nonces of TilContext, as they will be set by directly generating
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 5,
MaxTx: 5,
MaxL1Tx: 3,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
// batch 2 to create the accounts (from L1UserTxs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 0, len(oL2Txs))
require.Equal(t, 0, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 3. The A-B txs have a lower fee, but they are the only ones possible
// with the current account balances: the B-A tx of amount 40 is processed
// first (the TxSelector sorts by Fee and then by Nonce) and is not included
// because there is not yet enough balance at B.
// (a simplified sketch of this fee-then-nonce ordering follows after this test)
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) B-A: 40 (130) // B-A txs are only possible once A-B txs are processed
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) A-B: 20 (20)
PoolTransfer(0) A-B: 25 (150)
PoolTransfer(0) A-B: 20 (20)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 3, len(oL2Txs)) // the 3 txs A-B
require.Equal(t, 8, len(discardedL2Txs)) // the 8 txs B-A
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 4. In this Batch, account B has enough balance to send the txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 5, len(oL2Txs))
require.Equal(t, 3, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
}
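The batch-3 comment in TestValidTxsWithLowFeeAndInvalidTxsWithHighFee above says the TxSelector orders candidate txs by Fee and then by Nonce before the balance check runs. The following short Go sketch shows that ordering only; the types (poolTx, sortByFeeThenNonce) are hypothetical and the Fee field stands in for the fee selector of the real pool txs, so treat it as an assumption-level illustration rather than the actual hermez-node implementation.
package main

import (
	"fmt"
	"sort"
)

// poolTx is a hypothetical, simplified pool transaction; Fee is treated as a
// plain number here, standing in for the fee selector of the real txs.
type poolTx struct {
	From   string
	Amount uint64
	Fee    uint64
	Nonce  uint64
}

// sortByFeeThenNonce orders candidates by highest fee first and, within the
// same fee, by lowest nonce first - the ordering the batch-3 comment describes.
func sortByFeeThenNonce(txs []poolTx) {
	sort.Slice(txs, func(i, j int) bool {
		if txs[i].Fee != txs[j].Fee {
			return txs[i].Fee > txs[j].Fee
		}
		return txs[i].Nonce < txs[j].Nonce
	})
}

func main() {
	txs := []poolTx{
		{From: "A", Amount: 20, Fee: 20, Nonce: 0},
		{From: "B", Amount: 40, Fee: 130, Nonce: 0}, // high-fee B-A tx; B is underfunded at this point
		{From: "A", Amount: 25, Fee: 150, Nonce: 1},
		{From: "B", Amount: 1, Fee: 126, Nonce: 1},
	}
	sortByFeeThenNonce(txs)
	for _, tx := range txs {
		fmt.Printf("%s fee=%d nonce=%d\n", tx.From, tx.Fee, tx.Nonce)
	}
}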