mirror of
https://github.com/arnaucube/hermez-node.git
synced 2026-02-07 11:26:44 +01:00
Compare commits
28 Commits
tmp/txsel-
...
feature/up
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6aac050858 | ||
|
|
334eecc99e | ||
|
|
b01d5d50ee | ||
|
|
8018f348a7 | ||
|
|
91ffdc250b | ||
|
|
4ebe285912 | ||
|
|
c280b21b89 | ||
|
|
e78fd613c6 | ||
|
|
9edbd135ef | ||
|
|
009d0c5be1 | ||
|
|
777ca3d87e | ||
|
|
eb89ab84f2 | ||
|
|
234268028e | ||
|
|
d7bab0c524 | ||
|
|
36c1ba84df | ||
|
|
450fa08d80 | ||
|
|
578edc80bc | ||
|
|
80f16201a2 | ||
|
|
cd74f1fda3 | ||
|
|
6c6d1ea7b8 | ||
|
|
d625cc9287 | ||
|
|
ec9b0aadce | ||
|
|
5d8579a609 | ||
|
|
84b84ecc17 | ||
|
|
968fcc207e | ||
|
|
a5dcc67a08 | ||
|
|
97062afc90 | ||
|
|
d361abb8cd |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -0,0 +1 @@
|
|||||||
|
bin/
|
||||||
135
Makefile
Normal file
135
Makefile
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
#! /usr/bin/make -f
|
||||||
|
|
||||||
|
# Project variables.
|
||||||
|
PACKAGE := github.com/hermeznetwork/hermez-node
|
||||||
|
VERSION := $(shell git describe --tags --always)
|
||||||
|
BUILD := $(shell git rev-parse --short HEAD)
|
||||||
|
BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
|
||||||
|
PROJECT_NAME := $(shell basename "$(PWD)")
|
||||||
|
|
||||||
|
# Go related variables.
GO_FILES ?= $$(find . -name '*.go' | grep -v vendor)
GOBASE := $(shell pwd)
GOBIN := $(GOBASE)/bin
# Fix: was `$(.)`, which is not a variable and expands to nothing;
# the Go package path declared above is what was intended here.
GOPKG := $(PACKAGE)
GOENVVARS := GOBIN=$(GOBIN)
GOCMD := $(GOBASE)/cli/node
GOPROOF := $(GOBASE)/test/proofserver/cli
GOBINARY := node
|
||||||
|
|
||||||
|
# Project configs.
|
||||||
|
MODE ?= sync
|
||||||
|
CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
|
||||||
|
POSTGRES_PASS ?= yourpasswordhere
|
||||||
|
|
||||||
|
# Use linker flags to provide version/build settings.
|
||||||
|
LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
|
||||||
|
|
||||||
|
# PID file will keep the process id of the server.
|
||||||
|
PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
|
||||||
|
|
||||||
|
# Make is verbose in Linux. Make it silent.
|
||||||
|
MAKEFLAGS += --silent
|
||||||
|
|
||||||
|
.PHONY: help
|
||||||
|
help: Makefile
|
||||||
|
@echo
|
||||||
|
@echo " Choose a command run in "$(PROJECT_NAME)":"
|
||||||
|
@echo
|
||||||
|
@sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /'
|
||||||
|
@echo
|
||||||
|
|
||||||
|
## test: Run the application check and all tests.
|
||||||
|
test: govet gocilint test-unit
|
||||||
|
|
||||||
|
## test-unit: Run all unit tests.
|
||||||
|
test-unit:
|
||||||
|
@echo " > Running unit tests"
|
||||||
|
$(GOENVVARS) go test -race -p 1 -failfast -timeout 300s -v ./...
|
||||||
|
|
||||||
|
## test-api-server: Run the API server using the Go tests.
|
||||||
|
test-api-server:
|
||||||
|
@echo " > Running unit tests"
|
||||||
|
$(GOENVVARS) FAKE_SERVER=yes go test -timeout 0 ./api -p 1 -count 1 -v
|
||||||
|
|
||||||
|
## gofmt: Run `go fmt` for all go files.
|
||||||
|
gofmt:
|
||||||
|
@echo " > Format all go files"
|
||||||
|
$(GOENVVARS) gofmt -w ${GO_FILES}
|
||||||
|
|
||||||
|
## govet: Run go vet.
|
||||||
|
govet:
|
||||||
|
@echo " > Running go vet"
|
||||||
|
$(GOENVVARS) go vet ./...
|
||||||
|
|
||||||
|
## golint: Run default golint.
|
||||||
|
golint:
|
||||||
|
@echo " > Running golint"
|
||||||
|
$(GOENVVARS) golint -set_exit_status ./...
|
||||||
|
|
||||||
|
## gocilint: Run Golang CI Lint.
# Fix: the recipe previously started with `$-`, which make expands as the
# (empty) variable named `-` — an accident waiting to happen. The README
# documents the bare `golangci-lint run …` invocation, used here verbatim.
.PHONY: gocilint
gocilint:
	@echo " > Running Golang CI Lint"
	golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
|
||||||
|
|
||||||
|
## exec: Run given command. e.g; make exec run="go test ./..."
|
||||||
|
exec:
|
||||||
|
GOBIN=$(GOBIN) $(run)
|
||||||
|
|
||||||
|
## clean: Clean build files. Runs `go clean` internally.
# Fix: `rm $(GOBIN)/` always failed because $(GOBIN) is a directory and
# `rm` without -r refuses to remove directories (the failure was hidden by
# the leading `-`). Use -rf so the bin directory is actually removed.
.PHONY: clean
clean:
	@-rm -rf $(GOBIN) 2> /dev/null
	@echo " > Cleaning build cache"
	$(GOENVVARS) go clean
|
||||||
|
|
||||||
|
## build: Build the project.
|
||||||
|
build: install
|
||||||
|
@echo " > Building Hermez binary..."
|
||||||
|
@bash -c "$(MAKE) migration-pack"
|
||||||
|
$(GOENVVARS) go build $(LDFLAGS) -o $(GOBIN)/$(GOBINARY) $(GOCMD)
|
||||||
|
@bash -c "$(MAKE) migration-clean"
|
||||||
|
|
||||||
|
## install: Install missing dependencies. Runs `go get` internally. e.g; make install get=github.com/foo/bar
|
||||||
|
install:
|
||||||
|
@echo " > Checking if there is any missing dependencies..."
|
||||||
|
$(GOENVVARS) go get $(GOCMD)/... $(get)
|
||||||
|
|
||||||
|
## run: Run Hermez node.
|
||||||
|
run:
|
||||||
|
@bash -c "$(MAKE) clean build"
|
||||||
|
@echo " > Running $(PROJECT_NAME)"
|
||||||
|
@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
|
||||||
|
|
||||||
|
## run-proof-mock: Run proof server mock API.
|
||||||
|
run-proof-mock: stop-proof-mock
|
||||||
|
@echo " > Running Proof Server Mock"
|
||||||
|
$(GOENVVARS) go build -o $(GOBIN)/proof $(GOPROOF)
|
||||||
|
@$(GOBIN)/proof 2>&1 & echo $$! > $(PID_PROOF_MOCK)
|
||||||
|
@cat $(PID_PROOF_MOCK) | sed "/^/s/^/ \> Proof Server Mock PID: /"
|
||||||
|
|
||||||
|
## stop-proof-mock: Stop proof server mock API.
|
||||||
|
stop-proof-mock:
|
||||||
|
@-touch $(PID_PROOF_MOCK)
|
||||||
|
@-kill -s INT `cat $(PID_PROOF_MOCK)` 2> /dev/null || true
|
||||||
|
@-rm $(PID_PROOF_MOCK) $(GOBIN)/proof 2> /dev/null || true
|
||||||
|
|
||||||
|
## migration-pack: Pack the database migrations into the binary.
|
||||||
|
migration-pack:
|
||||||
|
@echo " > Packing the migrations..."
|
||||||
|
@cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
|
||||||
|
@cd $(GOBASE)/db && packr2 && cd -
|
||||||
|
|
||||||
|
## migration-clean: Clean the database migrations pack.
|
||||||
|
migration-clean:
|
||||||
|
@echo " > Cleaning the migrations..."
|
||||||
|
@cd $(GOBASE)/db && packr2 clean && cd -
|
||||||
|
|
||||||
|
## run-database-container: Run the Postgres container
|
||||||
|
run-database-container:
|
||||||
|
@echo " > Running the postgreSQL DB..."
|
||||||
|
@-docker run --rm --name hermez-db -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$(POSTGRES_PASS)" -d postgres
|
||||||
|
|
||||||
|
## stop-database-container: Stop the Postgres container
|
||||||
|
stop-database-container:
|
||||||
|
@echo " > Stopping the postgreSQL DB..."
|
||||||
|
@-docker stop hermez-db
|
||||||
83
README.md
83
README.md
@@ -8,42 +8,75 @@ Go implementation of the Hermez node.
|
|||||||
|
|
||||||
The `hermez-node` has been tested with go version 1.14
|
The `hermez-node` has been tested with go version 1.14
|
||||||
|
|
||||||
|
### Build
|
||||||
|
|
||||||
|
Build the binary and check the current version:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make build
|
||||||
|
$ bin/node version
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run
|
||||||
|
|
||||||
|
First you must edit the default/template config file into [cli/node/cfg.buidler.toml](cli/node/cfg.buidler.toml),
|
||||||
|
there are more information about the config file into [cli/node/README.md](cli/node/README.md)
|
||||||
|
|
||||||
|
After setting the config, you can build and run the Hermez Node as a synchronizer:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make run
|
||||||
|
```
|
||||||
|
|
||||||
|
Or build and run as a coordinator, and also passing the config file from other location:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
|
||||||
|
```
|
||||||
|
|
||||||
|
To check the useful make commands:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make help
|
||||||
|
```
|
||||||
|
|
||||||
### Unit testing
|
### Unit testing
|
||||||
|
|
||||||
Running the unit tests requires a connection to a PostgreSQL database. You can
|
Running the unit tests requires a connection to a PostgreSQL database. You can
|
||||||
start PostgreSQL with docker easily this way (where `yourpasswordhere` should
|
run PostgreSQL with docker easily this way (where `yourpasswordhere` should
|
||||||
be your password):
|
be your password):
|
||||||
|
|
||||||
```
|
```shell
|
||||||
POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
|
$ POSTGRES_PASS="yourpasswordhere" make run-database-container
|
||||||
```
|
```
|
||||||
|
|
||||||
Afterwards, run the tests with the password as env var:
|
Afterward, run the tests with the password as env var:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
POSTGRES_PASS=yourpasswordhere go test -p 1 ./...
|
$ POSTGRES_PASS="yourpasswordhere" make test
|
||||||
```
|
```
|
||||||
|
|
||||||
NOTE: `-p 1` forces execution of package test in serial. Otherwise they may be
|
NOTE: `-p 1` forces execution of package test in serial. Otherwise, they may be
|
||||||
executed in paralel and the test may find unexpected entries in the SQL databse
|
executed in parallel, and the test may find unexpected entries in the SQL database
|
||||||
because it's shared among all tests.
|
because it's shared among all tests.
|
||||||
|
|
||||||
There is an extra temporary option that allows you to run the API server using
|
There is an extra temporary option that allows you to run the API server using the
|
||||||
the Go tests. This will be removed once the API can be properly initialized,
|
Go tests. It will be removed once the API can be properly initialized with data
|
||||||
with data from the synchronizer and so on. To use this, run:
|
from the synchronizer. To use this, run:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
FAKE_SERVER=yes POSTGRES_PASS=yourpasswordhere go test -timeout 0 ./api -p 1 -count 1 -v`
|
$ POSTGRES_PASS="yourpasswordhere" make test-api-server
|
||||||
```
|
```
|
||||||
|
|
||||||
### Lint
|
### Lint
|
||||||
|
|
||||||
All Pull Requests need to pass the configured linter.
|
All Pull Requests need to pass the configured linter.
|
||||||
|
|
||||||
To run the linter locally, first install [golangci-lint](https://golangci-lint.run). Afterwards you can check the lints with this command:
|
To run the linter locally, first, install [golangci-lint](https://golangci-lint.run).
|
||||||
|
Afterward, you can check the lints with this command:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
|
$ make gocilint
|
||||||
```
|
```
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
@@ -54,13 +87,13 @@ See [cli/node/README.md](cli/node/README.md)
|
|||||||
|
|
||||||
### Proof Server
|
### Proof Server
|
||||||
|
|
||||||
The node in mode coordinator requires a proof server (a server that is capable
|
The node in mode coordinator requires a proof server (a server capable of
|
||||||
of calculating proofs from the zkInputs). For testing purposes there is a mock
|
calculating proofs from the zkInputs). There is a mock proof server CLI
|
||||||
proof server cli at `test/proofserver/cli`.
|
at `test/proofserver/cli` for testing purposes.
|
||||||
|
|
||||||
Usage of `test/proofserver/cli`:
|
Usage of `test/proofserver/cli`:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
USAGE:
|
USAGE:
|
||||||
go run ./test/proofserver/cli OPTIONS
|
go run ./test/proofserver/cli OPTIONS
|
||||||
|
|
||||||
@@ -71,11 +104,19 @@ OPTIONS:
|
|||||||
proving time duration (default 2s)
|
proving time duration (default 2s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Also, the Makefile commands can be used to run and stop the proof server
|
||||||
|
in the background:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make run-proof-mock
|
||||||
|
$ make stop-proof-mock
|
||||||
|
```
|
||||||
|
|
||||||
### `/tmp` as tmpfs
|
### `/tmp` as tmpfs
|
||||||
|
|
||||||
For every processed batch, the node builds a temporary exit tree in a key-value
|
For every processed batch, the node builds a temporary exit tree in a key-value
|
||||||
DB stored in `/tmp`. It is highly recommended that `/tmp` is mounted as a RAM
|
DB stored in `/tmp`. It is highly recommended that `/tmp` is mounted as a RAM
|
||||||
file system in production to avoid unecessary reads an writes to disk. This
|
file system in production to avoid unnecessary reads and writes to disk. This
|
||||||
can be done by mounting `/tmp` as tmpfs; for example, by having this line in
|
can be done by mounting `/tmp` as tmpfs; for example, by having this line in
|
||||||
`/etc/fstab`:
|
`/etc/fstab`:
|
||||||
```
|
```
|
||||||
|
|||||||
41
api/api.go
41
api/api.go
@@ -2,40 +2,19 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"sync"
|
|
||||||
|
|
||||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
|
||||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||||
"github.com/hermeznetwork/tracerr"
|
"github.com/hermeznetwork/tracerr"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: Add correct values to constants
|
|
||||||
const (
|
|
||||||
createAccountExtraFeePercentage float64 = 2
|
|
||||||
createAccountInternalExtraFeePercentage float64 = 2.5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Status define status of the network
|
|
||||||
type Status struct {
|
|
||||||
sync.RWMutex
|
|
||||||
NodeConfig NodeConfig `json:"nodeConfig"`
|
|
||||||
Network Network `json:"network"`
|
|
||||||
Metrics historydb.Metrics `json:"metrics"`
|
|
||||||
Rollup historydb.RollupVariablesAPI `json:"rollup"`
|
|
||||||
Auction historydb.AuctionVariablesAPI `json:"auction"`
|
|
||||||
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
|
|
||||||
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// API serves HTTP requests to allow external interaction with the Hermez node
|
// API serves HTTP requests to allow external interaction with the Hermez node
|
||||||
type API struct {
|
type API struct {
|
||||||
h *historydb.HistoryDB
|
h *historydb.HistoryDB
|
||||||
cg *configAPI
|
cg *configAPI
|
||||||
l2 *l2db.L2DB
|
l2 *l2db.L2DB
|
||||||
status Status
|
|
||||||
chainID uint16
|
chainID uint16
|
||||||
hermezAddress ethCommon.Address
|
hermezAddress ethCommon.Address
|
||||||
}
|
}
|
||||||
@@ -46,8 +25,6 @@ func NewAPI(
|
|||||||
server *gin.Engine,
|
server *gin.Engine,
|
||||||
hdb *historydb.HistoryDB,
|
hdb *historydb.HistoryDB,
|
||||||
l2db *l2db.L2DB,
|
l2db *l2db.L2DB,
|
||||||
config *Config,
|
|
||||||
nodeConfig *NodeConfig,
|
|
||||||
) (*API, error) {
|
) (*API, error) {
|
||||||
// Check input
|
// Check input
|
||||||
// TODO: is stateDB only needed for explorer endpoints or for both?
|
// TODO: is stateDB only needed for explorer endpoints or for both?
|
||||||
@@ -57,20 +34,20 @@ func NewAPI(
|
|||||||
if explorerEndpoints && hdb == nil {
|
if explorerEndpoints && hdb == nil {
|
||||||
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
|
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
|
||||||
}
|
}
|
||||||
|
consts, err := hdb.GetConstants()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
a := &API{
|
a := &API{
|
||||||
h: hdb,
|
h: hdb,
|
||||||
cg: &configAPI{
|
cg: &configAPI{
|
||||||
RollupConstants: *newRollupConstants(config.RollupConstants),
|
RollupConstants: *newRollupConstants(consts.Rollup),
|
||||||
AuctionConstants: config.AuctionConstants,
|
AuctionConstants: consts.Auction,
|
||||||
WDelayerConstants: config.WDelayerConstants,
|
WDelayerConstants: consts.WDelayer,
|
||||||
},
|
},
|
||||||
l2: l2db,
|
l2: l2db,
|
||||||
status: Status{
|
chainID: consts.ChainID,
|
||||||
NodeConfig: *nodeConfig,
|
hermezAddress: consts.HermezAddress,
|
||||||
},
|
|
||||||
chainID: config.ChainID,
|
|
||||||
hermezAddress: config.HermezAddress,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add coordinator endpoints
|
// Add coordinator endpoints
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ import (
|
|||||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||||
"github.com/hermeznetwork/hermez-node/log"
|
"github.com/hermeznetwork/hermez-node/log"
|
||||||
|
"github.com/hermeznetwork/hermez-node/stateapiupdater"
|
||||||
"github.com/hermeznetwork/hermez-node/test"
|
"github.com/hermeznetwork/hermez-node/test"
|
||||||
"github.com/hermeznetwork/hermez-node/test/til"
|
"github.com/hermeznetwork/hermez-node/test/til"
|
||||||
"github.com/hermeznetwork/hermez-node/test/txsets"
|
"github.com/hermeznetwork/hermez-node/test/txsets"
|
||||||
@@ -180,12 +181,13 @@ type testCommon struct {
|
|||||||
auctionVars common.AuctionVariables
|
auctionVars common.AuctionVariables
|
||||||
rollupVars common.RollupVariables
|
rollupVars common.RollupVariables
|
||||||
wdelayerVars common.WDelayerVariables
|
wdelayerVars common.WDelayerVariables
|
||||||
nextForgers []NextForger
|
nextForgers []historydb.NextForgerAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
var tc testCommon
|
var tc testCommon
|
||||||
var config configAPI
|
var config configAPI
|
||||||
var api *API
|
var api *API
|
||||||
|
var stateAPIUpdater *stateapiupdater.Updater
|
||||||
|
|
||||||
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
|
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
|
||||||
// emulating the task of the synchronizer in order to have data to be returned
|
// emulating the task of the synchronizer in order to have data to be returned
|
||||||
@@ -206,16 +208,6 @@ func TestMain(m *testing.M) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
// StateDB
|
|
||||||
dir, err := ioutil.TempDir("", "tmpdb")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := os.RemoveAll(dir); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
// L2DB
|
// L2DB
|
||||||
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
|
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
|
||||||
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
|
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
|
||||||
@@ -230,18 +222,38 @@ func TestMain(m *testing.M) {
|
|||||||
|
|
||||||
// API
|
// API
|
||||||
apiGin := gin.Default()
|
apiGin := gin.Default()
|
||||||
|
// Reset DB
|
||||||
|
test.WipeDB(hdb.DB())
|
||||||
|
|
||||||
|
constants := &historydb.Constants{
|
||||||
|
SCConsts: common.SCConsts{
|
||||||
|
Rollup: _config.RollupConstants,
|
||||||
|
Auction: _config.AuctionConstants,
|
||||||
|
WDelayer: _config.WDelayerConstants,
|
||||||
|
},
|
||||||
|
ChainID: chainID,
|
||||||
|
HermezAddress: _config.HermezAddress,
|
||||||
|
}
|
||||||
|
if err := hdb.SetConstants(constants); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
nodeConfig := &historydb.NodeConfig{
|
||||||
|
MaxPoolTxs: 10,
|
||||||
|
MinFeeUSD: 0,
|
||||||
|
}
|
||||||
|
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
api, err = NewAPI(
|
api, err = NewAPI(
|
||||||
true,
|
true,
|
||||||
true,
|
true,
|
||||||
apiGin,
|
apiGin,
|
||||||
hdb,
|
hdb,
|
||||||
l2DB,
|
l2DB,
|
||||||
&_config,
|
|
||||||
&NodeConfig{
|
|
||||||
ForgeDelay: 180,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Error(err)
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
// Start server
|
// Start server
|
||||||
@@ -257,9 +269,6 @@ func TestMain(m *testing.M) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Reset DB
|
|
||||||
test.WipeDB(api.h.DB())
|
|
||||||
|
|
||||||
// Generate blockchain data with til
|
// Generate blockchain data with til
|
||||||
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
||||||
tilCfgExtra := til.ConfigExtra{
|
tilCfgExtra := til.ConfigExtra{
|
||||||
@@ -306,7 +315,7 @@ func TestMain(m *testing.M) {
|
|||||||
USD: ðUSD,
|
USD: ðUSD,
|
||||||
USDUpdate: ðNow,
|
USDUpdate: ðNow,
|
||||||
})
|
})
|
||||||
err = api.h.UpdateTokenValue(test.EthToken.Symbol, ethUSD)
|
err = api.h.UpdateTokenValue(common.EmptyAddr, ethUSD)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -333,7 +342,7 @@ func TestMain(m *testing.M) {
|
|||||||
token.USD = &value
|
token.USD = &value
|
||||||
token.USDUpdate = &now
|
token.USDUpdate = &now
|
||||||
// Set value in DB
|
// Set value in DB
|
||||||
err = api.h.UpdateTokenValue(token.Symbol, value)
|
err = api.h.UpdateTokenValue(token.EthAddr, value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -460,19 +469,19 @@ func TestMain(m *testing.M) {
|
|||||||
if err = api.h.AddBids(bids); err != nil {
|
if err = api.h.AddBids(bids); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
bootForger := NextForger{
|
bootForger := historydb.NextForgerAPI{
|
||||||
Coordinator: historydb.CoordinatorAPI{
|
Coordinator: historydb.CoordinatorAPI{
|
||||||
Forger: auctionVars.BootCoordinator,
|
Forger: auctionVars.BootCoordinator,
|
||||||
URL: auctionVars.BootCoordinatorURL,
|
URL: auctionVars.BootCoordinatorURL,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// Set next forgers: set all as boot coordinator then replace the non boot coordinators
|
// Set next forgers: set all as boot coordinator then replace the non boot coordinators
|
||||||
nextForgers := []NextForger{}
|
nextForgers := []historydb.NextForgerAPI{}
|
||||||
var initBlock int64 = 140
|
var initBlock int64 = 140
|
||||||
var deltaBlocks int64 = 40
|
var deltaBlocks int64 = 40
|
||||||
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
|
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
|
||||||
fromBlock := initBlock + deltaBlocks*int64(i-1)
|
fromBlock := initBlock + deltaBlocks*int64(i-1)
|
||||||
bootForger.Period = Period{
|
bootForger.Period = historydb.Period{
|
||||||
SlotNum: int64(i),
|
SlotNum: int64(i),
|
||||||
FromBlock: fromBlock,
|
FromBlock: fromBlock,
|
||||||
ToBlock: fromBlock + deltaBlocks - 1,
|
ToBlock: fromBlock + deltaBlocks - 1,
|
||||||
@@ -512,7 +521,13 @@ func TestMain(m *testing.M) {
|
|||||||
WithdrawalDelay: uint64(3000),
|
WithdrawalDelay: uint64(3000),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate test data, as expected to be received/sent from/to the API
|
stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
|
||||||
|
Rollup: rollupVars,
|
||||||
|
Auction: auctionVars,
|
||||||
|
WDelayer: wdelayerVars,
|
||||||
|
}, constants)
|
||||||
|
|
||||||
|
// Generate test data, as expected to be received/sent from/to the API
|
||||||
testCoords := genTestCoordinators(commonCoords)
|
testCoords := genTestCoordinators(commonCoords)
|
||||||
testBids := genTestBids(commonBlocks, testCoords, bids)
|
testBids := genTestBids(commonBlocks, testCoords, bids)
|
||||||
testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
|
testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
|
||||||
@@ -589,15 +604,12 @@ func TestMain(m *testing.M) {
|
|||||||
if err := database.Close(); err != nil {
|
if err := database.Close(); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
if err := os.RemoveAll(dir); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
os.Exit(result)
|
os.Exit(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimeout(t *testing.T) {
|
func TestTimeout(t *testing.T) {
|
||||||
pass := os.Getenv("POSTGRES_PASS")
|
pass := os.Getenv("POSTGRES_PASS")
|
||||||
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond)
|
apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond)
|
||||||
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
|
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
|
||||||
@@ -627,17 +639,12 @@ func TestTimeout(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
_config := getConfigTest(0)
|
|
||||||
_, err = NewAPI(
|
_, err = NewAPI(
|
||||||
true,
|
true,
|
||||||
true,
|
true,
|
||||||
apiGinTO,
|
apiGinTO,
|
||||||
hdbTO,
|
hdbTO,
|
||||||
l2DBTO,
|
l2DBTO,
|
||||||
&_config,
|
|
||||||
&NodeConfig{
|
|
||||||
ForgeDelay: 180,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
|||||||
@@ -7,10 +7,12 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/apitypes"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||||
"github.com/mitchellh/copystructure"
|
"github.com/mitchellh/copystructure"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
type testBatch struct {
|
type testBatch struct {
|
||||||
@@ -20,7 +22,7 @@ type testBatch struct {
|
|||||||
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash"`
|
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash"`
|
||||||
Timestamp time.Time `json:"timestamp"`
|
Timestamp time.Time `json:"timestamp"`
|
||||||
ForgerAddr ethCommon.Address `json:"forgerAddr"`
|
ForgerAddr ethCommon.Address `json:"forgerAddr"`
|
||||||
CollectedFees map[common.TokenID]string `json:"collectedFees"`
|
CollectedFees apitypes.CollectedFeesAPI `json:"collectedFees"`
|
||||||
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD"`
|
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD"`
|
||||||
StateRoot string `json:"stateRoot"`
|
StateRoot string `json:"stateRoot"`
|
||||||
NumAccounts int `json:"numAccounts"`
|
NumAccounts int `json:"numAccounts"`
|
||||||
@@ -73,9 +75,9 @@ func genTestBatches(
|
|||||||
if !found {
|
if !found {
|
||||||
panic("block not found")
|
panic("block not found")
|
||||||
}
|
}
|
||||||
collectedFees := make(map[common.TokenID]string)
|
collectedFees := apitypes.CollectedFeesAPI(make(map[common.TokenID]apitypes.BigIntStr))
|
||||||
for k, v := range cBatches[i].CollectedFees {
|
for k, v := range cBatches[i].CollectedFees {
|
||||||
collectedFees[k] = v.String()
|
collectedFees[k] = *apitypes.NewBigIntStr(v)
|
||||||
}
|
}
|
||||||
forgedTxs := 0
|
forgedTxs := 0
|
||||||
for _, tx := range txs {
|
for _, tx := range txs {
|
||||||
@@ -132,7 +134,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
limit := 3
|
limit := 3
|
||||||
path := fmt.Sprintf("%s?limit=%d", endpoint, limit)
|
path := fmt.Sprintf("%s?limit=%d", endpoint, limit)
|
||||||
err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertBatches(t, tc.batches, fetchedBatches)
|
assertBatches(t, tc.batches, fetchedBatches)
|
||||||
|
|
||||||
// minBatchNum
|
// minBatchNum
|
||||||
@@ -141,7 +143,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
minBatchNum := tc.batches[len(tc.batches)/2].BatchNum
|
minBatchNum := tc.batches[len(tc.batches)/2].BatchNum
|
||||||
path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d", endpoint, minBatchNum, limit)
|
path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d", endpoint, minBatchNum, limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
minBatchNumBatches := []testBatch{}
|
minBatchNumBatches := []testBatch{}
|
||||||
for i := 0; i < len(tc.batches); i++ {
|
for i := 0; i < len(tc.batches); i++ {
|
||||||
if tc.batches[i].BatchNum > minBatchNum {
|
if tc.batches[i].BatchNum > minBatchNum {
|
||||||
@@ -156,7 +158,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum
|
maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum
|
||||||
path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d", endpoint, maxBatchNum, limit)
|
path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d", endpoint, maxBatchNum, limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
maxBatchNumBatches := []testBatch{}
|
maxBatchNumBatches := []testBatch{}
|
||||||
for i := 0; i < len(tc.batches); i++ {
|
for i := 0; i < len(tc.batches); i++ {
|
||||||
if tc.batches[i].BatchNum < maxBatchNum {
|
if tc.batches[i].BatchNum < maxBatchNum {
|
||||||
@@ -171,7 +173,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
slotNum := tc.batches[len(tc.batches)/2].SlotNum
|
slotNum := tc.batches[len(tc.batches)/2].SlotNum
|
||||||
path = fmt.Sprintf("%s?slotNum=%d&limit=%d", endpoint, slotNum, limit)
|
path = fmt.Sprintf("%s?slotNum=%d&limit=%d", endpoint, slotNum, limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
slotNumBatches := []testBatch{}
|
slotNumBatches := []testBatch{}
|
||||||
for i := 0; i < len(tc.batches); i++ {
|
for i := 0; i < len(tc.batches); i++ {
|
||||||
if tc.batches[i].SlotNum == slotNum {
|
if tc.batches[i].SlotNum == slotNum {
|
||||||
@@ -186,7 +188,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr
|
forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr
|
||||||
path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d", endpoint, forgerAddr.String(), limit)
|
path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d", endpoint, forgerAddr.String(), limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
forgerAddrBatches := []testBatch{}
|
forgerAddrBatches := []testBatch{}
|
||||||
for i := 0; i < len(tc.batches); i++ {
|
for i := 0; i < len(tc.batches); i++ {
|
||||||
if tc.batches[i].ForgerAddr == forgerAddr {
|
if tc.batches[i].ForgerAddr == forgerAddr {
|
||||||
@@ -200,7 +202,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
limit = 6
|
limit = 6
|
||||||
path = fmt.Sprintf("%s?limit=%d", endpoint, limit)
|
path = fmt.Sprintf("%s?limit=%d", endpoint, limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
flippedBatches := []testBatch{}
|
flippedBatches := []testBatch{}
|
||||||
for i := len(tc.batches) - 1; i >= 0; i-- {
|
for i := len(tc.batches) - 1; i >= 0; i-- {
|
||||||
flippedBatches = append(flippedBatches, tc.batches[i])
|
flippedBatches = append(flippedBatches, tc.batches[i])
|
||||||
@@ -214,7 +216,7 @@ func TestGetBatches(t *testing.T) {
|
|||||||
minBatchNum = tc.batches[len(tc.batches)/4].BatchNum
|
minBatchNum = tc.batches[len(tc.batches)/4].BatchNum
|
||||||
path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d", endpoint, minBatchNum, maxBatchNum, limit)
|
path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d", endpoint, minBatchNum, maxBatchNum, limit)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
minMaxBatchNumBatches := []testBatch{}
|
minMaxBatchNumBatches := []testBatch{}
|
||||||
for i := 0; i < len(tc.batches); i++ {
|
for i := 0; i < len(tc.batches); i++ {
|
||||||
if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum {
|
if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum {
|
||||||
@@ -227,25 +229,25 @@ func TestGetBatches(t *testing.T) {
|
|||||||
fetchedBatches = []testBatch{}
|
fetchedBatches = []testBatch{}
|
||||||
path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25)
|
path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertBatches(t, []testBatch{}, fetchedBatches)
|
assertBatches(t, []testBatch{}, fetchedBatches)
|
||||||
|
|
||||||
// 400
|
// 400
|
||||||
// Invalid minBatchNum
|
// Invalid minBatchNum
|
||||||
path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2)
|
path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2)
|
||||||
err = doBadReq("GET", path, nil, 400)
|
err = doBadReq("GET", path, nil, 400)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// Invalid forgerAddr
|
// Invalid forgerAddr
|
||||||
path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001")
|
path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001")
|
||||||
err = doBadReq("GET", path, nil, 400)
|
err = doBadReq("GET", path, nil, 400)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetBatch(t *testing.T) {
|
func TestGetBatch(t *testing.T) {
|
||||||
endpoint := apiURL + "batches/"
|
endpoint := apiURL + "batches/"
|
||||||
for _, batch := range tc.batches {
|
for _, batch := range tc.batches {
|
||||||
fetchedBatch := testBatch{}
|
fetchedBatch := testBatch{}
|
||||||
assert.NoError(
|
require.NoError(
|
||||||
t, doGoodReq(
|
t, doGoodReq(
|
||||||
"GET",
|
"GET",
|
||||||
endpoint+strconv.Itoa(int(batch.BatchNum)),
|
endpoint+strconv.Itoa(int(batch.BatchNum)),
|
||||||
@@ -255,16 +257,16 @@ func TestGetBatch(t *testing.T) {
|
|||||||
assertBatch(t, batch, fetchedBatch)
|
assertBatch(t, batch, fetchedBatch)
|
||||||
}
|
}
|
||||||
// 400
|
// 400
|
||||||
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
|
require.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
|
||||||
// 404
|
// 404
|
||||||
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
|
require.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetFullBatch(t *testing.T) {
|
func TestGetFullBatch(t *testing.T) {
|
||||||
endpoint := apiURL + "full-batches/"
|
endpoint := apiURL + "full-batches/"
|
||||||
for _, fullBatch := range tc.fullBatches {
|
for _, fullBatch := range tc.fullBatches {
|
||||||
fetchedFullBatch := testFullBatch{}
|
fetchedFullBatch := testFullBatch{}
|
||||||
assert.NoError(
|
require.NoError(
|
||||||
t, doGoodReq(
|
t, doGoodReq(
|
||||||
"GET",
|
"GET",
|
||||||
endpoint+strconv.Itoa(int(fullBatch.Batch.BatchNum)),
|
endpoint+strconv.Itoa(int(fullBatch.Batch.BatchNum)),
|
||||||
@@ -275,9 +277,9 @@ func TestGetFullBatch(t *testing.T) {
|
|||||||
assertTxs(t, fullBatch.Txs, fetchedFullBatch.Txs)
|
assertTxs(t, fullBatch.Txs, fetchedFullBatch.Txs)
|
||||||
}
|
}
|
||||||
// 400
|
// 400
|
||||||
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
|
require.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
|
||||||
// 404
|
// 404
|
||||||
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
|
require.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertBatches(t *testing.T, expected, actual []testBatch) {
|
func assertBatches(t *testing.T, expected, actual []testBatch) {
|
||||||
|
|||||||
@@ -99,7 +99,9 @@ func TestGetSlot(t *testing.T) {
|
|||||||
nil, &fetchedSlot,
|
nil, &fetchedSlot,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
|
// ni, err := api.h.GetNodeInfoAPI()
|
||||||
|
// assert.NoError(t, err)
|
||||||
|
emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
|
||||||
assertSlot(t, emptySlot, fetchedSlot)
|
assertSlot(t, emptySlot, fetchedSlot)
|
||||||
|
|
||||||
// Invalid slotNum
|
// Invalid slotNum
|
||||||
@@ -127,8 +129,10 @@ func TestGetSlots(t *testing.T) {
|
|||||||
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
|
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
allSlots := tc.slots
|
allSlots := tc.slots
|
||||||
|
// ni, err := api.h.GetNodeInfoAPI()
|
||||||
|
// assert.NoError(t, err)
|
||||||
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
|
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
|
||||||
emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
|
emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
|
||||||
allSlots = append(allSlots, emptySlot)
|
allSlots = append(allSlots, emptySlot)
|
||||||
}
|
}
|
||||||
assertSlots(t, allSlots, fetchedSlots)
|
assertSlots(t, allSlots, fetchedSlots)
|
||||||
|
|||||||
312
api/state.go
312
api/state.go
@@ -1,320 +1,16 @@
|
|||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/hermeznetwork/hermez-node/apitypes"
|
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
|
||||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
|
||||||
"github.com/hermeznetwork/tracerr"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Network define status of the network
|
|
||||||
type Network struct {
|
|
||||||
LastEthBlock int64 `json:"lastEthereumBlock"`
|
|
||||||
LastSyncBlock int64 `json:"lastSynchedBlock"`
|
|
||||||
LastBatch *historydb.BatchAPI `json:"lastBatch"`
|
|
||||||
CurrentSlot int64 `json:"currentSlot"`
|
|
||||||
NextForgers []NextForger `json:"nextForgers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeConfig is the configuration of the node that is exposed via API
|
|
||||||
type NodeConfig struct {
|
|
||||||
// ForgeDelay in seconds
|
|
||||||
ForgeDelay float64 `json:"forgeDelay"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextForger is a representation of the information of a coordinator and the period will forge
|
|
||||||
type NextForger struct {
|
|
||||||
Coordinator historydb.CoordinatorAPI `json:"coordinator"`
|
|
||||||
Period Period `json:"period"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Period is a representation of a period
|
|
||||||
type Period struct {
|
|
||||||
SlotNum int64 `json:"slotNum"`
|
|
||||||
FromBlock int64 `json:"fromBlock"`
|
|
||||||
ToBlock int64 `json:"toBlock"`
|
|
||||||
FromTimestamp time.Time `json:"fromTimestamp"`
|
|
||||||
ToTimestamp time.Time `json:"toTimestamp"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *API) getState(c *gin.Context) {
|
func (a *API) getState(c *gin.Context) {
|
||||||
// TODO: There are no events for the buckets information, so now this information will be 0
|
stateAPI, err := a.h.GetStateAPI()
|
||||||
a.status.RLock()
|
|
||||||
status := a.status //nolint
|
|
||||||
a.status.RUnlock()
|
|
||||||
c.JSON(http.StatusOK, status) //nolint
|
|
||||||
}
|
|
||||||
|
|
||||||
// SC Vars
|
|
||||||
|
|
||||||
// SetRollupVariables set Status.Rollup variables
|
|
||||||
func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
|
|
||||||
a.status.Lock()
|
|
||||||
var rollupVAPI historydb.RollupVariablesAPI
|
|
||||||
rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
|
|
||||||
rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
|
|
||||||
rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
|
|
||||||
rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
|
|
||||||
|
|
||||||
for i, bucket := range rollupVariables.Buckets {
|
|
||||||
var apiBucket historydb.BucketParamsAPI
|
|
||||||
apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
|
|
||||||
apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
|
|
||||||
apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
|
|
||||||
apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
|
|
||||||
rollupVAPI.Buckets[i] = apiBucket
|
|
||||||
}
|
|
||||||
|
|
||||||
rollupVAPI.SafeMode = rollupVariables.SafeMode
|
|
||||||
a.status.Rollup = rollupVAPI
|
|
||||||
a.status.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWDelayerVariables set Status.WithdrawalDelayer variables
|
|
||||||
func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
|
|
||||||
a.status.Lock()
|
|
||||||
a.status.WithdrawalDelayer = wDelayerVariables
|
|
||||||
a.status.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAuctionVariables set Status.Auction variables
|
|
||||||
func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
|
|
||||||
a.status.Lock()
|
|
||||||
var auctionAPI historydb.AuctionVariablesAPI
|
|
||||||
|
|
||||||
auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
|
|
||||||
auctionAPI.DonationAddress = auctionVariables.DonationAddress
|
|
||||||
auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
|
|
||||||
auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
|
|
||||||
auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
|
|
||||||
auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
|
|
||||||
auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
|
|
||||||
auctionAPI.Outbidding = auctionVariables.Outbidding
|
|
||||||
auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
|
|
||||||
|
|
||||||
for i, slot := range auctionVariables.DefaultSlotSetBid {
|
|
||||||
auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, ratio := range auctionVariables.AllocationRatio {
|
|
||||||
auctionAPI.AllocationRatio[i] = ratio
|
|
||||||
}
|
|
||||||
|
|
||||||
a.status.Auction = auctionAPI
|
|
||||||
a.status.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Network
|
|
||||||
|
|
||||||
// UpdateNetworkInfoBlock update Status.Network block related information
|
|
||||||
func (a *API) UpdateNetworkInfoBlock(
|
|
||||||
lastEthBlock, lastSyncBlock common.Block,
|
|
||||||
) {
|
|
||||||
a.status.Network.LastSyncBlock = lastSyncBlock.Num
|
|
||||||
a.status.Network.LastEthBlock = lastEthBlock.Num
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateNetworkInfo update Status.Network information
|
|
||||||
func (a *API) UpdateNetworkInfo(
|
|
||||||
lastEthBlock, lastSyncBlock common.Block,
|
|
||||||
lastBatchNum common.BatchNum, currentSlot int64,
|
|
||||||
) error {
|
|
||||||
lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
|
|
||||||
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
||||||
lastBatch = nil
|
|
||||||
} else if err != nil {
|
|
||||||
return tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
|
|
||||||
nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
|
|
||||||
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
||||||
nextForgers = nil
|
|
||||||
} else if err != nil {
|
|
||||||
return tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
a.status.Lock()
|
|
||||||
a.status.Network.LastSyncBlock = lastSyncBlock.Num
|
|
||||||
a.status.Network.LastEthBlock = lastEthBlock.Num
|
|
||||||
a.status.Network.LastBatch = lastBatch
|
|
||||||
a.status.Network.CurrentSlot = currentSlot
|
|
||||||
a.status.Network.NextForgers = nextForgers
|
|
||||||
|
|
||||||
// Update buckets withdrawals
|
|
||||||
bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
|
|
||||||
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
|
||||||
bucketsUpdate = nil
|
|
||||||
} else if err != nil {
|
|
||||||
return tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, bucketParams := range a.status.Rollup.Buckets {
|
|
||||||
for _, bucketUpdate := range bucketsUpdate {
|
|
||||||
if bucketUpdate.NumBucket == i {
|
|
||||||
bucketParams.Withdrawals = bucketUpdate.Withdrawals
|
|
||||||
a.status.Rollup.Buckets[i] = bucketParams
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
a.status.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
|
|
||||||
func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
|
|
||||||
var slots [6]*big.Int
|
|
||||||
|
|
||||||
for i, slot := range defaultSlotSetBid {
|
|
||||||
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
|
|
||||||
if !ok {
|
|
||||||
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
|
|
||||||
}
|
|
||||||
slots[i] = bigInt
|
|
||||||
}
|
|
||||||
|
|
||||||
return slots, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNextForgers returns next forgers
|
|
||||||
func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
|
|
||||||
secondsPerBlock := int64(15) //nolint:gomnd
|
|
||||||
// currentSlot and lastClosedSlot included
|
|
||||||
limit := uint(lastClosedSlot - currentSlot + 1)
|
|
||||||
bids, _, err := a.h.GetBestBidsAPI(¤tSlot, &lastClosedSlot, nil, &limit, "ASC")
|
|
||||||
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
nextForgers := []NextForger{}
|
|
||||||
// Get min bid info
|
|
||||||
var minBidInfo []historydb.MinBidInfo
|
|
||||||
if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
|
|
||||||
// All min bids can be calculated with the last update of AuctionVariables
|
|
||||||
bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
retBadReq(err, c)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
c.JSON(http.StatusOK, stateAPI)
|
||||||
minBidInfo = []historydb.MinBidInfo{{
|
|
||||||
DefaultSlotSetBid: bigIntSlots,
|
|
||||||
DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
|
|
||||||
}}
|
|
||||||
} else {
|
|
||||||
// Get all the relevant updates from the DB
|
|
||||||
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Create nextForger for each slot
|
|
||||||
for i := currentSlot; i <= lastClosedSlot; i++ {
|
|
||||||
fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
|
|
||||||
toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
|
|
||||||
nextForger := NextForger{
|
|
||||||
Period: Period{
|
|
||||||
SlotNum: i,
|
|
||||||
FromBlock: fromBlock,
|
|
||||||
ToBlock: toBlock,
|
|
||||||
FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
|
|
||||||
ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
foundForger := false
|
|
||||||
// If there is a bid for a slot, get forger (coordinator)
|
|
||||||
for j := range bids {
|
|
||||||
slotNum := bids[j].SlotNum
|
|
||||||
if slotNum == i {
|
|
||||||
// There's a bid for the slot
|
|
||||||
// Check if the bid is greater than the minimum required
|
|
||||||
for i := 0; i < len(minBidInfo); i++ {
|
|
||||||
// Find the most recent update
|
|
||||||
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
|
|
||||||
// Get min bid
|
|
||||||
minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
|
|
||||||
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
|
|
||||||
// Check if the bid has beaten the minimum
|
|
||||||
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
|
|
||||||
if !ok {
|
|
||||||
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
|
|
||||||
}
|
|
||||||
if minBid.Cmp(bid) == 1 {
|
|
||||||
// Min bid is greater than bid, the slot will be forged by boot coordinator
|
|
||||||
break
|
|
||||||
}
|
|
||||||
foundForger = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !foundForger { // There is no bid or it's smaller than the minimum
|
|
||||||
break
|
|
||||||
}
|
|
||||||
coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
nextForger.Coordinator = *coordinator
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If there is no bid, the coordinator that will forge is boot coordinator
|
|
||||||
if !foundForger {
|
|
||||||
nextForger.Coordinator = historydb.CoordinatorAPI{
|
|
||||||
Forger: a.status.Auction.BootCoordinator,
|
|
||||||
URL: a.status.Auction.BootCoordinatorURL,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nextForgers = append(nextForgers, nextForger)
|
|
||||||
}
|
|
||||||
return nextForgers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metrics
|
|
||||||
|
|
||||||
// UpdateMetrics update Status.Metrics information
|
|
||||||
func (a *API) UpdateMetrics() error {
|
|
||||||
a.status.RLock()
|
|
||||||
if a.status.Network.LastBatch == nil {
|
|
||||||
a.status.RUnlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
batchNum := a.status.Network.LastBatch.BatchNum
|
|
||||||
a.status.RUnlock()
|
|
||||||
metrics, err := a.h.GetMetricsAPI(batchNum)
|
|
||||||
if err != nil {
|
|
||||||
return tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
a.status.Lock()
|
|
||||||
a.status.Metrics = *metrics
|
|
||||||
a.status.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recommended fee
|
|
||||||
|
|
||||||
// UpdateRecommendedFee update Status.RecommendedFee information
|
|
||||||
func (a *API) UpdateRecommendedFee() error {
|
|
||||||
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
|
|
||||||
if err != nil {
|
|
||||||
return tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
var minFeeUSD float64
|
|
||||||
if a.l2 != nil {
|
|
||||||
minFeeUSD = a.l2.MinFeeUSD()
|
|
||||||
}
|
|
||||||
a.status.Lock()
|
|
||||||
a.status.RecommendedFee.ExistingAccount =
|
|
||||||
math.Max(feeExistingAccount, minFeeUSD)
|
|
||||||
a.status.RecommendedFee.CreatesAccount =
|
|
||||||
math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
|
|
||||||
a.status.RecommendedFee.CreatesAccountAndRegister =
|
|
||||||
math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
|
|
||||||
a.status.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
|
|
||||||
type testStatus struct {
|
type testStatus struct {
|
||||||
Network testNetwork `json:"network"`
|
Network testNetwork `json:"network"`
|
||||||
Metrics historydb.Metrics `json:"metrics"`
|
Metrics historydb.MetricsAPI `json:"metrics"`
|
||||||
Rollup historydb.RollupVariablesAPI `json:"rollup"`
|
Rollup historydb.RollupVariablesAPI `json:"rollup"`
|
||||||
Auction historydb.AuctionVariablesAPI `json:"auction"`
|
Auction historydb.AuctionVariablesAPI `json:"auction"`
|
||||||
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
|
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
|
||||||
@@ -25,14 +25,15 @@ type testNetwork struct {
|
|||||||
LastSyncBlock int64 `json:"lastSynchedBlock"`
|
LastSyncBlock int64 `json:"lastSynchedBlock"`
|
||||||
LastBatch testBatch `json:"lastBatch"`
|
LastBatch testBatch `json:"lastBatch"`
|
||||||
CurrentSlot int64 `json:"currentSlot"`
|
CurrentSlot int64 `json:"currentSlot"`
|
||||||
NextForgers []NextForger `json:"nextForgers"`
|
NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetRollupVariables(t *testing.T) {
|
func TestSetRollupVariables(t *testing.T) {
|
||||||
rollupVars := &common.RollupVariables{}
|
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
|
||||||
assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true)
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
api.SetRollupVariables(tc.rollupVars)
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true)
|
require.NoError(t, err)
|
||||||
|
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
|
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
|
||||||
@@ -51,17 +52,19 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSetWDelayerVariables(t *testing.T) {
|
func TestSetWDelayerVariables(t *testing.T) {
|
||||||
wdelayerVars := &common.WDelayerVariables{}
|
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
|
||||||
assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
api.SetWDelayerVariables(tc.wdelayerVars)
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetAuctionVariables(t *testing.T) {
|
func TestSetAuctionVariables(t *testing.T) {
|
||||||
auctionVars := &common.AuctionVariables{}
|
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
|
||||||
assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
api.SetAuctionVariables(tc.auctionVars)
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
|
require.NoError(t, err)
|
||||||
|
assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
|
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
|
||||||
@@ -85,11 +88,6 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateNetworkInfo(t *testing.T) {
|
func TestUpdateNetworkInfo(t *testing.T) {
|
||||||
status := &Network{}
|
|
||||||
assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
|
|
||||||
assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
|
|
||||||
assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
|
|
||||||
assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
|
|
||||||
lastBlock := tc.blocks[3]
|
lastBlock := tc.blocks[3]
|
||||||
lastBatchNum := common.BatchNum(3)
|
lastBatchNum := common.BatchNum(3)
|
||||||
currentSlotNum := int64(1)
|
currentSlotNum := int64(1)
|
||||||
@@ -118,14 +116,17 @@ func TestUpdateNetworkInfo(t *testing.T) {
|
|||||||
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
|
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
|
assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
|
||||||
assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
|
assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
|
||||||
assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
|
assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
|
||||||
|
assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
|
||||||
|
assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
|
||||||
|
assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateMetrics(t *testing.T) {
|
func TestUpdateMetrics(t *testing.T) {
|
||||||
@@ -133,51 +134,62 @@ func TestUpdateMetrics(t *testing.T) {
|
|||||||
lastBlock := tc.blocks[3]
|
lastBlock := tc.blocks[3]
|
||||||
lastBatchNum := common.BatchNum(12)
|
lastBatchNum := common.BatchNum(12)
|
||||||
currentSlotNum := int64(1)
|
currentSlotNum := int64(1)
|
||||||
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = api.UpdateMetrics()
|
err = stateAPIUpdater.UpdateMetrics()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
|
require.NoError(t, err)
|
||||||
assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
|
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
|
||||||
assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
|
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
|
||||||
assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
|
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
|
||||||
|
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
|
||||||
|
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
|
||||||
|
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateRecommendedFee(t *testing.T) {
|
func TestUpdateRecommendedFee(t *testing.T) {
|
||||||
err := api.UpdateRecommendedFee()
|
err := stateAPIUpdater.UpdateRecommendedFee()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
var minFeeUSD float64
|
var minFeeUSD float64
|
||||||
if api.l2 != nil {
|
if api.l2 != nil {
|
||||||
minFeeUSD = api.l2.MinFeeUSD()
|
minFeeUSD = api.l2.MinFeeUSD()
|
||||||
}
|
}
|
||||||
assert.Greater(t, api.status.RecommendedFee.ExistingAccount, minFeeUSD)
|
ni, err := api.h.GetNodeInfoAPI()
|
||||||
assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
|
require.NoError(t, err)
|
||||||
api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
|
assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
|
||||||
assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
|
assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
|
||||||
api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
|
ni.StateAPI.RecommendedFee.ExistingAccount*
|
||||||
|
historydb.CreateAccountExtraFeePercentage)
|
||||||
|
assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountInternal,
|
||||||
|
ni.StateAPI.RecommendedFee.ExistingAccount*
|
||||||
|
historydb.CreateAccountInternalExtraFeePercentage)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetState(t *testing.T) {
|
func TestGetState(t *testing.T) {
|
||||||
lastBlock := tc.blocks[3]
|
lastBlock := tc.blocks[3]
|
||||||
lastBatchNum := common.BatchNum(12)
|
lastBatchNum := common.BatchNum(12)
|
||||||
currentSlotNum := int64(1)
|
currentSlotNum := int64(1)
|
||||||
api.SetRollupVariables(tc.rollupVars)
|
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
|
||||||
api.SetWDelayerVariables(tc.wdelayerVars)
|
Rollup: &tc.rollupVars,
|
||||||
api.SetAuctionVariables(tc.auctionVars)
|
Auction: &tc.auctionVars,
|
||||||
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
WDelayer: &tc.wdelayerVars,
|
||||||
assert.NoError(t, err)
|
})
|
||||||
err = api.UpdateMetrics()
|
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = api.UpdateRecommendedFee()
|
err = stateAPIUpdater.UpdateMetrics()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
err = stateAPIUpdater.UpdateRecommendedFee()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, stateAPIUpdater.Store())
|
||||||
|
|
||||||
endpoint := apiURL + "state"
|
endpoint := apiURL + "state"
|
||||||
var status testStatus
|
var status testStatus
|
||||||
|
|
||||||
assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
|
require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
|
||||||
|
|
||||||
// SC vars
|
// SC vars
|
||||||
// UpdateNetworkInfo will overwrite buckets withdrawal values
|
// UpdateNetworkInfo will overwrite buckets withdrawal values
|
||||||
@@ -205,12 +217,14 @@ func TestGetState(t *testing.T) {
|
|||||||
// TODO: perform real asserts (not just greater than 0)
|
// TODO: perform real asserts (not just greater than 0)
|
||||||
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
|
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
|
||||||
assert.Equal(t, status.RecommendedFee.CreatesAccount,
|
assert.Equal(t, status.RecommendedFee.CreatesAccount,
|
||||||
status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
|
status.RecommendedFee.ExistingAccount*
|
||||||
assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
|
historydb.CreateAccountExtraFeePercentage)
|
||||||
status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
|
assert.Equal(t, status.RecommendedFee.CreatesAccountInternal,
|
||||||
|
status.RecommendedFee.ExistingAccount*
|
||||||
|
historydb.CreateAccountInternalExtraFeePercentage)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertNextForgers(t *testing.T, expected, actual []NextForger) {
|
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
|
||||||
assert.Equal(t, len(expected), len(actual))
|
assert.Equal(t, len(expected), len(actual))
|
||||||
for i := range expected {
|
for i := range expected {
|
||||||
// ignore timestamps and other metadata
|
// ignore timestamps and other metadata
|
||||||
|
|||||||
@@ -179,7 +179,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
|
|||||||
// Get public key
|
// Get public key
|
||||||
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
|
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tracerr.Wrap(err)
|
return tracerr.Wrap(fmt.Errorf("Error getting from account: %w", err))
|
||||||
}
|
}
|
||||||
// Validate TokenID
|
// Validate TokenID
|
||||||
if poolTx.TokenID != account.TokenID {
|
if poolTx.TokenID != account.TokenID {
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"database/sql/driver"
|
"database/sql/driver"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
@@ -19,7 +18,10 @@ import (
|
|||||||
|
|
||||||
// BigIntStr is used to scan/value *big.Int directly into strings from/to sql DBs.
|
// BigIntStr is used to scan/value *big.Int directly into strings from/to sql DBs.
|
||||||
// It assumes that *big.Int are inserted/fetched to/from the DB using the BigIntMeddler meddler
|
// It assumes that *big.Int are inserted/fetched to/from the DB using the BigIntMeddler meddler
|
||||||
// defined at github.com/hermeznetwork/hermez-node/db
|
// defined at github.com/hermeznetwork/hermez-node/db. Since *big.Int is
|
||||||
|
// stored as DECIMAL in SQL, there's no need to implement Scan()/Value()
|
||||||
|
// because DECIMALS are encoded/decoded as strings by the sql driver, and
|
||||||
|
// BigIntStr is already a string.
|
||||||
type BigIntStr string
|
type BigIntStr string
|
||||||
|
|
||||||
// NewBigIntStr creates a *BigIntStr from a *big.Int.
|
// NewBigIntStr creates a *BigIntStr from a *big.Int.
|
||||||
@@ -32,34 +34,6 @@ func NewBigIntStr(bigInt *big.Int) *BigIntStr {
|
|||||||
return &bigIntStr
|
return &bigIntStr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scan implements Scanner for database/sql
|
|
||||||
func (b *BigIntStr) Scan(src interface{}) error {
|
|
||||||
srcBytes, ok := src.([]byte)
|
|
||||||
if !ok {
|
|
||||||
return tracerr.Wrap(fmt.Errorf("can't scan %T into apitypes.BigIntStr", src))
|
|
||||||
}
|
|
||||||
// bytes to *big.Int
|
|
||||||
bigInt := new(big.Int).SetBytes(srcBytes)
|
|
||||||
// *big.Int to BigIntStr
|
|
||||||
bigIntStr := NewBigIntStr(bigInt)
|
|
||||||
if bigIntStr == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
*b = *bigIntStr
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value implements valuer for database/sql
|
|
||||||
func (b BigIntStr) Value() (driver.Value, error) {
|
|
||||||
// string to *big.Int
|
|
||||||
bigInt, ok := new(big.Int).SetString(string(b), 10)
|
|
||||||
if !ok || bigInt == nil {
|
|
||||||
return nil, tracerr.Wrap(errors.New("invalid representation of a *big.Int"))
|
|
||||||
}
|
|
||||||
// *big.Int to bytes
|
|
||||||
return bigInt.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StrBigInt is used to unmarshal BigIntStr directly into an alias of big.Int
|
// StrBigInt is used to unmarshal BigIntStr directly into an alias of big.Int
|
||||||
type StrBigInt big.Int
|
type StrBigInt big.Int
|
||||||
|
|
||||||
@@ -73,22 +47,16 @@ func (s *StrBigInt) UnmarshalText(text []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CollectedFees is used to retrieve common.batch.CollectedFee from the DB
|
// CollectedFeesAPI is send common.batch.CollectedFee through the API
|
||||||
type CollectedFees map[common.TokenID]BigIntStr
|
type CollectedFeesAPI map[common.TokenID]BigIntStr
|
||||||
|
|
||||||
// UnmarshalJSON unmarshals a json representation of map[common.TokenID]*big.Int
|
// NewCollectedFeesAPI creates a new CollectedFeesAPI from a *big.Int map
|
||||||
func (c *CollectedFees) UnmarshalJSON(text []byte) error {
|
func NewCollectedFeesAPI(m map[common.TokenID]*big.Int) CollectedFeesAPI {
|
||||||
bigIntMap := make(map[common.TokenID]*big.Int)
|
c := CollectedFeesAPI(make(map[common.TokenID]BigIntStr))
|
||||||
if err := json.Unmarshal(text, &bigIntMap); err != nil {
|
for k, v := range m {
|
||||||
return tracerr.Wrap(err)
|
c[k] = *NewBigIntStr(v)
|
||||||
}
|
}
|
||||||
*c = CollectedFees(make(map[common.TokenID]BigIntStr))
|
return c
|
||||||
for k, v := range bigIntMap {
|
|
||||||
bStr := NewBigIntStr(v)
|
|
||||||
(CollectedFees(*c)[k]) = *bStr
|
|
||||||
}
|
|
||||||
// *c = CollectedFees(bStrMap)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
|
// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ The `hermez-node` has been tested with go version 1.14
|
|||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
```
|
```shell
|
||||||
NAME:
|
NAME:
|
||||||
hermez-node - A new cli application
|
hermez-node - A new cli application
|
||||||
|
|
||||||
@@ -16,18 +16,18 @@ USAGE:
|
|||||||
node [global options] command [command options] [arguments...]
|
node [global options] command [command options] [arguments...]
|
||||||
|
|
||||||
VERSION:
|
VERSION:
|
||||||
0.1.0-alpha
|
v0.1.0-6-gd8a50c5
|
||||||
|
|
||||||
COMMANDS:
|
COMMANDS:
|
||||||
|
version Show the application version
|
||||||
importkey Import ethereum private key
|
importkey Import ethereum private key
|
||||||
genbjj Generate a new BabyJubJub key
|
genbjj Generate a new BabyJubJub key
|
||||||
wipesql Wipe the SQL DB (HistoryDB and L2DB), leaving the DB in a clean state
|
wipesql Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state
|
||||||
run Run the hermez-node in the indicated mode
|
run Run the hermez-node in the indicated mode
|
||||||
|
discard Discard blocks up to a specified block number
|
||||||
help, h Shows a list of commands or help for one command
|
help, h Shows a list of commands or help for one command
|
||||||
|
|
||||||
GLOBAL OPTIONS:
|
GLOBAL OPTIONS:
|
||||||
--mode MODE Set node MODE (can be "sync" or "coord")
|
|
||||||
--cfg FILE Node configuration FILE
|
|
||||||
--help, -h show help (default: false)
|
--help, -h show help (default: false)
|
||||||
--version, -v print the version (default: false)
|
--version, -v print the version (default: false)
|
||||||
```
|
```
|
||||||
@@ -75,7 +75,7 @@ when running the coordinator in sync mode
|
|||||||
|
|
||||||
Building the node requires using the packr utility to bundle the database
|
Building the node requires using the packr utility to bundle the database
|
||||||
migrations inside the resulting binary. Install the packr utility with:
|
migrations inside the resulting binary. Install the packr utility with:
|
||||||
```
|
```shell
|
||||||
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
|
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -83,7 +83,7 @@ Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
|
|||||||
not be found.
|
not be found.
|
||||||
|
|
||||||
Now build the node executable:
|
Now build the node executable:
|
||||||
```
|
```shell
|
||||||
cd ../../db && packr2 && cd -
|
cd ../../db && packr2 && cd -
|
||||||
go build .
|
go build .
|
||||||
cd ../../db && packr2 clean && cd -
|
cd ../../db && packr2 clean && cd -
|
||||||
@@ -98,35 +98,40 @@ run the following examples by replacing `./node` with `go run .` and executing
|
|||||||
them in the `cli/node` directory to build from source and run at the same time.
|
them in the `cli/node` directory to build from source and run at the same time.
|
||||||
|
|
||||||
Run the node in mode synchronizer:
|
Run the node in mode synchronizer:
|
||||||
```
|
```shell
|
||||||
./node --mode sync --cfg cfg.buidler.toml run
|
./node run --mode sync --cfg cfg.buidler.toml
|
||||||
```
|
```
|
||||||
|
|
||||||
Run the node in mode coordinator:
|
Run the node in mode coordinator:
|
||||||
```
|
```shell
|
||||||
./node --mode coord --cfg cfg.buidler.toml run
|
./node run --mode coord --cfg cfg.buidler.toml
|
||||||
```
|
```
|
||||||
|
|
||||||
Import an ethereum private key into the keystore:
|
Import an ethereum private key into the keystore:
|
||||||
```
|
```shell
|
||||||
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
|
./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
|
||||||
```
|
```
|
||||||
|
|
||||||
Generate a new BabyJubJub key pair:
|
Generate a new BabyJubJub key pair:
|
||||||
|
```shell
|
||||||
|
./node genbjj
|
||||||
```
|
```
|
||||||
./node --mode coord --cfg cfg.buidler.toml genbjj
|
|
||||||
|
Check the binary version:
|
||||||
|
```shell
|
||||||
|
./node version
|
||||||
```
|
```
|
||||||
|
|
||||||
Wipe the entier SQL database (this will destroy all synchronized and pool
|
Wipe the entier SQL database (this will destroy all synchronized and pool
|
||||||
data):
|
data):
|
||||||
```
|
```shell
|
||||||
./node --mode coord --cfg cfg.buidler.toml wipesql
|
./node wipesql --mode coord --cfg cfg.buidler.toml
|
||||||
```
|
```
|
||||||
|
|
||||||
Discard all synchronized blocks and associated state up to a given block
|
Discard all synchronized blocks and associated state up to a given block
|
||||||
number. This command is useful in case the synchronizer reaches an invalid
|
number. This command is useful in case the synchronizer reaches an invalid
|
||||||
state and you want to roll back a few blocks and try again (maybe with some
|
state and you want to roll back a few blocks and try again (maybe with some
|
||||||
fixes in the code).
|
fixes in the code).
|
||||||
```
|
```shell
|
||||||
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
|
./node discard --mode coord --cfg cfg.buidler.toml --block 8061330
|
||||||
```
|
```
|
||||||
|
|||||||
24
cli/node/cfg.api.toml
Normal file
24
cli/node/cfg.api.toml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
[API]
|
||||||
|
Address = "localhost:8386"
|
||||||
|
Explorer = true
|
||||||
|
MaxSQLConnections = 10
|
||||||
|
SQLConnectionTimeout = "2s"
|
||||||
|
|
||||||
|
[PostgreSQL]
|
||||||
|
PortWrite = 5432
|
||||||
|
HostWrite = "localhost"
|
||||||
|
UserWrite = "hermez"
|
||||||
|
PasswordWrite = "yourpasswordhere"
|
||||||
|
NameWrite = "hermez"
|
||||||
|
|
||||||
|
[Coordinator.L2DB]
|
||||||
|
SafetyPeriod = 10
|
||||||
|
MaxTxs = 512
|
||||||
|
TTL = "24h"
|
||||||
|
PurgeBatchDelay = 10
|
||||||
|
InvalidateBatchDelay = 20
|
||||||
|
PurgeBlockDelay = 10
|
||||||
|
InvalidateBlockDelay = 20
|
||||||
|
|
||||||
|
[Coordinator.API]
|
||||||
|
Coordinator = true
|
||||||
@@ -8,10 +8,31 @@ SQLConnectionTimeout = "2s"
|
|||||||
|
|
||||||
[PriceUpdater]
|
[PriceUpdater]
|
||||||
Interval = "10s"
|
Interval = "10s"
|
||||||
URL = "https://api-pub.bitfinex.com/v2/"
|
URLBitfinexV2 = "https://api-pub.bitfinex.com/v2/"
|
||||||
Type = "bitfinexV2"
|
URLCoinGeckoV3 = "https://api.coingecko.com/api/v3/"
|
||||||
# URL = "https://api.coingecko.com/api/v3/"
|
# Available update methods:
|
||||||
# Type = "coingeckoV3"
|
# - coingeckoV3 (recommended): get price by SC addr using coingecko API
|
||||||
|
# - bitfinexV2: get price by token symbol using bitfinex API
|
||||||
|
# - static (recommended for blacklisting tokens): use the given StaticValue to set the price (if not provided 0 will be used)
|
||||||
|
# - ignore: don't update the price leave it as it is on the DB
|
||||||
|
DefaultUpdateMethod = "coingeckoV3" # Update method used for all the tokens registered on the network, and not listed in [[PriceUpdater.TokensConfig]]
|
||||||
|
[[PriceUpdater.TokensConfig]]
|
||||||
|
UpdateMethod = "bitfinexV2"
|
||||||
|
Symbol = "USDT"
|
||||||
|
Addr = "0xdac17f958d2ee523a2206206994597c13d831ec7"
|
||||||
|
[[PriceUpdater.TokensConfig]]
|
||||||
|
UpdateMethod = "coingeckoV3"
|
||||||
|
Symbol = "ETH"
|
||||||
|
Addr = "0x0000000000000000000000000000000000000000"
|
||||||
|
[[PriceUpdater.TokensConfig]]
|
||||||
|
UpdateMethod = "static"
|
||||||
|
Symbol = "UNI"
|
||||||
|
Addr = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"
|
||||||
|
StaticValue = 30.12
|
||||||
|
[[PriceUpdater.TokensConfig]]
|
||||||
|
UpdateMethod = "ignore"
|
||||||
|
Symbol = "SUSHI"
|
||||||
|
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
|
||||||
|
|
||||||
[Debug]
|
[Debug]
|
||||||
APIAddress = "localhost:12345"
|
APIAddress = "localhost:12345"
|
||||||
@@ -65,6 +86,8 @@ SyncRetryInterval = "1s"
|
|||||||
ForgeDelay = "10s"
|
ForgeDelay = "10s"
|
||||||
ForgeNoTxsDelay = "0s"
|
ForgeNoTxsDelay = "0s"
|
||||||
PurgeByExtDelInterval = "1m"
|
PurgeByExtDelInterval = "1m"
|
||||||
|
MustForgeAtSlotDeadline = true
|
||||||
|
IgnoreSlotCommitment = false
|
||||||
|
|
||||||
[Coordinator.FeeAccount]
|
[Coordinator.FeeAccount]
|
||||||
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
|
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
|
||||||
|
|||||||
135
cli/node/main.go
135
cli/node/main.go
@@ -34,6 +34,22 @@ const (
|
|||||||
modeCoord = "coord"
|
modeCoord = "coord"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Version represents the program based on the git tag
|
||||||
|
Version = "v0.1.0"
|
||||||
|
// Build represents the program based on the git commit
|
||||||
|
Build = "dev"
|
||||||
|
// Date represents the date of application was built
|
||||||
|
Date = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
func cmdVersion(c *cli.Context) error {
|
||||||
|
fmt.Printf("Version = \"%v\"\n", Version)
|
||||||
|
fmt.Printf("Build = \"%v\"\n", Build)
|
||||||
|
fmt.Printf("Date = \"%v\"\n", Date)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func cmdGenBJJ(c *cli.Context) error {
|
func cmdGenBJJ(c *cli.Context) error {
|
||||||
sk := babyjub.NewRandPrivKey()
|
sk := babyjub.NewRandPrivKey()
|
||||||
skBuf := [32]byte(sk)
|
skBuf := [32]byte(sk)
|
||||||
@@ -196,17 +212,7 @@ func cmdWipeSQL(c *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmdRun(c *cli.Context) error {
|
func waitSigInt() {
|
||||||
cfg, err := parseCli(c)
|
|
||||||
if err != nil {
|
|
||||||
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
|
|
||||||
}
|
|
||||||
node, err := node.NewNode(cfg.mode, cfg.node)
|
|
||||||
if err != nil {
|
|
||||||
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
|
|
||||||
}
|
|
||||||
node.Start()
|
|
||||||
|
|
||||||
stopCh := make(chan interface{})
|
stopCh := make(chan interface{})
|
||||||
|
|
||||||
// catch ^C to send the stop signal
|
// catch ^C to send the stop signal
|
||||||
@@ -227,11 +233,40 @@ func cmdRun(c *cli.Context) error {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
<-stopCh
|
<-stopCh
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmdRun(c *cli.Context) error {
|
||||||
|
cfg, err := parseCli(c)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
|
||||||
|
}
|
||||||
|
node, err := node.NewNode(cfg.mode, cfg.node)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
|
||||||
|
}
|
||||||
|
node.Start()
|
||||||
|
waitSigInt()
|
||||||
node.Stop()
|
node.Stop()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func cmdServeAPI(c *cli.Context) error {
|
||||||
|
cfg, err := parseCliAPIServer(c)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
|
||||||
|
}
|
||||||
|
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
|
||||||
|
}
|
||||||
|
srv.Start()
|
||||||
|
waitSigInt()
|
||||||
|
srv.Stop()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func cmdDiscard(c *cli.Context) error {
|
func cmdDiscard(c *cli.Context) error {
|
||||||
_cfg, err := parseCli(c)
|
_cfg, err := parseCli(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -319,20 +354,59 @@ func getConfig(c *cli.Context) (*Config, error) {
|
|||||||
var cfg Config
|
var cfg Config
|
||||||
mode := c.String(flagMode)
|
mode := c.String(flagMode)
|
||||||
nodeCfgPath := c.String(flagCfg)
|
nodeCfgPath := c.String(flagCfg)
|
||||||
if nodeCfgPath == "" {
|
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
|
|
||||||
}
|
|
||||||
var err error
|
var err error
|
||||||
switch mode {
|
switch mode {
|
||||||
case modeSync:
|
case modeSync:
|
||||||
cfg.mode = node.ModeSynchronizer
|
cfg.mode = node.ModeSynchronizer
|
||||||
cfg.node, err = config.LoadNode(nodeCfgPath)
|
cfg.node, err = config.LoadNode(nodeCfgPath, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
case modeCoord:
|
case modeCoord:
|
||||||
cfg.mode = node.ModeCoordinator
|
cfg.mode = node.ModeCoordinator
|
||||||
cfg.node, err = config.LoadCoordinator(nodeCfgPath)
|
cfg.node, err = config.LoadNode(nodeCfgPath, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigAPIServer is the configuration of the api server execution
|
||||||
|
type ConfigAPIServer struct {
|
||||||
|
mode node.Mode
|
||||||
|
server *config.APIServer
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
|
||||||
|
cfg, err := getConfigAPIServer(c)
|
||||||
|
if err != nil {
|
||||||
|
if err := cli.ShowAppHelp(c); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
return cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
|
||||||
|
var cfg ConfigAPIServer
|
||||||
|
mode := c.String(flagMode)
|
||||||
|
nodeCfgPath := c.String(flagCfg)
|
||||||
|
var err error
|
||||||
|
switch mode {
|
||||||
|
case modeSync:
|
||||||
|
cfg.mode = node.ModeSynchronizer
|
||||||
|
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
case modeCoord:
|
||||||
|
cfg.mode = node.ModeCoordinator
|
||||||
|
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
@@ -346,8 +420,8 @@ func getConfig(c *cli.Context) (*Config, error) {
|
|||||||
func main() {
|
func main() {
|
||||||
app := cli.NewApp()
|
app := cli.NewApp()
|
||||||
app.Name = "hermez-node"
|
app.Name = "hermez-node"
|
||||||
app.Version = "0.1.0-alpha"
|
app.Version = Version
|
||||||
app.Flags = []cli.Flag{
|
flags := []cli.Flag{
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: flagMode,
|
Name: flagMode,
|
||||||
Usage: fmt.Sprintf("Set node `MODE` (can be \"%v\" or \"%v\")", modeSync, modeCoord),
|
Usage: fmt.Sprintf("Set node `MODE` (can be \"%v\" or \"%v\")", modeSync, modeCoord),
|
||||||
@@ -361,17 +435,23 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
app.Commands = []*cli.Command{
|
app.Commands = []*cli.Command{
|
||||||
|
{
|
||||||
|
Name: "version",
|
||||||
|
Aliases: []string{},
|
||||||
|
Usage: "Show the application version and build",
|
||||||
|
Action: cmdVersion,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "importkey",
|
Name: "importkey",
|
||||||
Aliases: []string{},
|
Aliases: []string{},
|
||||||
Usage: "Import ethereum private key",
|
Usage: "Import ethereum private key",
|
||||||
Action: cmdImportKey,
|
Action: cmdImportKey,
|
||||||
Flags: []cli.Flag{
|
Flags: append(flags,
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: flagSK,
|
Name: flagSK,
|
||||||
Usage: "ethereum `PRIVATE_KEY` in hex",
|
Usage: "ethereum `PRIVATE_KEY` in hex",
|
||||||
Required: true,
|
Required: true,
|
||||||
}},
|
}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "genbjj",
|
Name: "genbjj",
|
||||||
@@ -385,30 +465,37 @@ func main() {
|
|||||||
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " +
|
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " +
|
||||||
"leaving the DB in a clean state",
|
"leaving the DB in a clean state",
|
||||||
Action: cmdWipeSQL,
|
Action: cmdWipeSQL,
|
||||||
Flags: []cli.Flag{
|
Flags: append(flags,
|
||||||
&cli.BoolFlag{
|
&cli.BoolFlag{
|
||||||
Name: flagYes,
|
Name: flagYes,
|
||||||
Usage: "automatic yes to the prompt",
|
Usage: "automatic yes to the prompt",
|
||||||
Required: false,
|
Required: false,
|
||||||
}},
|
}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "run",
|
Name: "run",
|
||||||
Aliases: []string{},
|
Aliases: []string{},
|
||||||
Usage: "Run the hermez-node in the indicated mode",
|
Usage: "Run the hermez-node in the indicated mode",
|
||||||
Action: cmdRun,
|
Action: cmdRun,
|
||||||
|
Flags: flags,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "serveapi",
|
||||||
|
Aliases: []string{},
|
||||||
|
Usage: "Serve the API only",
|
||||||
|
Action: cmdServeAPI,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "discard",
|
Name: "discard",
|
||||||
Aliases: []string{},
|
Aliases: []string{},
|
||||||
Usage: "Discard blocks up to a specified block number",
|
Usage: "Discard blocks up to a specified block number",
|
||||||
Action: cmdDiscard,
|
Action: cmdDiscard,
|
||||||
Flags: []cli.Flag{
|
Flags: append(flags,
|
||||||
&cli.Int64Flag{
|
&cli.Int64Flag{
|
||||||
Name: flagBlock,
|
Name: flagBlock,
|
||||||
Usage: "last block number to keep",
|
Usage: "last block number to keep",
|
||||||
Required: false,
|
Required: false,
|
||||||
}},
|
}),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -11,15 +11,15 @@ import (
|
|||||||
"github.com/iden3/go-iden3-crypto/babyjub"
|
"github.com/iden3/go-iden3-crypto/babyjub"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez
|
const (
|
||||||
// account creation
|
// AccountCreationAuthMsg is the message that is signed to authorize a
|
||||||
const AccountCreationAuthMsg = "Account creation"
|
// Hermez account creation
|
||||||
|
AccountCreationAuthMsg = "Account creation"
|
||||||
// EIP712Version is the used version of the EIP-712
|
// EIP712Version is the used version of the EIP-712
|
||||||
const EIP712Version = "1"
|
EIP712Version = "1"
|
||||||
|
// EIP712Provider defines the Provider for the EIP-712
|
||||||
// EIP712Provider defines the Provider for the EIP-712
|
EIP712Provider = "Hermez Network"
|
||||||
const EIP712Provider = "Hermez Network"
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// EmptyEthSignature is an ethereum signature of all zeroes
|
// EmptyEthSignature is an ethereum signature of all zeroes
|
||||||
|
|||||||
33
common/eth.go
Normal file
33
common/eth.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
// SCVariables joins all the smart contract variables in a single struct
|
||||||
|
type SCVariables struct {
|
||||||
|
Rollup RollupVariables `validate:"required"`
|
||||||
|
Auction AuctionVariables `validate:"required"`
|
||||||
|
WDelayer WDelayerVariables `validate:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
|
||||||
|
// original SCVariables
|
||||||
|
func (v *SCVariables) AsPtr() *SCVariablesPtr {
|
||||||
|
return &SCVariablesPtr{
|
||||||
|
Rollup: &v.Rollup,
|
||||||
|
Auction: &v.Auction,
|
||||||
|
WDelayer: &v.WDelayer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SCVariablesPtr joins all the smart contract variables as pointers in a single
|
||||||
|
// struct
|
||||||
|
type SCVariablesPtr struct {
|
||||||
|
Rollup *RollupVariables `validate:"required"`
|
||||||
|
Auction *AuctionVariables `validate:"required"`
|
||||||
|
WDelayer *WDelayerVariables `validate:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SCConsts joins all the smart contract constants in a single struct
|
||||||
|
type SCConsts struct {
|
||||||
|
Rollup RollupConstants
|
||||||
|
Auction AuctionConstants
|
||||||
|
WDelayer WDelayerConstants
|
||||||
|
}
|
||||||
@@ -24,7 +24,7 @@ var FeeFactorLsh60 [256]*big.Int
|
|||||||
type RecommendedFee struct {
|
type RecommendedFee struct {
|
||||||
ExistingAccount float64 `json:"existingAccount"`
|
ExistingAccount float64 `json:"existingAccount"`
|
||||||
CreatesAccount float64 `json:"createAccount"`
|
CreatesAccount float64 `json:"createAccount"`
|
||||||
CreatesAccountAndRegister float64 `json:"createAccountInternal"`
|
CreatesAccountInternal float64 `json:"createAccountInternal"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// FeeSelector is used to select a percentage from the FeePlan.
|
// FeeSelector is used to select a percentage from the FeePlan.
|
||||||
|
|||||||
153
config/config.go
153
config/config.go
@@ -9,6 +9,7 @@ import (
|
|||||||
"github.com/BurntSushi/toml"
|
"github.com/BurntSushi/toml"
|
||||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/priceupdater"
|
||||||
"github.com/hermeznetwork/tracerr"
|
"github.com/hermeznetwork/tracerr"
|
||||||
"github.com/iden3/go-iden3-crypto/babyjub"
|
"github.com/iden3/go-iden3-crypto/babyjub"
|
||||||
"gopkg.in/go-playground/validator.v9"
|
"gopkg.in/go-playground/validator.v9"
|
||||||
@@ -44,6 +45,13 @@ type ForgeBatchGasCost struct {
|
|||||||
L2Tx uint64 `validate:"required"`
|
L2Tx uint64 `validate:"required"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CoordinatorAPI specifies the configuration parameters of the API in mode
|
||||||
|
// coordinator
|
||||||
|
type CoordinatorAPI struct {
|
||||||
|
// Coordinator enables the coordinator API endpoints
|
||||||
|
Coordinator bool
|
||||||
|
}
|
||||||
|
|
||||||
// Coordinator is the coordinator specific configuration.
|
// Coordinator is the coordinator specific configuration.
|
||||||
type Coordinator struct {
|
type Coordinator struct {
|
||||||
// ForgerAddress is the address under which this coordinator is forging
|
// ForgerAddress is the address under which this coordinator is forging
|
||||||
@@ -101,6 +109,20 @@ type Coordinator struct {
|
|||||||
// to 0s, the coordinator will continuously forge even if the batches
|
// to 0s, the coordinator will continuously forge even if the batches
|
||||||
// are empty.
|
// are empty.
|
||||||
ForgeNoTxsDelay Duration `validate:"-"`
|
ForgeNoTxsDelay Duration `validate:"-"`
|
||||||
|
// MustForgeAtSlotDeadline enables the coordinator to forge slots if
|
||||||
|
// the empty slots reach the slot deadline.
|
||||||
|
MustForgeAtSlotDeadline bool
|
||||||
|
// IgnoreSlotCommitment disables forcing the coordinator to forge a
|
||||||
|
// slot immediately when the slot is not committed. If set to false,
|
||||||
|
// the coordinator will immediately forge a batch at the beginning of a
|
||||||
|
// slot if it's the slot winner.
|
||||||
|
IgnoreSlotCommitment bool
|
||||||
|
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
|
||||||
|
// batch per slot, only if there are included txs in that batch, or
|
||||||
|
// pending l1UserTxs in the smart contract. Setting this parameter
|
||||||
|
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
|
||||||
|
// and `IgnoreSlotCommitment`.
|
||||||
|
ForgeOncePerSlotIfTxs bool
|
||||||
// SyncRetryInterval is the waiting interval between calls to the main
|
// SyncRetryInterval is the waiting interval between calls to the main
|
||||||
// handler of a synced block after an error
|
// handler of a synced block after an error
|
||||||
SyncRetryInterval Duration `validate:"required"`
|
SyncRetryInterval Duration `validate:"required"`
|
||||||
@@ -197,10 +219,7 @@ type Coordinator struct {
|
|||||||
// ForgeBatch transaction.
|
// ForgeBatch transaction.
|
||||||
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
|
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
|
||||||
} `validate:"required"`
|
} `validate:"required"`
|
||||||
API struct {
|
API CoordinatorAPI `validate:"required"`
|
||||||
// Coordinator enables the coordinator API endpoints
|
|
||||||
Coordinator bool
|
|
||||||
} `validate:"required"`
|
|
||||||
Debug struct {
|
Debug struct {
|
||||||
// BatchPath if set, specifies the path where batchInfo is stored
|
// BatchPath if set, specifies the path where batchInfo is stored
|
||||||
// in JSON in every step/update of the pipeline
|
// in JSON in every step/update of the pipeline
|
||||||
@@ -215,26 +234,10 @@ type Coordinator struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Node is the hermez node configuration.
|
// PostgreSQL is the postgreSQL configuration parameters. It's possible to use
|
||||||
type Node struct {
|
// diferentiated SQL connections for read/write. If the read configuration is
|
||||||
PriceUpdater struct {
|
// not provided, the write one it's going to be used for both reads and writes
|
||||||
// Interval between price updater calls
|
type PostgreSQL struct {
|
||||||
Interval Duration `valudate:"required"`
|
|
||||||
// URL of the token prices provider
|
|
||||||
URL string `valudate:"required"`
|
|
||||||
// Type of the API of the token prices provider
|
|
||||||
Type string `valudate:"required"`
|
|
||||||
} `validate:"required"`
|
|
||||||
StateDB struct {
|
|
||||||
// Path where the synchronizer StateDB is stored
|
|
||||||
Path string `validate:"required"`
|
|
||||||
// Keep is the number of checkpoints to keep
|
|
||||||
Keep int `validate:"required"`
|
|
||||||
} `validate:"required"`
|
|
||||||
// It's possible to use diferentiated SQL connections for read/write.
|
|
||||||
// If the read configuration is not provided, the write one it's going to be used
|
|
||||||
// for both reads and writes
|
|
||||||
PostgreSQL struct {
|
|
||||||
// Port of the PostgreSQL write server
|
// Port of the PostgreSQL write server
|
||||||
PortWrite int `validate:"required"`
|
PortWrite int `validate:"required"`
|
||||||
// Host of the PostgreSQL write server
|
// Host of the PostgreSQL write server
|
||||||
@@ -255,7 +258,42 @@ type Node struct {
|
|||||||
PasswordRead string
|
PasswordRead string
|
||||||
// Name of the PostgreSQL read server database
|
// Name of the PostgreSQL read server database
|
||||||
NameRead string
|
NameRead string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeDebug specifies debug configuration parameters
|
||||||
|
type NodeDebug struct {
|
||||||
|
// APIAddress is the address where the debugAPI will listen if
|
||||||
|
// set
|
||||||
|
APIAddress string
|
||||||
|
// MeddlerLogs enables meddler debug mode, where unused columns and struct
|
||||||
|
// fields will be logged
|
||||||
|
MeddlerLogs bool
|
||||||
|
// GinDebugMode sets Gin-Gonic (the web framework) to run in
|
||||||
|
// debug mode
|
||||||
|
GinDebugMode bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node is the hermez node configuration.
|
||||||
|
type Node struct {
|
||||||
|
PriceUpdater struct {
|
||||||
|
// Interval between price updater calls
|
||||||
|
Interval Duration `validate:"required"`
|
||||||
|
// URLBitfinexV2 is the URL of bitfinex V2 API
|
||||||
|
URLBitfinexV2 string `validate:"required"`
|
||||||
|
// URLCoinGeckoV3 is the URL of coingecko V3 API
|
||||||
|
URLCoinGeckoV3 string `validate:"required"`
|
||||||
|
// DefaultUpdateMethod to get token prices
|
||||||
|
DefaultUpdateMethod priceupdater.UpdateMethodType `validate:"required"`
|
||||||
|
// TokensConfig to specify how each token get it's price updated
|
||||||
|
TokensConfig []priceupdater.TokenConfig
|
||||||
} `validate:"required"`
|
} `validate:"required"`
|
||||||
|
StateDB struct {
|
||||||
|
// Path where the synchronizer StateDB is stored
|
||||||
|
Path string `validate:"required"`
|
||||||
|
// Keep is the number of checkpoints to keep
|
||||||
|
Keep int `validate:"required"`
|
||||||
|
} `validate:"required"`
|
||||||
|
PostgreSQL PostgreSQL `validate:"required"`
|
||||||
Web3 struct {
|
Web3 struct {
|
||||||
// URL is the URL of the web3 ethereum-node RPC server
|
// URL is the URL of the web3 ethereum-node RPC server
|
||||||
URL string `validate:"required"`
|
URL string `validate:"required"`
|
||||||
@@ -286,6 +324,7 @@ type Node struct {
|
|||||||
// TokenHEZ address
|
// TokenHEZ address
|
||||||
TokenHEZName string `validate:"required"`
|
TokenHEZName string `validate:"required"`
|
||||||
} `validate:"required"`
|
} `validate:"required"`
|
||||||
|
// API specifies the configuration parameters of the API
|
||||||
API struct {
|
API struct {
|
||||||
// Address where the API will listen if set
|
// Address where the API will listen if set
|
||||||
Address string
|
Address string
|
||||||
@@ -303,20 +342,45 @@ type Node struct {
|
|||||||
// can wait to stablish a SQL connection
|
// can wait to stablish a SQL connection
|
||||||
SQLConnectionTimeout Duration
|
SQLConnectionTimeout Duration
|
||||||
} `validate:"required"`
|
} `validate:"required"`
|
||||||
Debug struct {
|
Debug NodeDebug `validate:"required"`
|
||||||
// APIAddress is the address where the debugAPI will listen if
|
|
||||||
// set
|
|
||||||
APIAddress string
|
|
||||||
// MeddlerLogs enables meddler debug mode, where unused columns and struct
|
|
||||||
// fields will be logged
|
|
||||||
MeddlerLogs bool
|
|
||||||
// GinDebugMode sets Gin-Gonic (the web framework) to run in
|
|
||||||
// debug mode
|
|
||||||
GinDebugMode bool
|
|
||||||
}
|
|
||||||
Coordinator Coordinator `validate:"-"`
|
Coordinator Coordinator `validate:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// APIServer is the api server configuration parameters
|
||||||
|
type APIServer struct {
|
||||||
|
// NodeAPI specifies the configuration parameters of the API
|
||||||
|
API struct {
|
||||||
|
// Address where the API will listen if set
|
||||||
|
Address string `validate:"required"`
|
||||||
|
// Explorer enables the Explorer API endpoints
|
||||||
|
Explorer bool
|
||||||
|
// Maximum concurrent connections allowed between API and SQL
|
||||||
|
MaxSQLConnections int `validate:"required"`
|
||||||
|
// SQLConnectionTimeout is the maximum amount of time that an API request
|
||||||
|
// can wait to stablish a SQL connection
|
||||||
|
SQLConnectionTimeout Duration
|
||||||
|
} `validate:"required"`
|
||||||
|
PostgreSQL PostgreSQL `validate:"required"`
|
||||||
|
Coordinator struct {
|
||||||
|
API struct {
|
||||||
|
// Coordinator enables the coordinator API endpoints
|
||||||
|
Coordinator bool
|
||||||
|
} `validate:"required"`
|
||||||
|
L2DB struct {
|
||||||
|
// MaxTxs is the maximum number of pending L2Txs that can be
|
||||||
|
// stored in the pool. Once this number of pending L2Txs is
|
||||||
|
// reached, inserts to the pool will be denied until some of
|
||||||
|
// the pending txs are forged.
|
||||||
|
MaxTxs uint32 `validate:"required"`
|
||||||
|
// MinFeeUSD is the minimum fee in USD that a tx must pay in
|
||||||
|
// order to be accepted into the pool. Txs with lower than
|
||||||
|
// minimum fee will be rejected at the API level.
|
||||||
|
MinFeeUSD float64
|
||||||
|
} `validate:"required"`
|
||||||
|
}
|
||||||
|
Debug NodeDebug `validate:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
// Load loads a generic config.
|
// Load loads a generic config.
|
||||||
func Load(path string, cfg interface{}) error {
|
func Load(path string, cfg interface{}) error {
|
||||||
bs, err := ioutil.ReadFile(path) //nolint:gosec
|
bs, err := ioutil.ReadFile(path) //nolint:gosec
|
||||||
@@ -330,8 +394,8 @@ func Load(path string, cfg interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadCoordinator loads the Coordinator configuration from path.
|
// LoadNode loads the Node configuration from path.
|
||||||
func LoadCoordinator(path string) (*Node, error) {
|
func LoadNode(path string, coordinator bool) (*Node, error) {
|
||||||
var cfg Node
|
var cfg Node
|
||||||
if err := Load(path, &cfg); err != nil {
|
if err := Load(path, &cfg); err != nil {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
|
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
|
||||||
@@ -340,21 +404,28 @@ func LoadCoordinator(path string) (*Node, error) {
|
|||||||
if err := validate.Struct(cfg); err != nil {
|
if err := validate.Struct(cfg); err != nil {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
||||||
}
|
}
|
||||||
|
if coordinator {
|
||||||
if err := validate.Struct(cfg.Coordinator); err != nil {
|
if err := validate.Struct(cfg.Coordinator); err != nil {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return &cfg, nil
|
return &cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadNode loads the Node configuration from path.
|
// LoadAPIServer loads the APIServer configuration from path.
|
||||||
func LoadNode(path string) (*Node, error) {
|
func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
|
||||||
var cfg Node
|
var cfg APIServer
|
||||||
if err := Load(path, &cfg); err != nil {
|
if err := Load(path, &cfg); err != nil {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
|
return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
|
||||||
}
|
}
|
||||||
validate := validator.New()
|
validate := validator.New()
|
||||||
if err := validate.Struct(cfg); err != nil {
|
if err := validate.Struct(cfg); err != nil {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
||||||
}
|
}
|
||||||
|
if coordinator {
|
||||||
|
if err := validate.Struct(cfg.Coordinator); err != nil {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
return &cfg, nil
|
return &cfg, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -85,7 +85,7 @@ type BatchInfo struct {
|
|||||||
PublicInputs []*big.Int
|
PublicInputs []*big.Int
|
||||||
L1Batch bool
|
L1Batch bool
|
||||||
VerifierIdx uint8
|
VerifierIdx uint8
|
||||||
L1UserTxsExtra []common.L1Tx
|
L1UserTxs []common.L1Tx
|
||||||
L1CoordTxs []common.L1Tx
|
L1CoordTxs []common.L1Tx
|
||||||
L1CoordinatorTxsAuths [][]byte
|
L1CoordinatorTxsAuths [][]byte
|
||||||
L2Txs []common.L2Tx
|
L2Txs []common.L2Tx
|
||||||
|
|||||||
@@ -25,9 +25,7 @@ import (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
|
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
|
||||||
errForgeNoTxsBeforeDelay = fmt.Errorf(
|
errSkipBatchByPolicy = fmt.Errorf("skip batch by policy")
|
||||||
"no txs to forge and we haven't reached the forge no txs delay")
|
|
||||||
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -84,6 +82,20 @@ type Config struct {
|
|||||||
// to 0s, the coordinator will continuously forge even if the batches
|
// to 0s, the coordinator will continuously forge even if the batches
|
||||||
// are empty.
|
// are empty.
|
||||||
ForgeNoTxsDelay time.Duration
|
ForgeNoTxsDelay time.Duration
|
||||||
|
// MustForgeAtSlotDeadline enables the coordinator to forge slots if
|
||||||
|
// the empty slots reach the slot deadline.
|
||||||
|
MustForgeAtSlotDeadline bool
|
||||||
|
// IgnoreSlotCommitment disables forcing the coordinator to forge a
|
||||||
|
// slot immediately when the slot is not committed. If set to false,
|
||||||
|
// the coordinator will immediately forge a batch at the beginning of
|
||||||
|
// a slot if it's the slot winner.
|
||||||
|
IgnoreSlotCommitment bool
|
||||||
|
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
|
||||||
|
// batch per slot, only if there are included txs in that batch, or
|
||||||
|
// pending l1UserTxs in the smart contract. Setting this parameter
|
||||||
|
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
|
||||||
|
// and `IgnoreSlotCommitment`.
|
||||||
|
ForgeOncePerSlotIfTxs bool
|
||||||
// SyncRetryInterval is the waiting interval between calls to the main
|
// SyncRetryInterval is the waiting interval between calls to the main
|
||||||
// handler of a synced block after an error
|
// handler of a synced block after an error
|
||||||
SyncRetryInterval time.Duration
|
SyncRetryInterval time.Duration
|
||||||
@@ -145,8 +157,8 @@ type Coordinator struct {
|
|||||||
pipelineNum int // Pipeline sequential number. The first pipeline is 1
|
pipelineNum int // Pipeline sequential number. The first pipeline is 1
|
||||||
pipelineFromBatch fromBatch // batch from which we started the pipeline
|
pipelineFromBatch fromBatch // batch from which we started the pipeline
|
||||||
provers []prover.Client
|
provers []prover.Client
|
||||||
consts synchronizer.SCConsts
|
consts common.SCConsts
|
||||||
vars synchronizer.SCVariables
|
vars common.SCVariables
|
||||||
stats synchronizer.Stats
|
stats synchronizer.Stats
|
||||||
started bool
|
started bool
|
||||||
|
|
||||||
@@ -186,8 +198,8 @@ func NewCoordinator(cfg Config,
|
|||||||
batchBuilder *batchbuilder.BatchBuilder,
|
batchBuilder *batchbuilder.BatchBuilder,
|
||||||
serverProofs []prover.Client,
|
serverProofs []prover.Client,
|
||||||
ethClient eth.ClientInterface,
|
ethClient eth.ClientInterface,
|
||||||
scConsts *synchronizer.SCConsts,
|
scConsts *common.SCConsts,
|
||||||
initSCVars *synchronizer.SCVariables,
|
initSCVars *common.SCVariables,
|
||||||
) (*Coordinator, error) {
|
) (*Coordinator, error) {
|
||||||
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
|
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
|
||||||
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
|
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
|
||||||
@@ -276,13 +288,13 @@ type MsgSyncBlock struct {
|
|||||||
Batches []common.BatchData
|
Batches []common.BatchData
|
||||||
// Vars contains each Smart Contract variables if they are updated, or
|
// Vars contains each Smart Contract variables if they are updated, or
|
||||||
// nil if they haven't changed.
|
// nil if they haven't changed.
|
||||||
Vars synchronizer.SCVariablesPtr
|
Vars common.SCVariablesPtr
|
||||||
}
|
}
|
||||||
|
|
||||||
// MsgSyncReorg indicates a reorg
|
// MsgSyncReorg indicates a reorg
|
||||||
type MsgSyncReorg struct {
|
type MsgSyncReorg struct {
|
||||||
Stats synchronizer.Stats
|
Stats synchronizer.Stats
|
||||||
Vars synchronizer.SCVariablesPtr
|
Vars common.SCVariablesPtr
|
||||||
}
|
}
|
||||||
|
|
||||||
// MsgStopPipeline indicates a signal to reset the pipeline
|
// MsgStopPipeline indicates a signal to reset the pipeline
|
||||||
@@ -301,7 +313,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
|
func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
|
||||||
if update.Rollup != nil {
|
if update.Rollup != nil {
|
||||||
vars.Rollup = *update.Rollup
|
vars.Rollup = *update.Rollup
|
||||||
}
|
}
|
||||||
@@ -313,12 +325,13 @@ func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariable
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
|
||||||
updateSCVars(&c.vars, vars)
|
updateSCVars(&c.vars, vars)
|
||||||
}
|
}
|
||||||
|
|
||||||
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
|
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
|
||||||
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
|
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64,
|
||||||
|
mustForgeAtDeadline bool) bool {
|
||||||
if blockNum < auctionConstants.GenesisBlockNum {
|
if blockNum < auctionConstants.GenesisBlockNum {
|
||||||
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
|
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
|
||||||
"genesis", auctionConstants.GenesisBlockNum)
|
"genesis", auctionConstants.GenesisBlockNum)
|
||||||
@@ -343,7 +356,7 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
|
|||||||
"block", blockNum)
|
"block", blockNum)
|
||||||
anyoneForge = true
|
anyoneForge = true
|
||||||
}
|
}
|
||||||
if slot.Forger == addr || anyoneForge {
|
if slot.Forger == addr || (anyoneForge && mustForgeAtDeadline) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
|
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
|
||||||
@@ -353,14 +366,14 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
|
|||||||
func (c *Coordinator) canForgeAt(blockNum int64) bool {
|
func (c *Coordinator) canForgeAt(blockNum int64) bool {
|
||||||
return canForge(&c.consts.Auction, &c.vars.Auction,
|
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||||
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
||||||
c.cfg.ForgerAddress, blockNum)
|
c.cfg.ForgerAddress, blockNum, c.cfg.MustForgeAtSlotDeadline)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Coordinator) canForge() bool {
|
func (c *Coordinator) canForge() bool {
|
||||||
blockNum := c.stats.Eth.LastBlock.Num + 1
|
blockNum := c.stats.Eth.LastBlock.Num + 1
|
||||||
return canForge(&c.consts.Auction, &c.vars.Auction,
|
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||||
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
||||||
c.cfg.ForgerAddress, blockNum)
|
c.cfg.ForgerAddress, blockNum, c.cfg.MustForgeAtSlotDeadline)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
|
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
|
||||||
|
|||||||
@@ -167,6 +167,7 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
|
|||||||
EthClientAttemptsDelay: 100 * time.Millisecond,
|
EthClientAttemptsDelay: 100 * time.Millisecond,
|
||||||
TxManagerCheckInterval: 300 * time.Millisecond,
|
TxManagerCheckInterval: 300 * time.Millisecond,
|
||||||
DebugBatchPath: debugBatchPath,
|
DebugBatchPath: debugBatchPath,
|
||||||
|
MustForgeAtSlotDeadline: true,
|
||||||
Purger: PurgerCfg{
|
Purger: PurgerCfg{
|
||||||
PurgeBatchDelay: 10,
|
PurgeBatchDelay: 10,
|
||||||
PurgeBlockDelay: 10,
|
PurgeBlockDelay: 10,
|
||||||
@@ -188,12 +189,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
|
|||||||
&prover.MockClient{Delay: 400 * time.Millisecond},
|
&prover.MockClient{Delay: 400 * time.Millisecond},
|
||||||
}
|
}
|
||||||
|
|
||||||
scConsts := &synchronizer.SCConsts{
|
scConsts := &common.SCConsts{
|
||||||
Rollup: *ethClientSetup.RollupConstants,
|
Rollup: *ethClientSetup.RollupConstants,
|
||||||
Auction: *ethClientSetup.AuctionConstants,
|
Auction: *ethClientSetup.AuctionConstants,
|
||||||
WDelayer: *ethClientSetup.WDelayerConstants,
|
WDelayer: *ethClientSetup.WDelayerConstants,
|
||||||
}
|
}
|
||||||
initSCVars := &synchronizer.SCVariables{
|
initSCVars := &common.SCVariables{
|
||||||
Rollup: *ethClientSetup.RollupVariables,
|
Rollup: *ethClientSetup.RollupVariables,
|
||||||
Auction: *ethClientSetup.AuctionVariables,
|
Auction: *ethClientSetup.AuctionVariables,
|
||||||
WDelayer: *ethClientSetup.WDelayerVariables,
|
WDelayer: *ethClientSetup.WDelayerVariables,
|
||||||
@@ -391,6 +392,10 @@ func TestCoordCanForge(t *testing.T) {
|
|||||||
assert.Equal(t, true, coord.canForge())
|
assert.Equal(t, true, coord.canForge())
|
||||||
assert.Equal(t, true, bootCoord.canForge())
|
assert.Equal(t, true, bootCoord.canForge())
|
||||||
|
|
||||||
|
// Anyone can forge but the node MustForgeAtSlotDeadline as set as false
|
||||||
|
coord.cfg.MustForgeAtSlotDeadline = false
|
||||||
|
assert.Equal(t, false, coord.canForge())
|
||||||
|
|
||||||
// Slot 3. coordinator bid, so the winner is the coordinator
|
// Slot 3. coordinator bid, so the winner is the coordinator
|
||||||
stats.Eth.LastBlock.Num = ethClientSetup.AuctionConstants.GenesisBlockNum +
|
stats.Eth.LastBlock.Num = ethClientSetup.AuctionConstants.GenesisBlockNum +
|
||||||
3*int64(ethClientSetup.AuctionConstants.BlocksPerSlot)
|
3*int64(ethClientSetup.AuctionConstants.BlocksPerSlot)
|
||||||
@@ -529,7 +534,7 @@ func TestCoordinatorStress(t *testing.T) {
|
|||||||
coord.SendMsg(ctx, MsgSyncBlock{
|
coord.SendMsg(ctx, MsgSyncBlock{
|
||||||
Stats: *stats,
|
Stats: *stats,
|
||||||
Batches: blockData.Rollup.Batches,
|
Batches: blockData.Rollup.Batches,
|
||||||
Vars: synchronizer.SCVariablesPtr{
|
Vars: common.SCVariablesPtr{
|
||||||
Rollup: blockData.Rollup.Vars,
|
Rollup: blockData.Rollup.Vars,
|
||||||
Auction: blockData.Auction.Vars,
|
Auction: blockData.Auction.Vars,
|
||||||
WDelayer: blockData.WDelayer.Vars,
|
WDelayer: blockData.WDelayer.Vars,
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ import (
|
|||||||
|
|
||||||
type statsVars struct {
|
type statsVars struct {
|
||||||
Stats synchronizer.Stats
|
Stats synchronizer.Stats
|
||||||
Vars synchronizer.SCVariablesPtr
|
Vars common.SCVariablesPtr
|
||||||
}
|
}
|
||||||
|
|
||||||
type state struct {
|
type state struct {
|
||||||
@@ -36,7 +36,7 @@ type state struct {
|
|||||||
type Pipeline struct {
|
type Pipeline struct {
|
||||||
num int
|
num int
|
||||||
cfg Config
|
cfg Config
|
||||||
consts synchronizer.SCConsts
|
consts common.SCConsts
|
||||||
|
|
||||||
// state
|
// state
|
||||||
state state
|
state state
|
||||||
@@ -57,7 +57,7 @@ type Pipeline struct {
|
|||||||
purger *Purger
|
purger *Purger
|
||||||
|
|
||||||
stats synchronizer.Stats
|
stats synchronizer.Stats
|
||||||
vars synchronizer.SCVariables
|
vars common.SCVariables
|
||||||
statsVarsCh chan statsVars
|
statsVarsCh chan statsVars
|
||||||
|
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
@@ -90,7 +90,7 @@ func NewPipeline(ctx context.Context,
|
|||||||
coord *Coordinator,
|
coord *Coordinator,
|
||||||
txManager *TxManager,
|
txManager *TxManager,
|
||||||
provers []prover.Client,
|
provers []prover.Client,
|
||||||
scConsts *synchronizer.SCConsts,
|
scConsts *common.SCConsts,
|
||||||
) (*Pipeline, error) {
|
) (*Pipeline, error) {
|
||||||
proversPool := NewProversPool(len(provers))
|
proversPool := NewProversPool(len(provers))
|
||||||
proversPoolSize := 0
|
proversPoolSize := 0
|
||||||
@@ -125,7 +125,7 @@ func NewPipeline(ctx context.Context,
|
|||||||
|
|
||||||
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
|
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
|
||||||
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
|
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
|
||||||
vars *synchronizer.SCVariablesPtr) {
|
vars *common.SCVariablesPtr) {
|
||||||
select {
|
select {
|
||||||
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
|
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -134,7 +134,7 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
|
|||||||
|
|
||||||
// reset pipeline state
|
// reset pipeline state
|
||||||
func (p *Pipeline) reset(batchNum common.BatchNum,
|
func (p *Pipeline) reset(batchNum common.BatchNum,
|
||||||
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
|
stats *synchronizer.Stats, vars *common.SCVariables) error {
|
||||||
p.state = state{
|
p.state = state{
|
||||||
batchNum: batchNum,
|
batchNum: batchNum,
|
||||||
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
|
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
|
||||||
@@ -195,7 +195,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
|
||||||
updateSCVars(&p.vars, vars)
|
updateSCVars(&p.vars, vars)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,8 +224,9 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
|
|||||||
|
|
||||||
// 2. Forge the batch internally (make a selection of txs and prepare
|
// 2. Forge the batch internally (make a selection of txs and prepare
|
||||||
// all the smart contract arguments)
|
// all the smart contract arguments)
|
||||||
|
var skipReason *string
|
||||||
p.mutexL2DBUpdateDelete.Lock()
|
p.mutexL2DBUpdateDelete.Lock()
|
||||||
batchInfo, err = p.forgeBatch(batchNum)
|
batchInfo, skipReason, err = p.forgeBatch(batchNum)
|
||||||
p.mutexL2DBUpdateDelete.Unlock()
|
p.mutexL2DBUpdateDelete.Unlock()
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
return nil, ctx.Err()
|
return nil, ctx.Err()
|
||||||
@@ -234,13 +235,13 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
|
|||||||
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
|
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
|
||||||
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
|
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
|
||||||
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
|
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
|
||||||
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
|
|
||||||
tracerr.Unwrap(err) == errForgeBeforeDelay {
|
|
||||||
// no log
|
|
||||||
} else {
|
} else {
|
||||||
log.Errorw("forgeBatch", "err", err)
|
log.Errorw("forgeBatch", "err", err)
|
||||||
}
|
}
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
|
} else if skipReason != nil {
|
||||||
|
log.Debugw("skipping batch", "batch", batchNum, "reason", *skipReason)
|
||||||
|
return nil, tracerr.Wrap(errSkipBatchByPolicy)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Send the ZKInputs to the proof server
|
// 3. Send the ZKInputs to the proof server
|
||||||
@@ -256,7 +257,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
|
|||||||
|
|
||||||
// Start the forging pipeline
|
// Start the forging pipeline
|
||||||
func (p *Pipeline) Start(batchNum common.BatchNum,
|
func (p *Pipeline) Start(batchNum common.BatchNum,
|
||||||
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
|
stats *synchronizer.Stats, vars *common.SCVariables) error {
|
||||||
if p.started {
|
if p.started {
|
||||||
log.Fatal("Pipeline already started")
|
log.Fatal("Pipeline already started")
|
||||||
}
|
}
|
||||||
@@ -295,8 +296,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
|
|||||||
if p.ctx.Err() != nil {
|
if p.ctx.Err() != nil {
|
||||||
continue
|
continue
|
||||||
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
|
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
|
||||||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
|
tracerr.Unwrap(err) == errSkipBatchByPolicy {
|
||||||
tracerr.Unwrap(err) == errForgeBeforeDelay {
|
|
||||||
continue
|
continue
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
p.setErrAtBatchNum(batchNum)
|
p.setErrAtBatchNum(batchNum)
|
||||||
@@ -389,17 +389,109 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// slotCommitted returns true if the current slot has already been committed
|
||||||
|
func (p *Pipeline) slotCommitted() bool {
|
||||||
|
// Synchronizer has synchronized a batch in the current slot (setting
|
||||||
|
// CurrentSlot.ForgerCommitment) or the pipeline has already
|
||||||
|
// internally-forged a batch in the current slot
|
||||||
|
return p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
|
||||||
|
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged
|
||||||
|
}
|
||||||
|
|
||||||
|
// forgePolicySkipPreSelection is called before doing a tx selection in a batch to
|
||||||
|
// determine by policy if we should forge the batch or not. Returns true and
|
||||||
|
// the reason when the forging of the batch must be skipped.
|
||||||
|
func (p *Pipeline) forgePolicySkipPreSelection(now time.Time) (bool, string) {
|
||||||
|
// Check if the slot is not yet fulfilled
|
||||||
|
slotCommitted := p.slotCommitted()
|
||||||
|
if p.cfg.ForgeOncePerSlotIfTxs {
|
||||||
|
if slotCommitted {
|
||||||
|
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed"
|
||||||
|
}
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
// Determine if we must commit the slot
|
||||||
|
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we haven't reached the ForgeDelay, skip forging the batch
|
||||||
|
if now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
|
||||||
|
return true, "we haven't reached the forge delay"
|
||||||
|
}
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// forgePolicySkipPostSelection is called after doing a tx selection in a batch to
|
||||||
|
// determine by policy if we should forge the batch or not. Returns true and
|
||||||
|
// the reason when the forging of the batch must be skipped.
|
||||||
|
func (p *Pipeline) forgePolicySkipPostSelection(now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
|
||||||
|
poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) (bool, string, error) {
|
||||||
|
// Check if the slot is not yet fulfilled
|
||||||
|
slotCommitted := p.slotCommitted()
|
||||||
|
|
||||||
|
pendingTxs := true
|
||||||
|
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
|
||||||
|
if batchInfo.L1Batch {
|
||||||
|
// Query the number of unforged L1UserTxs
|
||||||
|
// (either in a open queue or in a frozen
|
||||||
|
// not-yet-forged queue).
|
||||||
|
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
|
||||||
|
if err != nil {
|
||||||
|
return false, "", err
|
||||||
|
}
|
||||||
|
// If there are future L1UserTxs, we forge a
|
||||||
|
// batch to advance the queues to be able to
|
||||||
|
// forge the L1UserTxs in the future.
|
||||||
|
// Otherwise, skip.
|
||||||
|
if count == 0 {
|
||||||
|
pendingTxs = false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pendingTxs = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.cfg.ForgeOncePerSlotIfTxs {
|
||||||
|
if slotCommitted {
|
||||||
|
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed",
|
||||||
|
nil
|
||||||
|
}
|
||||||
|
if pendingTxs {
|
||||||
|
return false, "", nil
|
||||||
|
}
|
||||||
|
return true, "cfg.ForgeOncePerSlotIfTxs = true and no pending txs",
|
||||||
|
nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine if we must commit the slot
|
||||||
|
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
|
||||||
|
return false, "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if there is no txs to forge, no l1UserTxs in the open queue to
|
||||||
|
// freeze and we haven't reached the ForgeNoTxsDelay
|
||||||
|
if now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
|
||||||
|
if !pendingTxs {
|
||||||
|
return true, "no txs to forge and we haven't reached the forge no txs delay",
|
||||||
|
nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, "", nil
|
||||||
|
}
|
||||||
|
|
||||||
// forgeBatch forges the batchNum batch.
|
// forgeBatch forges the batchNum batch.
|
||||||
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
|
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
|
||||||
|
skipReason *string, err error) {
|
||||||
// remove transactions from the pool that have been there for too long
|
// remove transactions from the pool that have been there for too long
|
||||||
_, err = p.purger.InvalidateMaybe(p.l2DB, p.txSelector.LocalAccountsDB(),
|
_, err = p.purger.InvalidateMaybe(p.l2DB, p.txSelector.LocalAccountsDB(),
|
||||||
p.stats.Sync.LastBlock.Num, int64(batchNum))
|
p.stats.Sync.LastBlock.Num, int64(batchNum))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
_, err = p.purger.PurgeMaybe(p.l2DB, p.stats.Sync.LastBlock.Num, int64(batchNum))
|
_, err = p.purger.PurgeMaybe(p.l2DB, p.stats.Sync.LastBlock.Num, int64(batchNum))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
// Structure to accumulate data and metadata of the batch
|
// Structure to accumulate data and metadata of the batch
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
@@ -409,79 +501,48 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
|||||||
|
|
||||||
var poolL2Txs []common.PoolL2Tx
|
var poolL2Txs []common.PoolL2Tx
|
||||||
var discardedL2Txs []common.PoolL2Tx
|
var discardedL2Txs []common.PoolL2Tx
|
||||||
var l1UserTxsExtra, l1CoordTxs []common.L1Tx
|
var l1UserTxs, l1CoordTxs []common.L1Tx
|
||||||
var auths [][]byte
|
var auths [][]byte
|
||||||
var coordIdxs []common.Idx
|
var coordIdxs []common.Idx
|
||||||
|
|
||||||
// Check if the slot is not yet fulfilled
|
if skip, reason := p.forgePolicySkipPreSelection(now); skip {
|
||||||
slotCommitted := false
|
return nil, &reason, nil
|
||||||
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
|
|
||||||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
|
|
||||||
slotCommitted = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we haven't reached the ForgeDelay, skip forging the batch
|
|
||||||
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
|
|
||||||
return nil, tracerr.Wrap(errForgeBeforeDelay)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. Decide if we forge L2Tx or L1+L2Tx
|
// 1. Decide if we forge L2Tx or L1+L2Tx
|
||||||
if p.shouldL1L2Batch(batchInfo) {
|
if p.shouldL1L2Batch(batchInfo) {
|
||||||
batchInfo.L1Batch = true
|
batchInfo.L1Batch = true
|
||||||
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
|
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
|
||||||
return nil, tracerr.Wrap(errLastL1BatchNotSynced)
|
return nil, nil, tracerr.Wrap(errLastL1BatchNotSynced)
|
||||||
}
|
}
|
||||||
// 2a: L1+L2 txs
|
// 2a: L1+L2 txs
|
||||||
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
|
_l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
||||||
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, l1UserTxs)
|
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// 2b: only L2 txs
|
// 2b: only L2 txs
|
||||||
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
||||||
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
|
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
l1UserTxsExtra = nil
|
l1UserTxs = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there are no txs to forge, no l1UserTxs in the open queue to
|
if skip, reason, err := p.forgePolicySkipPostSelection(now,
|
||||||
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
|
l1UserTxs, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
|
||||||
// batch.
|
return nil, nil, tracerr.Wrap(err)
|
||||||
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
|
} else if skip {
|
||||||
noTxs := false
|
|
||||||
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
|
|
||||||
if batchInfo.L1Batch {
|
|
||||||
// Query the number of unforged L1UserTxs
|
|
||||||
// (either in a open queue or in a frozen
|
|
||||||
// not-yet-forged queue).
|
|
||||||
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
// If there are future L1UserTxs, we forge a
|
|
||||||
// batch to advance the queues to be able to
|
|
||||||
// forge the L1UserTxs in the future.
|
|
||||||
// Otherwise, skip.
|
|
||||||
if count == 0 {
|
|
||||||
noTxs = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
noTxs = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if noTxs {
|
|
||||||
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
|
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
|
||||||
return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
|
|
||||||
}
|
}
|
||||||
|
return nil, &reason, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if batchInfo.L1Batch {
|
if batchInfo.L1Batch {
|
||||||
@@ -490,7 +551,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 3. Save metadata from TxSelector output for BatchNum
|
// 3. Save metadata from TxSelector output for BatchNum
|
||||||
batchInfo.L1UserTxsExtra = l1UserTxsExtra
|
batchInfo.L1UserTxs = l1UserTxs
|
||||||
batchInfo.L1CoordTxs = l1CoordTxs
|
batchInfo.L1CoordTxs = l1CoordTxs
|
||||||
batchInfo.L1CoordinatorTxsAuths = auths
|
batchInfo.L1CoordinatorTxsAuths = auths
|
||||||
batchInfo.CoordIdxs = coordIdxs
|
batchInfo.CoordIdxs = coordIdxs
|
||||||
@@ -498,10 +559,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
|||||||
|
|
||||||
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs),
|
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs),
|
||||||
batchInfo.BatchNum); err != nil {
|
batchInfo.BatchNum); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
|
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Invalidate transactions that become invalid because of
|
// Invalidate transactions that become invalid because of
|
||||||
@@ -510,21 +571,21 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
|||||||
// all the nonces smaller than the current one)
|
// all the nonces smaller than the current one)
|
||||||
err = p.l2DB.InvalidateOldNonces(idxsNonceFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum)
|
err = p.l2DB.InvalidateOldNonces(idxsNonceFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. Call BatchBuilder with TxSelector output
|
// 4. Call BatchBuilder with TxSelector output
|
||||||
configBatch := &batchbuilder.ConfigBatch{
|
configBatch := &batchbuilder.ConfigBatch{
|
||||||
TxProcessorConfig: p.cfg.TxProcessorConfig,
|
TxProcessorConfig: p.cfg.TxProcessorConfig,
|
||||||
}
|
}
|
||||||
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxsExtra,
|
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxs,
|
||||||
l1CoordTxs, poolL2Txs)
|
l1CoordTxs, poolL2Txs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a big uggly, find a better way
|
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a big uggly, find a better way
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
batchInfo.L2Txs = l2Txs
|
batchInfo.L2Txs = l2Txs
|
||||||
|
|
||||||
@@ -536,7 +597,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
|||||||
|
|
||||||
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
|
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
|
||||||
|
|
||||||
return batchInfo, nil
|
return batchInfo, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// waitServerProof gets the generated zkProof & sends it to the SmartContract
|
// waitServerProof gets the generated zkProof & sends it to the SmartContract
|
||||||
@@ -581,7 +642,7 @@ func prepareForgeBatchArgs(batchInfo *BatchInfo) *eth.RollupForgeBatchArgs {
|
|||||||
NewLastIdx: int64(zki.Metadata.NewLastIdxRaw),
|
NewLastIdx: int64(zki.Metadata.NewLastIdxRaw),
|
||||||
NewStRoot: zki.Metadata.NewStateRootRaw.BigInt(),
|
NewStRoot: zki.Metadata.NewStateRootRaw.BigInt(),
|
||||||
NewExitRoot: zki.Metadata.NewExitRootRaw.BigInt(),
|
NewExitRoot: zki.Metadata.NewExitRootRaw.BigInt(),
|
||||||
L1UserTxs: batchInfo.L1UserTxsExtra,
|
L1UserTxs: batchInfo.L1UserTxs,
|
||||||
L1CoordinatorTxs: batchInfo.L1CoordTxs,
|
L1CoordinatorTxs: batchInfo.L1CoordTxs,
|
||||||
L1CoordinatorTxsAuths: batchInfo.L1CoordinatorTxsAuths,
|
L1CoordinatorTxsAuths: batchInfo.L1CoordinatorTxsAuths,
|
||||||
L2TxsData: batchInfo.L2Txs,
|
L2TxsData: batchInfo.L2Txs,
|
||||||
|
|||||||
@@ -206,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
|
err = pipeline.reset(batchNum, syncStats, syncSCVars)
|
||||||
Rollup: *syncSCVars.Rollup,
|
|
||||||
Auction: *syncSCVars.Auction,
|
|
||||||
WDelayer: *syncSCVars.WDelayer,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// Sanity check
|
// Sanity check
|
||||||
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()
|
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()
|
||||||
@@ -228,12 +224,12 @@ PoolTransfer(0) User2-User3: 300 (126)
|
|||||||
|
|
||||||
batchNum++
|
batchNum++
|
||||||
|
|
||||||
batchInfo, err := pipeline.forgeBatch(batchNum)
|
batchInfo, _, err := pipeline.forgeBatch(batchNum)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 3, len(batchInfo.L2Txs))
|
assert.Equal(t, 3, len(batchInfo.L2Txs))
|
||||||
|
|
||||||
batchNum++
|
batchNum++
|
||||||
batchInfo, err = pipeline.forgeBatch(batchNum)
|
batchInfo, _, err = pipeline.forgeBatch(batchNum)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(batchInfo.L2Txs))
|
assert.Equal(t, 0, len(batchInfo.L2Txs))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -31,10 +31,10 @@ type TxManager struct {
|
|||||||
batchCh chan *BatchInfo
|
batchCh chan *BatchInfo
|
||||||
chainID *big.Int
|
chainID *big.Int
|
||||||
account accounts.Account
|
account accounts.Account
|
||||||
consts synchronizer.SCConsts
|
consts common.SCConsts
|
||||||
|
|
||||||
stats synchronizer.Stats
|
stats synchronizer.Stats
|
||||||
vars synchronizer.SCVariables
|
vars common.SCVariables
|
||||||
statsVarsCh chan statsVars
|
statsVarsCh chan statsVars
|
||||||
|
|
||||||
discardPipelineCh chan int // int refers to the pipelineNum
|
discardPipelineCh chan int // int refers to the pipelineNum
|
||||||
@@ -55,7 +55,7 @@ type TxManager struct {
|
|||||||
|
|
||||||
// NewTxManager creates a new TxManager
|
// NewTxManager creates a new TxManager
|
||||||
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
|
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
|
||||||
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (
|
coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (
|
||||||
*TxManager, error) {
|
*TxManager, error) {
|
||||||
chainID, err := ethClient.EthChainID()
|
chainID, err := ethClient.EthChainID()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -104,7 +104,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
|
|||||||
|
|
||||||
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
|
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
|
||||||
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
|
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
|
||||||
vars *synchronizer.SCVariablesPtr) {
|
vars *common.SCVariablesPtr) {
|
||||||
select {
|
select {
|
||||||
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
|
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -120,7 +120,7 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
|
||||||
updateSCVars(&t.vars, vars)
|
updateSCVars(&t.vars, vars)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -147,7 +147,7 @@ func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.Tr
|
|||||||
auth.Value = big.NewInt(0) // in wei
|
auth.Value = big.NewInt(0) // in wei
|
||||||
|
|
||||||
gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
|
gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
|
||||||
uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
|
uint64(len(batchInfo.L1UserTxs))*t.cfg.ForgeBatchGasCost.L1UserTx +
|
||||||
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
|
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
|
||||||
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
|
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
|
||||||
auth.GasLimit = gasLimit
|
auth.GasLimit = gasLimit
|
||||||
@@ -608,7 +608,7 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
|
|||||||
func (t *TxManager) canForgeAt(blockNum int64) bool {
|
func (t *TxManager) canForgeAt(blockNum int64) bool {
|
||||||
return canForge(&t.consts.Auction, &t.vars.Auction,
|
return canForge(&t.consts.Auction, &t.vars.Auction,
|
||||||
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
|
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
|
||||||
t.cfg.ForgerAddress, blockNum)
|
t.cfg.ForgerAddress, blockNum, t.cfg.MustForgeAtSlotDeadline)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
|
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
|
||||||
|
|||||||
@@ -1,10 +1,14 @@
|
|||||||
package historydb
|
package historydb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"database/sql"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"time"
|
||||||
|
|
||||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/apitypes"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
"github.com/hermeznetwork/hermez-node/db"
|
"github.com/hermeznetwork/hermez-node/db"
|
||||||
"github.com/hermeznetwork/tracerr"
|
"github.com/hermeznetwork/tracerr"
|
||||||
@@ -32,9 +36,18 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
|
|||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
defer hdb.apiConnCon.Release()
|
defer hdb.apiConnCon.Release()
|
||||||
|
return hdb.getBatchAPI(hdb.dbRead, batchNum)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBatchInternalAPI return the batch with the given batchNum
|
||||||
|
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
|
||||||
|
return hdb.getBatchAPI(hdb.dbRead, batchNum)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
|
||||||
batch := &BatchAPI{}
|
batch := &BatchAPI{}
|
||||||
return batch, tracerr.Wrap(meddler.QueryRow(
|
if err := meddler.QueryRow(
|
||||||
hdb.dbRead, batch,
|
d, batch,
|
||||||
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
|
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
|
||||||
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
|
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
|
||||||
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
|
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
|
||||||
@@ -42,7 +55,11 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
|
|||||||
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs
|
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs
|
||||||
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
||||||
WHERE batch_num = $1;`, batchNum,
|
WHERE batch_num = $1;`, batchNum,
|
||||||
))
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
batch.CollectedFeesAPI = apitypes.NewCollectedFeesAPI(batch.CollectedFeesDB)
|
||||||
|
return batch, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBatchesAPI return the batches applying the given filters
|
// GetBatchesAPI return the batches applying the given filters
|
||||||
@@ -143,6 +160,9 @@ func (hdb *HistoryDB) GetBatchesAPI(
|
|||||||
if len(batches) == 0 {
|
if len(batches) == 0 {
|
||||||
return batches, 0, nil
|
return batches, 0, nil
|
||||||
}
|
}
|
||||||
|
for i := range batches {
|
||||||
|
batches[i].CollectedFeesAPI = apitypes.NewCollectedFeesAPI(batches[i].CollectedFeesDB)
|
||||||
|
}
|
||||||
return batches, batches[0].TotalItems - uint64(len(batches)), nil
|
return batches, batches[0].TotalItems - uint64(len(batches)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -180,6 +200,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
|
|||||||
return nil, 0, tracerr.Wrap(err)
|
return nil, 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
defer hdb.apiConnCon.Release()
|
defer hdb.apiConnCon.Release()
|
||||||
|
return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
|
||||||
|
}
|
||||||
|
func (hdb *HistoryDB) getBestBidsAPI(
|
||||||
|
d meddler.DB,
|
||||||
|
minSlotNum, maxSlotNum *int64,
|
||||||
|
bidderAddr *ethCommon.Address,
|
||||||
|
limit *uint, order string,
|
||||||
|
) ([]BidAPI, uint64, error) {
|
||||||
var query string
|
var query string
|
||||||
var args []interface{}
|
var args []interface{}
|
||||||
// JOIN the best bid of each slot with the latest update of each coordinator
|
// JOIN the best bid of each slot with the latest update of each coordinator
|
||||||
@@ -214,7 +242,7 @@ func (hdb *HistoryDB) GetBestBidsAPI(
|
|||||||
}
|
}
|
||||||
query = hdb.dbRead.Rebind(queryStr)
|
query = hdb.dbRead.Rebind(queryStr)
|
||||||
bidPtrs := []*BidAPI{}
|
bidPtrs := []*BidAPI{}
|
||||||
if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil {
|
if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
|
||||||
return nil, 0, tracerr.Wrap(err)
|
return nil, 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
// log.Debug(query)
|
// log.Debug(query)
|
||||||
@@ -697,25 +725,6 @@ func (hdb *HistoryDB) GetExitsAPI(
|
|||||||
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
|
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBucketUpdatesAPI retrieves latest values for each bucket
|
|
||||||
func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
|
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
|
||||||
defer cancel()
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
defer hdb.apiConnCon.Release()
|
|
||||||
var bucketUpdates []*BucketUpdateAPI
|
|
||||||
err = meddler.QueryAll(
|
|
||||||
hdb.dbRead, &bucketUpdates,
|
|
||||||
`SELECT num_bucket, withdrawals FROM bucket_update
|
|
||||||
WHERE item_id in(SELECT max(item_id) FROM bucket_update
|
|
||||||
group by num_bucket)
|
|
||||||
ORDER BY num_bucket ASC;`,
|
|
||||||
)
|
|
||||||
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
|
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
|
||||||
func (hdb *HistoryDB) GetCoordinatorsAPI(
|
func (hdb *HistoryDB) GetCoordinatorsAPI(
|
||||||
bidderAddr, forgerAddr *ethCommon.Address,
|
bidderAddr, forgerAddr *ethCommon.Address,
|
||||||
@@ -800,29 +809,6 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
|
|||||||
return auctionVars, tracerr.Wrap(err)
|
return auctionVars, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
|
|
||||||
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
|
|
||||||
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
|
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
|
||||||
defer cancel()
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
defer hdb.apiConnCon.Release()
|
|
||||||
auctionVars := []*MinBidInfo{}
|
|
||||||
query := `
|
|
||||||
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
|
|
||||||
WHERE default_slot_set_bid_slot_num < $1
|
|
||||||
ORDER BY default_slot_set_bid_slot_num DESC
|
|
||||||
LIMIT $2;
|
|
||||||
`
|
|
||||||
err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAccountAPI returns an account by its index
|
// GetAccountAPI returns an account by its index
|
||||||
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
|
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
cancel, err := hdb.apiConnCon.Acquire()
|
||||||
@@ -941,125 +927,6 @@ func (hdb *HistoryDB) GetAccountsAPI(
|
|||||||
accounts[0].TotalItems - uint64(len(accounts)), nil
|
accounts[0].TotalItems - uint64(len(accounts)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMetricsAPI returns metrics
|
|
||||||
func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
|
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
|
||||||
defer cancel()
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
defer hdb.apiConnCon.Release()
|
|
||||||
metricsTotals := &MetricsTotals{}
|
|
||||||
metrics := &Metrics{}
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metricsTotals, `SELECT
|
|
||||||
COALESCE (MIN(batch.batch_num), 0) as batch_num,
|
|
||||||
COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp,
|
|
||||||
COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
|
|
||||||
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
|
||||||
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
|
|
||||||
FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
|
|
||||||
// Avoid dividing by 0
|
|
||||||
if seconds == 0 {
|
|
||||||
seconds++
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
|
|
||||||
|
|
||||||
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
|
|
||||||
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
|
|
||||||
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
|
|
||||||
} else {
|
|
||||||
metrics.TransactionsPerBatch = float64(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
|
||||||
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
|
||||||
WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if metricsTotals.TotalBatches > 0 {
|
|
||||||
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
|
|
||||||
} else {
|
|
||||||
metrics.BatchFrequency = 0
|
|
||||||
}
|
|
||||||
if metricsTotals.TotalTransactions > 0 {
|
|
||||||
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
|
||||||
} else {
|
|
||||||
metrics.AvgTransactionFee = 0
|
|
||||||
}
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metrics,
|
|
||||||
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metrics,
|
|
||||||
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0)
|
|
||||||
AS estimated_time_to_forge_l1 FROM tx
|
|
||||||
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
|
|
||||||
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
|
|
||||||
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
|
|
||||||
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
|
|
||||||
metricsTotals.FirstBatchNum, lastBatchNum,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return metrics, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAvgTxFeeAPI returns average transaction fee of the last 1h
|
|
||||||
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
|
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
|
||||||
defer cancel()
|
|
||||||
if err != nil {
|
|
||||||
return 0, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
defer hdb.apiConnCon.Release()
|
|
||||||
metricsTotals := &MetricsTotals{}
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
|
|
||||||
COALESCE (MIN(tx.batch_num), 0) as batch_num
|
|
||||||
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
|
|
||||||
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
|
|
||||||
if err != nil {
|
|
||||||
return 0, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
err = meddler.QueryRow(
|
|
||||||
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
|
||||||
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
|
||||||
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
|
|
||||||
if err != nil {
|
|
||||||
return 0, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var avgTransactionFee float64
|
|
||||||
if metricsTotals.TotalTransactions > 0 {
|
|
||||||
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
|
||||||
} else {
|
|
||||||
avgTransactionFee = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return avgTransactionFee, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCommonAccountAPI returns the account associated to an account idx
|
// GetCommonAccountAPI returns the account associated to an account idx
|
||||||
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
|
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
|
||||||
cancel, err := hdb.apiConnCon.Acquire()
|
cancel, err := hdb.apiConnCon.Acquire()
|
||||||
@@ -1075,3 +942,261 @@ func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, erro
|
|||||||
)
|
)
|
||||||
return account, tracerr.Wrap(err)
|
return account, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetCoordinatorAPI returns a coordinator by its bidderAddr
|
||||||
|
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
||||||
|
cancel, err := hdb.apiConnCon.Acquire()
|
||||||
|
defer cancel()
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
defer hdb.apiConnCon.Release()
|
||||||
|
return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
||||||
|
coordinator := &CoordinatorAPI{}
|
||||||
|
err := meddler.QueryRow(
|
||||||
|
d, coordinator,
|
||||||
|
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
|
||||||
|
bidderAddr,
|
||||||
|
)
|
||||||
|
return coordinator, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNodeInfoAPI retusnt he NodeInfo
|
||||||
|
func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
|
||||||
|
cancel, err := hdb.apiConnCon.Acquire()
|
||||||
|
defer cancel()
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
defer hdb.apiConnCon.Release()
|
||||||
|
return hdb.GetNodeInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBucketUpdatesInternalAPI returns the latest bucket updates
|
||||||
|
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
|
||||||
|
var bucketUpdates []*BucketUpdateAPI
|
||||||
|
err := meddler.QueryAll(
|
||||||
|
hdb.dbRead, &bucketUpdates,
|
||||||
|
`SELECT num_bucket, withdrawals FROM bucket_update
|
||||||
|
WHERE item_id in(SELECT max(item_id) FROM bucket_update
|
||||||
|
group by num_bucket)
|
||||||
|
ORDER BY num_bucket ASC;`,
|
||||||
|
)
|
||||||
|
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNextForgersInternalAPI returns next forgers
|
||||||
|
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
|
||||||
|
auctionConsts *common.AuctionConstants,
|
||||||
|
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
|
||||||
|
secondsPerBlock := int64(15) //nolint:gomnd
|
||||||
|
// currentSlot and lastClosedSlot included
|
||||||
|
limit := uint(lastClosedSlot - currentSlot + 1)
|
||||||
|
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, ¤tSlot, &lastClosedSlot, nil, &limit, "ASC")
|
||||||
|
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
nextForgers := []NextForgerAPI{}
|
||||||
|
// Get min bid info
|
||||||
|
var minBidInfo []MinBidInfo
|
||||||
|
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
|
||||||
|
// All min bids can be calculated with the last update of AuctionVariables
|
||||||
|
|
||||||
|
minBidInfo = []MinBidInfo{{
|
||||||
|
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
|
||||||
|
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
|
||||||
|
}}
|
||||||
|
} else {
|
||||||
|
// Get all the relevant updates from the DB
|
||||||
|
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Create nextForger for each slot
|
||||||
|
for i := currentSlot; i <= lastClosedSlot; i++ {
|
||||||
|
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
|
||||||
|
auctionConsts.GenesisBlockNum
|
||||||
|
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
|
||||||
|
auctionConsts.GenesisBlockNum - 1
|
||||||
|
nextForger := NextForgerAPI{
|
||||||
|
Period: Period{
|
||||||
|
SlotNum: i,
|
||||||
|
FromBlock: fromBlock,
|
||||||
|
ToBlock: toBlock,
|
||||||
|
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
|
||||||
|
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
|
||||||
|
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
|
||||||
|
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
foundForger := false
|
||||||
|
// If there is a bid for a slot, get forger (coordinator)
|
||||||
|
for j := range bids {
|
||||||
|
slotNum := bids[j].SlotNum
|
||||||
|
if slotNum == i {
|
||||||
|
// There's a bid for the slot
|
||||||
|
// Check if the bid is greater than the minimum required
|
||||||
|
for i := 0; i < len(minBidInfo); i++ {
|
||||||
|
// Find the most recent update
|
||||||
|
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
|
||||||
|
// Get min bid
|
||||||
|
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
|
||||||
|
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
|
||||||
|
// Check if the bid has beaten the minimum
|
||||||
|
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
|
||||||
|
if !ok {
|
||||||
|
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
|
||||||
|
}
|
||||||
|
if minBid.Cmp(bid) == 1 {
|
||||||
|
// Min bid is greater than bid, the slot will be forged by boot coordinator
|
||||||
|
break
|
||||||
|
}
|
||||||
|
foundForger = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !foundForger { // There is no bid or it's smaller than the minimum
|
||||||
|
break
|
||||||
|
}
|
||||||
|
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
nextForger.Coordinator = *coordinator
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If there is no bid, the coordinator that will forge is boot coordinator
|
||||||
|
if !foundForger {
|
||||||
|
nextForger.Coordinator = CoordinatorAPI{
|
||||||
|
Forger: auctionVars.BootCoordinator,
|
||||||
|
URL: auctionVars.BootCoordinatorURL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nextForgers = append(nextForgers, nextForger)
|
||||||
|
}
|
||||||
|
return nextForgers, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricsInternalAPI returns the MetricsAPI
|
||||||
|
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
|
||||||
|
var metrics MetricsAPI
|
||||||
|
// Get the first and last batch of the last 24h and their timestamps
|
||||||
|
// if u.state.Network.LastBatch == nil {
|
||||||
|
// return &metrics, nil
|
||||||
|
// }
|
||||||
|
type period struct {
|
||||||
|
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
|
||||||
|
FromTimestamp time.Time `meddler:"from_timestamp"`
|
||||||
|
ToBatchNum common.BatchNum `meddler:"-"`
|
||||||
|
ToTimestamp time.Time `meddler:"to_timestamp"`
|
||||||
|
}
|
||||||
|
p := &period{
|
||||||
|
ToBatchNum: lastBatchNum,
|
||||||
|
}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, p, `SELECT
|
||||||
|
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
|
||||||
|
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
|
||||||
|
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
|
||||||
|
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
||||||
|
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
|
||||||
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Get the amount of txs of that period
|
||||||
|
row := hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var nTxs int
|
||||||
|
if err := row.Scan(&nTxs); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Set txs/s
|
||||||
|
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
|
||||||
|
if seconds == 0 { // Avoid dividing by 0
|
||||||
|
seconds++
|
||||||
|
}
|
||||||
|
metrics.TransactionsPerSecond = float64(nTxs) / seconds
|
||||||
|
// Set txs/batch
|
||||||
|
nBatches := p.ToBatchNum - p.FromBatchNum + 1
|
||||||
|
if nBatches == 0 { // Avoid dividing by 0
|
||||||
|
nBatches++
|
||||||
|
}
|
||||||
|
if (p.ToBatchNum - p.FromBatchNum) > 0 {
|
||||||
|
metrics.TransactionsPerBatch = float64(nTxs) /
|
||||||
|
float64(nBatches)
|
||||||
|
} else {
|
||||||
|
metrics.TransactionsPerBatch = 0
|
||||||
|
}
|
||||||
|
// Get total fee of that period
|
||||||
|
row = hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var totalFee float64
|
||||||
|
if err := row.Scan(&totalFee); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Set batch frequency
|
||||||
|
metrics.BatchFrequency = seconds / float64(nBatches)
|
||||||
|
// Set avg transaction fee (only L2 txs have fee)
|
||||||
|
row = hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2 AND NOT is_l1;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var nL2Txs int
|
||||||
|
if err := row.Scan(&nL2Txs); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
if nL2Txs > 0 {
|
||||||
|
metrics.AvgTransactionFee = totalFee / float64(nL2Txs)
|
||||||
|
} else {
|
||||||
|
metrics.AvgTransactionFee = 0
|
||||||
|
}
|
||||||
|
// Get and set amount of registered accounts
|
||||||
|
type registeredAccounts struct {
|
||||||
|
TotalIdx int64 `meddler:"total_idx"`
|
||||||
|
TotalBJJ int64 `meddler:"total_bjj"`
|
||||||
|
}
|
||||||
|
ra := ®isteredAccounts{}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, ra,
|
||||||
|
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
|
||||||
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
metrics.TotalAccounts = ra.TotalIdx
|
||||||
|
metrics.TotalBJJs = ra.TotalBJJ
|
||||||
|
// Get and set estimated time to forge L1 tx
|
||||||
|
row = hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
|
||||||
|
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
|
||||||
|
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
|
||||||
|
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
|
||||||
|
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var timeToForgeL1 float64
|
||||||
|
if err := row.Scan(&timeToForgeL1); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
|
||||||
|
return &metrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStateAPI returns the StateAPI
|
||||||
|
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
|
||||||
|
cancel, err := hdb.apiConnCon.Acquire()
|
||||||
|
defer cancel()
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
defer hdb.apiConnCon.Release()
|
||||||
|
return hdb.getStateAPI(hdb.dbRead)
|
||||||
|
}
|
||||||
|
|||||||
@@ -456,13 +456,10 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
|
|||||||
|
|
||||||
// UpdateTokenValue updates the USD value of a token. Value is the price in
|
// UpdateTokenValue updates the USD value of a token. Value is the price in
|
||||||
// USD of a normalized token (1 token = 10^decimals units)
|
// USD of a normalized token (1 token = 10^decimals units)
|
||||||
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
|
func (hdb *HistoryDB) UpdateTokenValue(tokenAddr ethCommon.Address, value float64) error {
|
||||||
// Sanitize symbol
|
|
||||||
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
|
|
||||||
|
|
||||||
_, err := hdb.dbWrite.Exec(
|
_, err := hdb.dbWrite.Exec(
|
||||||
"UPDATE token SET usd = $1 WHERE symbol = $2;",
|
"UPDATE token SET usd = $1 WHERE eth_addr = $2;",
|
||||||
value, tokenSymbol,
|
value, tokenAddr,
|
||||||
)
|
)
|
||||||
return tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
@@ -696,11 +693,11 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
|
|||||||
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
|
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
|
||||||
var txs []*common.L1Tx
|
var txs []*common.L1Tx
|
||||||
err := meddler.QueryAll(
|
err := meddler.QueryAll(
|
||||||
hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
|
hdb.dbRead, &txs,
|
||||||
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
|
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
|
||||||
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
|
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
|
||||||
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
|
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE 0 END) AS effective_amount,
|
||||||
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE '\x' END) AS effective_deposit_amount,
|
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE 0 END) AS effective_deposit_amount,
|
||||||
tx.eth_block_num, tx.type, tx.batch_num
|
tx.eth_block_num, tx.type, tx.batch_num
|
||||||
FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE ORDER BY item_id;`,
|
FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE ORDER BY item_id;`,
|
||||||
)
|
)
|
||||||
@@ -842,6 +839,18 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
|
|||||||
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
|
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
|
||||||
|
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
|
||||||
|
minBidInfo := []*MinBidInfo{}
|
||||||
|
query := `
|
||||||
|
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
|
||||||
|
WHERE default_slot_set_bid_slot_num < $1
|
||||||
|
ORDER BY default_slot_set_bid_slot_num DESC
|
||||||
|
LIMIT $2;`
|
||||||
|
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
|
||||||
|
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
|
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
|
||||||
if len(tokenExchanges) == 0 {
|
if len(tokenExchanges) == 0 {
|
||||||
return nil
|
return nil
|
||||||
@@ -1140,17 +1149,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
|
|||||||
return tracerr.Wrap(txn.Commit())
|
return tracerr.Wrap(txn.Commit())
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCoordinatorAPI returns a coordinator by its bidderAddr
|
|
||||||
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
|
|
||||||
coordinator := &CoordinatorAPI{}
|
|
||||||
err := meddler.QueryRow(
|
|
||||||
hdb.dbRead, coordinator,
|
|
||||||
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
|
|
||||||
bidderAddr,
|
|
||||||
)
|
|
||||||
return coordinator, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddAuctionVars insert auction vars into the DB
|
// AddAuctionVars insert auction vars into the DB
|
||||||
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
|
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
|
||||||
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
|
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
|
||||||
@@ -1161,7 +1159,7 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
|
|||||||
tokens := []*TokenWithUSD{}
|
tokens := []*TokenWithUSD{}
|
||||||
if err := meddler.QueryAll(
|
if err := meddler.QueryAll(
|
||||||
hdb.dbRead, &tokens,
|
hdb.dbRead, &tokens,
|
||||||
"SELECT * FROM TOKEN",
|
"SELECT * FROM token ORDER BY token_id ASC",
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
@@ -1170,3 +1168,60 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
|
|||||||
}
|
}
|
||||||
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
|
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// CreateAccountExtraFeePercentage is the multiplication factor over
|
||||||
|
// the average fee for CreateAccount that is applied to obtain the
|
||||||
|
// recommended fee for CreateAccount
|
||||||
|
CreateAccountExtraFeePercentage float64 = 2.5
|
||||||
|
// CreateAccountInternalExtraFeePercentage is the multiplication factor
|
||||||
|
// over the average fee for CreateAccountInternal that is applied to
|
||||||
|
// obtain the recommended fee for CreateAccountInternal
|
||||||
|
CreateAccountInternalExtraFeePercentage float64 = 2.0
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetRecommendedFee returns the RecommendedFee information
|
||||||
|
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
|
||||||
|
var recommendedFee common.RecommendedFee
|
||||||
|
// Get total txs and the batch of the first selected tx of the last hour
|
||||||
|
type totalTxsSinceBatchNum struct {
|
||||||
|
TotalTxs int `meddler:"total_txs"`
|
||||||
|
FirstBatchNum common.BatchNum `meddler:"batch_num"`
|
||||||
|
}
|
||||||
|
ttsbn := &totalTxsSinceBatchNum{}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
|
||||||
|
COALESCE (MIN(tx.batch_num), 0) as batch_num
|
||||||
|
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
|
||||||
|
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
|
||||||
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Get the amount of batches and acumulated fees for the last hour
|
||||||
|
type totalBatchesAndFee struct {
|
||||||
|
TotalBatches int `meddler:"total_batches"`
|
||||||
|
TotalFees float64 `meddler:"total_fees"`
|
||||||
|
}
|
||||||
|
tbf := &totalBatchesAndFee{}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
|
||||||
|
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
||||||
|
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
|
||||||
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Update NodeInfo struct
|
||||||
|
var avgTransactionFee float64
|
||||||
|
if ttsbn.TotalTxs > 0 {
|
||||||
|
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
|
||||||
|
} else {
|
||||||
|
avgTransactionFee = 0
|
||||||
|
}
|
||||||
|
recommendedFee.ExistingAccount =
|
||||||
|
math.Max(avgTransactionFee, minFeeUSD)
|
||||||
|
recommendedFee.CreatesAccount =
|
||||||
|
math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
|
||||||
|
recommendedFee.CreatesAccountInternal =
|
||||||
|
math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
|
||||||
|
return &recommendedFee, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/apitypes"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
dbUtils "github.com/hermeznetwork/hermez-node/db"
|
dbUtils "github.com/hermeznetwork/hermez-node/db"
|
||||||
"github.com/hermeznetwork/hermez-node/log"
|
"github.com/hermeznetwork/hermez-node/log"
|
||||||
@@ -166,7 +167,7 @@ func TestBatches(t *testing.T) {
|
|||||||
if i%2 != 0 {
|
if i%2 != 0 {
|
||||||
// Set value to the token
|
// Set value to the token
|
||||||
value := (float64(i) + 5) * 5.389329
|
value := (float64(i) + 5) * 5.389329
|
||||||
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
|
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
|
||||||
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
|
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -276,7 +277,7 @@ func TestTokens(t *testing.T) {
|
|||||||
// Update token value
|
// Update token value
|
||||||
for i, token := range tokens {
|
for i, token := range tokens {
|
||||||
value := 1.01 * float64(i)
|
value := 1.01 * float64(i)
|
||||||
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
|
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
|
||||||
}
|
}
|
||||||
// Fetch tokens
|
// Fetch tokens
|
||||||
fetchedTokens, err = historyDB.GetTokensTest()
|
fetchedTokens, err = historyDB.GetTokensTest()
|
||||||
@@ -302,7 +303,7 @@ func TestTokensUTF8(t *testing.T) {
|
|||||||
// Generate fake tokens
|
// Generate fake tokens
|
||||||
const nTokens = 5
|
const nTokens = 5
|
||||||
tokens, ethToken := test.GenTokens(nTokens, blocks)
|
tokens, ethToken := test.GenTokens(nTokens, blocks)
|
||||||
nonUTFTokens := make([]common.Token, len(tokens)+1)
|
nonUTFTokens := make([]common.Token, len(tokens))
|
||||||
// Force token.name and token.symbol to be non UTF-8 Strings
|
// Force token.name and token.symbol to be non UTF-8 Strings
|
||||||
for i, token := range tokens {
|
for i, token := range tokens {
|
||||||
token.Name = fmt.Sprint("NON-UTF8-NAME-\xc5-", i)
|
token.Name = fmt.Sprint("NON-UTF8-NAME-\xc5-", i)
|
||||||
@@ -332,7 +333,7 @@ func TestTokensUTF8(t *testing.T) {
|
|||||||
// Update token value
|
// Update token value
|
||||||
for i, token := range nonUTFTokens {
|
for i, token := range nonUTFTokens {
|
||||||
value := 1.01 * float64(i)
|
value := 1.01 * float64(i)
|
||||||
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
|
assert.NoError(t, historyDB.UpdateTokenValue(token.EthAddr, value))
|
||||||
}
|
}
|
||||||
// Fetch tokens
|
// Fetch tokens
|
||||||
fetchedTokens, err = historyDB.GetTokensTest()
|
fetchedTokens, err = historyDB.GetTokensTest()
|
||||||
@@ -1176,7 +1177,7 @@ func TestGetMetricsAPI(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
|
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
|
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
|
||||||
@@ -1254,7 +1255,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
|
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
|
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
|
||||||
@@ -1269,13 +1270,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
|||||||
|
|
||||||
func TestGetMetricsAPIEmpty(t *testing.T) {
|
func TestGetMetricsAPIEmpty(t *testing.T) {
|
||||||
test.WipeDB(historyDB.DB())
|
test.WipeDB(historyDB.DB())
|
||||||
_, err := historyDBWithACC.GetMetricsAPI(0)
|
_, err := historyDBWithACC.GetMetricsInternalAPI(0)
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetAvgTxFeeEmpty(t *testing.T) {
|
|
||||||
test.WipeDB(historyDB.DB())
|
|
||||||
_, err := historyDBWithACC.GetAvgTxFeeAPI()
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1464,3 +1459,128 @@ func setTestBlocks(from, to int64) []common.Block {
|
|||||||
}
|
}
|
||||||
return blocks
|
return blocks
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNodeInfo(t *testing.T) {
|
||||||
|
test.WipeDB(historyDB.DB())
|
||||||
|
|
||||||
|
err := historyDB.SetStateInternalAPI(&StateAPI{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
clientSetup := test.NewClientSetupExample()
|
||||||
|
constants := &Constants{
|
||||||
|
SCConsts: common.SCConsts{
|
||||||
|
Rollup: *clientSetup.RollupConstants,
|
||||||
|
Auction: *clientSetup.AuctionConstants,
|
||||||
|
WDelayer: *clientSetup.WDelayerConstants,
|
||||||
|
},
|
||||||
|
ChainID: 42,
|
||||||
|
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
|
||||||
|
}
|
||||||
|
err = historyDB.SetConstants(constants)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Test parameters
|
||||||
|
var f64 float64 = 1.2
|
||||||
|
var i64 int64 = 8888
|
||||||
|
addr := ethCommon.HexToAddress("0x1234")
|
||||||
|
hash := ethCommon.HexToHash("0x5678")
|
||||||
|
stateAPI := &StateAPI{
|
||||||
|
NodePublicConfig: NodePublicConfig{
|
||||||
|
ForgeDelay: 3.1,
|
||||||
|
},
|
||||||
|
Network: NetworkAPI{
|
||||||
|
LastEthBlock: 12,
|
||||||
|
LastSyncBlock: 34,
|
||||||
|
LastBatch: &BatchAPI{
|
||||||
|
ItemID: 123,
|
||||||
|
BatchNum: 456,
|
||||||
|
EthBlockNum: 789,
|
||||||
|
EthBlockHash: hash,
|
||||||
|
Timestamp: time.Now(),
|
||||||
|
ForgerAddr: addr,
|
||||||
|
// CollectedFeesDB: map[common.TokenID]*big.Int{
|
||||||
|
// 0: big.NewInt(11111),
|
||||||
|
// 1: big.NewInt(21111),
|
||||||
|
// 2: big.NewInt(31111),
|
||||||
|
// },
|
||||||
|
CollectedFeesAPI: apitypes.CollectedFeesAPI(map[common.TokenID]apitypes.BigIntStr{
|
||||||
|
0: apitypes.BigIntStr("11111"),
|
||||||
|
1: apitypes.BigIntStr("21111"),
|
||||||
|
2: apitypes.BigIntStr("31111"),
|
||||||
|
}),
|
||||||
|
TotalFeesUSD: &f64,
|
||||||
|
StateRoot: apitypes.BigIntStr("1234"),
|
||||||
|
NumAccounts: 11,
|
||||||
|
ExitRoot: apitypes.BigIntStr("5678"),
|
||||||
|
ForgeL1TxsNum: &i64,
|
||||||
|
SlotNum: 44,
|
||||||
|
ForgedTxs: 23,
|
||||||
|
TotalItems: 0,
|
||||||
|
FirstItem: 0,
|
||||||
|
LastItem: 0,
|
||||||
|
},
|
||||||
|
CurrentSlot: 22,
|
||||||
|
NextForgers: []NextForgerAPI{
|
||||||
|
{
|
||||||
|
Coordinator: CoordinatorAPI{
|
||||||
|
ItemID: 111,
|
||||||
|
Bidder: addr,
|
||||||
|
Forger: addr,
|
||||||
|
EthBlockNum: 566,
|
||||||
|
URL: "asd",
|
||||||
|
TotalItems: 0,
|
||||||
|
FirstItem: 0,
|
||||||
|
LastItem: 0,
|
||||||
|
},
|
||||||
|
Period: Period{
|
||||||
|
SlotNum: 33,
|
||||||
|
FromBlock: 55,
|
||||||
|
ToBlock: 66,
|
||||||
|
FromTimestamp: time.Now(),
|
||||||
|
ToTimestamp: time.Now(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Metrics: MetricsAPI{
|
||||||
|
TransactionsPerBatch: 1.1,
|
||||||
|
TotalAccounts: 42,
|
||||||
|
},
|
||||||
|
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
|
||||||
|
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
|
||||||
|
WithdrawalDelayer: *clientSetup.WDelayerVariables,
|
||||||
|
RecommendedFee: common.RecommendedFee{
|
||||||
|
ExistingAccount: 0.15,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err = historyDB.SetStateInternalAPI(stateAPI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
nodeConfig := &NodeConfig{
|
||||||
|
MaxPoolTxs: 123,
|
||||||
|
MinFeeUSD: 0.5,
|
||||||
|
}
|
||||||
|
err = historyDB.SetNodeConfig(nodeConfig)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
dbConstants, err := historyDB.GetConstants()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, constants, dbConstants)
|
||||||
|
|
||||||
|
dbNodeConfig, err := historyDB.GetNodeConfig()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, nodeConfig, dbNodeConfig)
|
||||||
|
|
||||||
|
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, stateAPI.Network.LastBatch.Timestamp.Unix(),
|
||||||
|
dbStateAPI.Network.LastBatch.Timestamp.Unix())
|
||||||
|
dbStateAPI.Network.LastBatch.Timestamp = stateAPI.Network.LastBatch.Timestamp
|
||||||
|
assert.Equal(t, stateAPI.Network.NextForgers[0].Period.FromTimestamp.Unix(),
|
||||||
|
dbStateAPI.Network.NextForgers[0].Period.FromTimestamp.Unix())
|
||||||
|
dbStateAPI.Network.NextForgers[0].Period.FromTimestamp = stateAPI.Network.NextForgers[0].Period.FromTimestamp
|
||||||
|
assert.Equal(t, stateAPI.Network.NextForgers[0].Period.ToTimestamp.Unix(),
|
||||||
|
dbStateAPI.Network.NextForgers[0].Period.ToTimestamp.Unix())
|
||||||
|
dbStateAPI.Network.NextForgers[0].Period.ToTimestamp = stateAPI.Network.NextForgers[0].Period.ToTimestamp
|
||||||
|
assert.Equal(t, stateAPI, dbStateAPI)
|
||||||
|
}
|
||||||
|
|||||||
170
db/historydb/nodeinfo.go
Normal file
170
db/historydb/nodeinfo.go
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
package historydb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/apitypes"
|
||||||
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
|
"github.com/hermeznetwork/tracerr"
|
||||||
|
"github.com/russross/meddler"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Period represents a time period in ethereum
|
||||||
|
type Period struct {
|
||||||
|
SlotNum int64 `json:"slotNum"`
|
||||||
|
FromBlock int64 `json:"fromBlock"`
|
||||||
|
ToBlock int64 `json:"toBlock"`
|
||||||
|
FromTimestamp time.Time `json:"fromTimestamp"`
|
||||||
|
ToTimestamp time.Time `json:"toTimestamp"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextForgerAPI represents the next forger exposed via the API
|
||||||
|
type NextForgerAPI struct {
|
||||||
|
Coordinator CoordinatorAPI `json:"coordinator"`
|
||||||
|
Period Period `json:"period"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetworkAPI is the network state exposed via the API
|
||||||
|
type NetworkAPI struct {
|
||||||
|
LastEthBlock int64 `json:"lastEthereumBlock"`
|
||||||
|
LastSyncBlock int64 `json:"lastSynchedBlock"`
|
||||||
|
LastBatch *BatchAPI `json:"lastBatch"`
|
||||||
|
CurrentSlot int64 `json:"currentSlot"`
|
||||||
|
NextForgers []NextForgerAPI `json:"nextForgers"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodePublicConfig is the configuration of the node that is exposed via API
|
||||||
|
type NodePublicConfig struct {
|
||||||
|
// ForgeDelay in seconds
|
||||||
|
ForgeDelay float64 `json:"forgeDelay"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateAPI is an object representing the node and network state exposed via the API
|
||||||
|
type StateAPI struct {
|
||||||
|
// NodePublicConfig is the configuration of the node that is exposed via API
|
||||||
|
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
|
||||||
|
Network NetworkAPI `json:"network"`
|
||||||
|
Metrics MetricsAPI `json:"metrics"`
|
||||||
|
Rollup RollupVariablesAPI `json:"rollup"`
|
||||||
|
Auction AuctionVariablesAPI `json:"auction"`
|
||||||
|
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
|
||||||
|
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Constants contains network constants
|
||||||
|
type Constants struct {
|
||||||
|
common.SCConsts
|
||||||
|
ChainID uint16
|
||||||
|
HermezAddress ethCommon.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeConfig contains the node config exposed in the API
|
||||||
|
type NodeConfig struct {
|
||||||
|
MaxPoolTxs uint32
|
||||||
|
MinFeeUSD float64
|
||||||
|
ForgeDelay float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeInfo contains information about he node used when serving the API
|
||||||
|
type NodeInfo struct {
|
||||||
|
ItemID int `meddler:"item_id,pk"`
|
||||||
|
StateAPI *StateAPI `meddler:"state,json"`
|
||||||
|
NodeConfig *NodeConfig `meddler:"config,json"`
|
||||||
|
Constants *Constants `meddler:"constants,json"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNodeInfo returns the NodeInfo
|
||||||
|
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
|
||||||
|
ni := &NodeInfo{}
|
||||||
|
err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
|
||||||
|
)
|
||||||
|
return ni, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConstants returns the Constats
|
||||||
|
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
|
||||||
|
var nodeInfo NodeInfo
|
||||||
|
err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, &nodeInfo,
|
||||||
|
"SELECT constants FROM node_info WHERE item_id = 1;",
|
||||||
|
)
|
||||||
|
return nodeInfo.Constants, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConstants sets the Constants
|
||||||
|
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
|
||||||
|
_constants := struct {
|
||||||
|
Constants *Constants `meddler:"constants,json"`
|
||||||
|
}{constants}
|
||||||
|
values, err := meddler.Default.Values(&_constants, false)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
_, err = hdb.dbWrite.Exec(
|
||||||
|
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
|
||||||
|
values[0],
|
||||||
|
)
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStateInternalAPI returns the StateAPI
|
||||||
|
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
|
||||||
|
return hdb.getStateAPI(hdb.dbRead)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
|
||||||
|
var nodeInfo NodeInfo
|
||||||
|
err := meddler.QueryRow(
|
||||||
|
d, &nodeInfo,
|
||||||
|
"SELECT state FROM node_info WHERE item_id = 1;",
|
||||||
|
)
|
||||||
|
return nodeInfo.StateAPI, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStateInternalAPI sets the StateAPI
|
||||||
|
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
|
||||||
|
if stateAPI.Network.LastBatch != nil {
|
||||||
|
stateAPI.Network.LastBatch.CollectedFeesAPI =
|
||||||
|
apitypes.NewCollectedFeesAPI(stateAPI.Network.LastBatch.CollectedFeesDB)
|
||||||
|
}
|
||||||
|
_stateAPI := struct {
|
||||||
|
StateAPI *StateAPI `meddler:"state,json"`
|
||||||
|
}{stateAPI}
|
||||||
|
values, err := meddler.Default.Values(&_stateAPI, false)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
_, err = hdb.dbWrite.Exec(
|
||||||
|
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
|
||||||
|
values[0],
|
||||||
|
)
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNodeConfig returns the NodeConfig
|
||||||
|
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
|
||||||
|
var nodeInfo NodeInfo
|
||||||
|
err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, &nodeInfo,
|
||||||
|
"SELECT config FROM node_info WHERE item_id = 1;",
|
||||||
|
)
|
||||||
|
return nodeInfo.NodeConfig, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNodeConfig sets the NodeConfig
|
||||||
|
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
|
||||||
|
_nodeConfig := struct {
|
||||||
|
NodeConfig *NodeConfig `meddler:"config,json"`
|
||||||
|
}{nodeConfig}
|
||||||
|
values, err := meddler.Default.Values(&_nodeConfig, false)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
_, err = hdb.dbWrite.Exec(
|
||||||
|
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
|
||||||
|
values[0],
|
||||||
|
)
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
@@ -295,7 +295,8 @@ type BatchAPI struct {
|
|||||||
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash" meddler:"hash"`
|
EthBlockHash ethCommon.Hash `json:"ethereumBlockHash" meddler:"hash"`
|
||||||
Timestamp time.Time `json:"timestamp" meddler:"timestamp,utctime"`
|
Timestamp time.Time `json:"timestamp" meddler:"timestamp,utctime"`
|
||||||
ForgerAddr ethCommon.Address `json:"forgerAddr" meddler:"forger_addr"`
|
ForgerAddr ethCommon.Address `json:"forgerAddr" meddler:"forger_addr"`
|
||||||
CollectedFees apitypes.CollectedFees `json:"collectedFees" meddler:"fees_collected,json"`
|
CollectedFeesDB map[common.TokenID]*big.Int `json:"-" meddler:"fees_collected,json"`
|
||||||
|
CollectedFeesAPI apitypes.CollectedFeesAPI `json:"collectedFees" meddler:"-"`
|
||||||
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
|
TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
|
||||||
StateRoot apitypes.BigIntStr `json:"stateRoot" meddler:"state_root"`
|
StateRoot apitypes.BigIntStr `json:"stateRoot" meddler:"state_root"`
|
||||||
NumAccounts int `json:"numAccounts" meddler:"num_accounts"`
|
NumAccounts int `json:"numAccounts" meddler:"num_accounts"`
|
||||||
@@ -308,8 +309,8 @@ type BatchAPI struct {
|
|||||||
LastItem uint64 `json:"-" meddler:"last_item"`
|
LastItem uint64 `json:"-" meddler:"last_item"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metrics define metrics of the network
|
// MetricsAPI define metrics of the network
|
||||||
type Metrics struct {
|
type MetricsAPI struct {
|
||||||
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
|
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
|
||||||
BatchFrequency float64 `json:"batchFrequency"`
|
BatchFrequency float64 `json:"batchFrequency"`
|
||||||
TransactionsPerSecond float64 `json:"transactionsPerSecond"`
|
TransactionsPerSecond float64 `json:"transactionsPerSecond"`
|
||||||
@@ -319,17 +320,6 @@ type Metrics struct {
|
|||||||
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
|
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// MetricsTotals is used to get temporal information from HistoryDB
|
|
||||||
// to calculate data to be stored into the Metrics struct
|
|
||||||
type MetricsTotals struct {
|
|
||||||
TotalTransactions uint64 `meddler:"total_txs"`
|
|
||||||
FirstBatchNum common.BatchNum `meddler:"batch_num"`
|
|
||||||
TotalBatches int64 `meddler:"total_batches"`
|
|
||||||
TotalFeesUSD float64 `meddler:"total_fees"`
|
|
||||||
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
|
|
||||||
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BidAPI is a representation of a bid with additional information
|
// BidAPI is a representation of a bid with additional information
|
||||||
// required by the API
|
// required by the API
|
||||||
type BidAPI struct {
|
type BidAPI struct {
|
||||||
@@ -380,6 +370,27 @@ type RollupVariablesAPI struct {
|
|||||||
SafeMode bool `json:"safeMode" meddler:"safe_mode"`
|
SafeMode bool `json:"safeMode" meddler:"safe_mode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
|
||||||
|
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
|
||||||
|
rollupVars := RollupVariablesAPI{
|
||||||
|
EthBlockNum: rollupVariables.EthBlockNum,
|
||||||
|
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
|
||||||
|
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
|
||||||
|
WithdrawalDelay: rollupVariables.WithdrawalDelay,
|
||||||
|
SafeMode: rollupVariables.SafeMode,
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, bucket := range rollupVariables.Buckets {
|
||||||
|
rollupVars.Buckets[i] = BucketParamsAPI{
|
||||||
|
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
|
||||||
|
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
|
||||||
|
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
|
||||||
|
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &rollupVars
|
||||||
|
}
|
||||||
|
|
||||||
// AuctionVariablesAPI are the variables of the Auction Smart Contract
|
// AuctionVariablesAPI are the variables of the Auction Smart Contract
|
||||||
type AuctionVariablesAPI struct {
|
type AuctionVariablesAPI struct {
|
||||||
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
|
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
|
||||||
@@ -404,3 +415,28 @@ type AuctionVariablesAPI struct {
|
|||||||
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
|
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
|
||||||
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
|
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewAuctionVariablesAPI creates a AuctionVariablesAPI from common.AuctionVariables
|
||||||
|
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
|
||||||
|
auctionVars := AuctionVariablesAPI{
|
||||||
|
EthBlockNum: auctionVariables.EthBlockNum,
|
||||||
|
DonationAddress: auctionVariables.DonationAddress,
|
||||||
|
BootCoordinator: auctionVariables.BootCoordinator,
|
||||||
|
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
|
||||||
|
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
|
||||||
|
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
|
||||||
|
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
|
||||||
|
Outbidding: auctionVariables.Outbidding,
|
||||||
|
SlotDeadline: auctionVariables.SlotDeadline,
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, slot := range auctionVariables.DefaultSlotSetBid {
|
||||||
|
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, ratio := range auctionVariables.AllocationRatio {
|
||||||
|
auctionVars.AllocationRatio[i] = ratio
|
||||||
|
}
|
||||||
|
|
||||||
|
return &auctionVars
|
||||||
|
}
|
||||||
|
|||||||
@@ -80,7 +80,7 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
|
|||||||
q := fmt.Sprintf(
|
q := fmt.Sprintf(
|
||||||
`INSERT INTO tx_pool (%s)
|
`INSERT INTO tx_pool (%s)
|
||||||
SELECT %s
|
SELECT %s
|
||||||
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
|
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v AND NOT external_delete) < $%v;`,
|
||||||
namesPart, valuesPart,
|
namesPart, valuesPart,
|
||||||
len(values)+1, len(values)+2) //nolint:gomnd
|
len(values)+1, len(values)+2) //nolint:gomnd
|
||||||
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
|
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
|
||||||
|
|||||||
@@ -204,7 +204,7 @@ func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
|
|||||||
var txs []*common.PoolL2Tx
|
var txs []*common.PoolL2Tx
|
||||||
err := meddler.QueryAll(
|
err := meddler.QueryAll(
|
||||||
l2db.dbRead, &txs,
|
l2db.dbRead, &txs,
|
||||||
selectPoolTxCommon+"WHERE state = $1",
|
selectPoolTxCommon+"WHERE state = $1 AND NOT external_delete;",
|
||||||
common.PoolL2TxStatePending,
|
common.PoolL2TxStatePending,
|
||||||
)
|
)
|
||||||
return db.SlicePtrsToSlice(txs).([]common.PoolL2Tx), tracerr.Wrap(err)
|
return db.SlicePtrsToSlice(txs).([]common.PoolL2Tx), tracerr.Wrap(err)
|
||||||
|
|||||||
@@ -121,7 +121,7 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
|
|||||||
}
|
}
|
||||||
tokens[token.TokenID] = readToken
|
tokens[token.TokenID] = readToken
|
||||||
// Set value to the tokens
|
// Set value to the tokens
|
||||||
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD)
|
err := historyDB.UpdateTokenValue(readToken.EthAddr, *readToken.USD)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,11 @@
|
|||||||
-- +migrate Up
|
-- +migrate Up
|
||||||
|
|
||||||
|
-- NOTE: We use "DECIMAL(78,0)" to encode go *big.Int types. All the *big.Int
|
||||||
|
-- that we deal with represent a value in the SNARK field, which is an integer
|
||||||
|
-- of 256 bits. `log(2**256, 10) = 77.06`: that is, a 256 bit number can have
|
||||||
|
-- at most 78 digits, so we use this value to specify the precision in the
|
||||||
|
-- PostgreSQL DECIMAL guaranteeing that we will never lose precision.
|
||||||
|
|
||||||
-- History
|
-- History
|
||||||
CREATE TABLE block (
|
CREATE TABLE block (
|
||||||
eth_block_num BIGINT PRIMARY KEY,
|
eth_block_num BIGINT PRIMARY KEY,
|
||||||
@@ -22,10 +28,10 @@ CREATE TABLE batch (
|
|||||||
forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
|
forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
|
||||||
fees_collected BYTEA NOT NULL,
|
fees_collected BYTEA NOT NULL,
|
||||||
fee_idxs_coordinator BYTEA NOT NULL,
|
fee_idxs_coordinator BYTEA NOT NULL,
|
||||||
state_root BYTEA NOT NULL,
|
state_root DECIMAL(78,0) NOT NULL,
|
||||||
num_accounts BIGINT NOT NULL,
|
num_accounts BIGINT NOT NULL,
|
||||||
last_idx BIGINT NOT NULL,
|
last_idx BIGINT NOT NULL,
|
||||||
exit_root BYTEA NOT NULL,
|
exit_root DECIMAL(78,0) NOT NULL,
|
||||||
forge_l1_txs_num BIGINT,
|
forge_l1_txs_num BIGINT,
|
||||||
slot_num BIGINT NOT NULL,
|
slot_num BIGINT NOT NULL,
|
||||||
total_fees_usd NUMERIC
|
total_fees_usd NUMERIC
|
||||||
@@ -34,7 +40,7 @@ CREATE TABLE batch (
|
|||||||
CREATE TABLE bid (
|
CREATE TABLE bid (
|
||||||
item_id SERIAL PRIMARY KEY,
|
item_id SERIAL PRIMARY KEY,
|
||||||
slot_num BIGINT NOT NULL,
|
slot_num BIGINT NOT NULL,
|
||||||
bid_value BYTEA NOT NULL,
|
bid_value DECIMAL(78,0) NOT NULL,
|
||||||
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
||||||
bidder_addr BYTEA NOT NULL -- fake foreign key for coordinator
|
bidder_addr BYTEA NOT NULL -- fake foreign key for coordinator
|
||||||
);
|
);
|
||||||
@@ -106,7 +112,7 @@ CREATE TABLE account_update (
|
|||||||
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
|
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
|
||||||
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
|
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
|
||||||
nonce BIGINT NOT NULL,
|
nonce BIGINT NOT NULL,
|
||||||
balance BYTEA NOT NULL
|
balance DECIMAL(78,0) NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE TABLE exit_tree (
|
CREATE TABLE exit_tree (
|
||||||
@@ -114,7 +120,7 @@ CREATE TABLE exit_tree (
|
|||||||
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
|
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
|
||||||
account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE,
|
account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE,
|
||||||
merkle_proof BYTEA NOT NULL,
|
merkle_proof BYTEA NOT NULL,
|
||||||
balance BYTEA NOT NULL,
|
balance DECIMAL(78,0) NOT NULL,
|
||||||
instant_withdrawn BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
|
instant_withdrawn BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
|
||||||
delayed_withdraw_request BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
|
delayed_withdraw_request BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
|
||||||
owner BYTEA,
|
owner BYTEA,
|
||||||
@@ -164,7 +170,7 @@ CREATE TABLE tx (
|
|||||||
to_idx BIGINT NOT NULL,
|
to_idx BIGINT NOT NULL,
|
||||||
to_eth_addr BYTEA,
|
to_eth_addr BYTEA,
|
||||||
to_bjj BYTEA,
|
to_bjj BYTEA,
|
||||||
amount BYTEA NOT NULL,
|
amount DECIMAL(78,0) NOT NULL,
|
||||||
amount_success BOOLEAN NOT NULL DEFAULT true,
|
amount_success BOOLEAN NOT NULL DEFAULT true,
|
||||||
amount_f NUMERIC NOT NULL,
|
amount_f NUMERIC NOT NULL,
|
||||||
token_id INT NOT NULL REFERENCES token (token_id),
|
token_id INT NOT NULL REFERENCES token (token_id),
|
||||||
@@ -174,7 +180,7 @@ CREATE TABLE tx (
|
|||||||
-- L1
|
-- L1
|
||||||
to_forge_l1_txs_num BIGINT,
|
to_forge_l1_txs_num BIGINT,
|
||||||
user_origin BOOLEAN,
|
user_origin BOOLEAN,
|
||||||
deposit_amount BYTEA,
|
deposit_amount DECIMAL(78,0),
|
||||||
deposit_amount_success BOOLEAN NOT NULL DEFAULT true,
|
deposit_amount_success BOOLEAN NOT NULL DEFAULT true,
|
||||||
deposit_amount_f NUMERIC,
|
deposit_amount_f NUMERIC,
|
||||||
deposit_amount_usd NUMERIC,
|
deposit_amount_usd NUMERIC,
|
||||||
@@ -544,7 +550,7 @@ FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs();
|
|||||||
|
|
||||||
CREATE TABLE rollup_vars (
|
CREATE TABLE rollup_vars (
|
||||||
eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
||||||
fee_add_token BYTEA NOT NULL,
|
fee_add_token DECIMAL(78,0) NOT NULL,
|
||||||
forge_l1_timeout BIGINT NOT NULL,
|
forge_l1_timeout BIGINT NOT NULL,
|
||||||
withdrawal_delay BIGINT NOT NULL,
|
withdrawal_delay BIGINT NOT NULL,
|
||||||
buckets BYTEA NOT NULL,
|
buckets BYTEA NOT NULL,
|
||||||
@@ -556,7 +562,7 @@ CREATE TABLE bucket_update (
|
|||||||
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
||||||
num_bucket BIGINT NOT NULL,
|
num_bucket BIGINT NOT NULL,
|
||||||
block_stamp BIGINT NOT NULL,
|
block_stamp BIGINT NOT NULL,
|
||||||
withdrawals BYTEA NOT NULL
|
withdrawals DECIMAL(78,0) NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE TABLE token_exchange (
|
CREATE TABLE token_exchange (
|
||||||
@@ -572,7 +578,7 @@ CREATE TABLE escape_hatch_withdrawal (
|
|||||||
who_addr BYTEA NOT NULL,
|
who_addr BYTEA NOT NULL,
|
||||||
to_addr BYTEA NOT NULL,
|
to_addr BYTEA NOT NULL,
|
||||||
token_addr BYTEA NOT NULL,
|
token_addr BYTEA NOT NULL,
|
||||||
amount BYTEA NOT NULL
|
amount DECIMAL(78,0) NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE TABLE auction_vars (
|
CREATE TABLE auction_vars (
|
||||||
@@ -610,7 +616,7 @@ CREATE TABLE tx_pool (
|
|||||||
effective_to_eth_addr BYTEA,
|
effective_to_eth_addr BYTEA,
|
||||||
effective_to_bjj BYTEA,
|
effective_to_bjj BYTEA,
|
||||||
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
|
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
|
||||||
amount BYTEA NOT NULL,
|
amount DECIMAL(78,0) NOT NULL,
|
||||||
amount_f NUMERIC NOT NULL,
|
amount_f NUMERIC NOT NULL,
|
||||||
fee SMALLINT NOT NULL,
|
fee SMALLINT NOT NULL,
|
||||||
nonce BIGINT NOT NULL,
|
nonce BIGINT NOT NULL,
|
||||||
@@ -624,7 +630,7 @@ CREATE TABLE tx_pool (
|
|||||||
rq_to_eth_addr BYTEA,
|
rq_to_eth_addr BYTEA,
|
||||||
rq_to_bjj BYTEA,
|
rq_to_bjj BYTEA,
|
||||||
rq_token_id INT,
|
rq_token_id INT,
|
||||||
rq_amount BYTEA,
|
rq_amount DECIMAL(78,0),
|
||||||
rq_fee SMALLINT,
|
rq_fee SMALLINT,
|
||||||
rq_nonce BIGINT,
|
rq_nonce BIGINT,
|
||||||
tx_type VARCHAR(40) NOT NULL,
|
tx_type VARCHAR(40) NOT NULL,
|
||||||
@@ -661,6 +667,16 @@ CREATE TABLE account_creation_auth (
|
|||||||
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
|
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
|
||||||
);
|
);
|
||||||
|
|
||||||
|
CREATE TABLE node_info (
|
||||||
|
item_id SERIAL PRIMARY KEY,
|
||||||
|
state BYTEA, -- object returned by GET /state
|
||||||
|
config BYTEA, -- Node config
|
||||||
|
-- max_pool_txs BIGINT, -- L2DB config
|
||||||
|
-- min_fee NUMERIC, -- L2DB config
|
||||||
|
constants BYTEA -- info of the network that is constant
|
||||||
|
);
|
||||||
|
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
|
||||||
|
|
||||||
-- +migrate Down
|
-- +migrate Down
|
||||||
-- triggers
|
-- triggers
|
||||||
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
|
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
|
||||||
@@ -675,6 +691,7 @@ DROP FUNCTION IF EXISTS set_tx;
|
|||||||
DROP FUNCTION IF EXISTS forge_l1_user_txs;
|
DROP FUNCTION IF EXISTS forge_l1_user_txs;
|
||||||
DROP FUNCTION IF EXISTS set_pool_tx;
|
DROP FUNCTION IF EXISTS set_pool_tx;
|
||||||
-- drop tables IF EXISTS
|
-- drop tables IF EXISTS
|
||||||
|
DROP TABLE IF EXISTS node_info;
|
||||||
DROP TABLE IF EXISTS account_creation_auth;
|
DROP TABLE IF EXISTS account_creation_auth;
|
||||||
DROP TABLE IF EXISTS tx_pool;
|
DROP TABLE IF EXISTS tx_pool;
|
||||||
DROP TABLE IF EXISTS auction_vars;
|
DROP TABLE IF EXISTS auction_vars;
|
||||||
|
|||||||
20
db/utils.go
20
db/utils.go
@@ -13,6 +13,9 @@ import (
|
|||||||
"github.com/hermeznetwork/hermez-node/log"
|
"github.com/hermeznetwork/hermez-node/log"
|
||||||
"github.com/hermeznetwork/tracerr"
|
"github.com/hermeznetwork/tracerr"
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
|
|
||||||
|
//nolint:errcheck // driver for postgres DB
|
||||||
|
_ "github.com/lib/pq"
|
||||||
migrate "github.com/rubenv/sql-migrate"
|
migrate "github.com/rubenv/sql-migrate"
|
||||||
"github.com/russross/meddler"
|
"github.com/russross/meddler"
|
||||||
"golang.org/x/sync/semaphore"
|
"golang.org/x/sync/semaphore"
|
||||||
@@ -165,7 +168,11 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
|
|||||||
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
|
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
|
||||||
}
|
}
|
||||||
field := fieldPtr.(**big.Int)
|
field := fieldPtr.(**big.Int)
|
||||||
*field = new(big.Int).SetBytes([]byte(*ptr))
|
var ok bool
|
||||||
|
*field, ok = new(big.Int).SetString(*ptr, 10)
|
||||||
|
if !ok {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", *ptr))
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -173,7 +180,7 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
|
|||||||
func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) {
|
func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) {
|
||||||
field := fieldPtr.(*big.Int)
|
field := fieldPtr.(*big.Int)
|
||||||
|
|
||||||
return field.Bytes(), nil
|
return field.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// BigIntNullMeddler encodes or decodes the field value to or from JSON
|
// BigIntNullMeddler encodes or decodes the field value to or from JSON
|
||||||
@@ -198,7 +205,12 @@ func (b BigIntNullMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
|
|||||||
if ptr == nil {
|
if ptr == nil {
|
||||||
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
|
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
|
||||||
}
|
}
|
||||||
*field = new(big.Int).SetBytes(ptr)
|
var ok bool
|
||||||
|
*field, ok = new(big.Int).SetString(string(ptr), 10)
|
||||||
|
if !ok {
|
||||||
|
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", string(ptr)))
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -208,7 +220,7 @@ func (b BigIntNullMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}
|
|||||||
if field == nil {
|
if field == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
return field.Bytes(), nil
|
return field.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SliceToSlicePtrs converts any []Foo to []*Foo
|
// SliceToSlicePtrs converts any []Foo to []*Foo
|
||||||
|
|||||||
@@ -1,9 +1,13 @@
|
|||||||
package db
|
package db
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/russross/meddler"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
type foo struct {
|
type foo struct {
|
||||||
@@ -33,3 +37,42 @@ func TestSlicePtrsToSlice(t *testing.T) {
|
|||||||
assert.Equal(t, *a[i], b[i])
|
assert.Equal(t, *a[i], b[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBigInt(t *testing.T) {
|
||||||
|
pass := os.Getenv("POSTGRES_PASS")
|
||||||
|
db, err := InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
_, err := db.Exec("DROP TABLE IF EXISTS test_big_int;")
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = db.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
_, err = db.Exec("DROP TABLE IF EXISTS test_big_int;")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = db.Exec(`CREATE TABLE test_big_int (
|
||||||
|
item_id SERIAL PRIMARY KEY,
|
||||||
|
value1 DECIMAL(78, 0) NOT NULL,
|
||||||
|
value2 DECIMAL(78, 0),
|
||||||
|
value3 DECIMAL(78, 0)
|
||||||
|
);`)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
type Entry struct {
|
||||||
|
ItemID int `meddler:"item_id"`
|
||||||
|
Value1 *big.Int `meddler:"value1,bigint"`
|
||||||
|
Value2 *big.Int `meddler:"value2,bigintnull"`
|
||||||
|
Value3 *big.Int `meddler:"value3,bigintnull"`
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := Entry{ItemID: 1, Value1: big.NewInt(1234567890), Value2: big.NewInt(9876543210), Value3: nil}
|
||||||
|
err = meddler.Insert(db, "test_big_int", &entry)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var dbEntry Entry
|
||||||
|
err = meddler.QueryRow(db, &dbEntry, "SELECT * FROM test_big_int WHERE item_id = 1;")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, entry, dbEntry)
|
||||||
|
}
|
||||||
|
|||||||
@@ -309,9 +309,12 @@ func (_HermezAuctionProtocol *HermezAuctionProtocolCaller) Coordinators(opts *bi
|
|||||||
Forger common.Address
|
Forger common.Address
|
||||||
CoordinatorURL string
|
CoordinatorURL string
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return *outstruct, err
|
||||||
|
}
|
||||||
|
|
||||||
outstruct.Forger = out[0].(common.Address)
|
outstruct.Forger = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
|
||||||
outstruct.CoordinatorURL = out[1].(string)
|
outstruct.CoordinatorURL = *abi.ConvertType(out[1], new(string)).(*string)
|
||||||
|
|
||||||
return *outstruct, err
|
return *outstruct, err
|
||||||
|
|
||||||
@@ -884,12 +887,15 @@ func (_HermezAuctionProtocol *HermezAuctionProtocolCaller) Slots(opts *bind.Call
|
|||||||
BidAmount *big.Int
|
BidAmount *big.Int
|
||||||
ClosedMinBid *big.Int
|
ClosedMinBid *big.Int
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return *outstruct, err
|
||||||
|
}
|
||||||
|
|
||||||
outstruct.Bidder = out[0].(common.Address)
|
outstruct.Bidder = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
|
||||||
outstruct.Fulfilled = out[1].(bool)
|
outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool)
|
||||||
outstruct.ForgerCommitment = out[2].(bool)
|
outstruct.ForgerCommitment = *abi.ConvertType(out[2], new(bool)).(*bool)
|
||||||
outstruct.BidAmount = out[3].(*big.Int)
|
outstruct.BidAmount = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
|
||||||
outstruct.ClosedMinBid = out[4].(*big.Int)
|
outstruct.ClosedMinBid = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)
|
||||||
|
|
||||||
return *outstruct, err
|
return *outstruct, err
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
271
node/node.go
271
node/node.go
@@ -27,6 +27,7 @@ import (
|
|||||||
"github.com/hermeznetwork/hermez-node/log"
|
"github.com/hermeznetwork/hermez-node/log"
|
||||||
"github.com/hermeznetwork/hermez-node/priceupdater"
|
"github.com/hermeznetwork/hermez-node/priceupdater"
|
||||||
"github.com/hermeznetwork/hermez-node/prover"
|
"github.com/hermeznetwork/hermez-node/prover"
|
||||||
|
"github.com/hermeznetwork/hermez-node/stateapiupdater"
|
||||||
"github.com/hermeznetwork/hermez-node/synchronizer"
|
"github.com/hermeznetwork/hermez-node/synchronizer"
|
||||||
"github.com/hermeznetwork/hermez-node/test/debugapi"
|
"github.com/hermeznetwork/hermez-node/test/debugapi"
|
||||||
"github.com/hermeznetwork/hermez-node/txprocessor"
|
"github.com/hermeznetwork/hermez-node/txprocessor"
|
||||||
@@ -54,6 +55,7 @@ const (
|
|||||||
// Node is the Hermez Node
|
// Node is the Hermez Node
|
||||||
type Node struct {
|
type Node struct {
|
||||||
nodeAPI *NodeAPI
|
nodeAPI *NodeAPI
|
||||||
|
stateAPIUpdater *stateapiupdater.Updater
|
||||||
debugAPI *debugapi.DebugAPI
|
debugAPI *debugapi.DebugAPI
|
||||||
priceUpdater *priceupdater.PriceUpdater
|
priceUpdater *priceupdater.PriceUpdater
|
||||||
// Coordinator
|
// Coordinator
|
||||||
@@ -67,6 +69,7 @@ type Node struct {
|
|||||||
mode Mode
|
mode Mode
|
||||||
sqlConnRead *sqlx.DB
|
sqlConnRead *sqlx.DB
|
||||||
sqlConnWrite *sqlx.DB
|
sqlConnWrite *sqlx.DB
|
||||||
|
historyDB *historydb.HistoryDB
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
@@ -241,12 +244,35 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
}
|
}
|
||||||
initSCVars := sync.SCVars()
|
initSCVars := sync.SCVars()
|
||||||
|
|
||||||
scConsts := synchronizer.SCConsts{
|
scConsts := common.SCConsts{
|
||||||
Rollup: *sync.RollupConstants(),
|
Rollup: *sync.RollupConstants(),
|
||||||
Auction: *sync.AuctionConstants(),
|
Auction: *sync.AuctionConstants(),
|
||||||
WDelayer: *sync.WDelayerConstants(),
|
WDelayer: *sync.WDelayerConstants(),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
hdbNodeCfg := historydb.NodeConfig{
|
||||||
|
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
|
||||||
|
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
|
||||||
|
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration.Seconds(),
|
||||||
|
}
|
||||||
|
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
hdbConsts := historydb.Constants{
|
||||||
|
SCConsts: common.SCConsts{
|
||||||
|
Rollup: scConsts.Rollup,
|
||||||
|
Auction: scConsts.Auction,
|
||||||
|
WDelayer: scConsts.WDelayer,
|
||||||
|
},
|
||||||
|
ChainID: chainIDU16,
|
||||||
|
HermezAddress: cfg.SmartContracts.Rollup,
|
||||||
|
}
|
||||||
|
if err := historyDB.SetConstants(&hdbConsts); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
|
||||||
|
|
||||||
var coord *coordinator.Coordinator
|
var coord *coordinator.Coordinator
|
||||||
if mode == ModeCoordinator {
|
if mode == ModeCoordinator {
|
||||||
// Unlock FeeAccount EthAddr in the keystore to generate the
|
// Unlock FeeAccount EthAddr in the keystore to generate the
|
||||||
@@ -339,6 +365,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
|
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
|
||||||
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
|
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
|
||||||
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
|
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
|
||||||
|
MustForgeAtSlotDeadline: cfg.Coordinator.MustForgeAtSlotDeadline,
|
||||||
|
IgnoreSlotCommitment: cfg.Coordinator.IgnoreSlotCommitment,
|
||||||
|
ForgeOncePerSlotIfTxs: cfg.Coordinator.ForgeOncePerSlotIfTxs,
|
||||||
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
|
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
|
||||||
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
|
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
|
||||||
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
|
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
|
||||||
@@ -367,11 +396,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
serverProofs,
|
serverProofs,
|
||||||
client,
|
client,
|
||||||
&scConsts,
|
&scConsts,
|
||||||
&synchronizer.SCVariables{
|
initSCVars,
|
||||||
Rollup: *initSCVars.Rollup,
|
|
||||||
Auction: *initSCVars.Auction,
|
|
||||||
WDelayer: *initSCVars.WDelayer,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
@@ -403,35 +428,29 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
coord, cfg.API.Explorer,
|
coord, cfg.API.Explorer,
|
||||||
server,
|
server,
|
||||||
historyDB,
|
historyDB,
|
||||||
stateDB,
|
|
||||||
l2DB,
|
l2DB,
|
||||||
&api.Config{
|
|
||||||
RollupConstants: scConsts.Rollup,
|
|
||||||
AuctionConstants: scConsts.Auction,
|
|
||||||
WDelayerConstants: scConsts.WDelayer,
|
|
||||||
ChainID: chainIDU16,
|
|
||||||
HermezAddress: cfg.SmartContracts.Rollup,
|
|
||||||
},
|
|
||||||
cfg.Coordinator.ForgeDelay.Duration,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
|
|
||||||
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
|
|
||||||
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
|
|
||||||
}
|
}
|
||||||
var debugAPI *debugapi.DebugAPI
|
var debugAPI *debugapi.DebugAPI
|
||||||
if cfg.Debug.APIAddress != "" {
|
if cfg.Debug.APIAddress != "" {
|
||||||
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, stateDB, sync)
|
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, stateDB, sync)
|
||||||
}
|
}
|
||||||
priceUpdater, err := priceupdater.NewPriceUpdater(cfg.PriceUpdater.URL,
|
priceUpdater, err := priceupdater.NewPriceUpdater(
|
||||||
priceupdater.APIType(cfg.PriceUpdater.Type), historyDB)
|
cfg.PriceUpdater.DefaultUpdateMethod,
|
||||||
|
cfg.PriceUpdater.TokensConfig,
|
||||||
|
historyDB,
|
||||||
|
cfg.PriceUpdater.URLBitfinexV2,
|
||||||
|
cfg.PriceUpdater.URLCoinGeckoV3,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
return &Node{
|
return &Node{
|
||||||
|
stateAPIUpdater: stateAPIUpdater,
|
||||||
nodeAPI: nodeAPI,
|
nodeAPI: nodeAPI,
|
||||||
debugAPI: debugAPI,
|
debugAPI: debugAPI,
|
||||||
priceUpdater: priceUpdater,
|
priceUpdater: priceUpdater,
|
||||||
@@ -441,11 +460,128 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
mode: mode,
|
mode: mode,
|
||||||
sqlConnRead: dbRead,
|
sqlConnRead: dbRead,
|
||||||
sqlConnWrite: dbWrite,
|
sqlConnWrite: dbWrite,
|
||||||
|
historyDB: historyDB,
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// APIServer is a server that only runs the API
|
||||||
|
type APIServer struct {
|
||||||
|
nodeAPI *NodeAPI
|
||||||
|
mode Mode
|
||||||
|
ctx context.Context
|
||||||
|
wg sync.WaitGroup
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAPIServer creates a new APIServer
|
||||||
|
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
|
||||||
|
meddler.Debug = cfg.Debug.MeddlerLogs
|
||||||
|
// Stablish DB connection
|
||||||
|
dbWrite, err := dbUtils.InitSQLDB(
|
||||||
|
cfg.PostgreSQL.PortWrite,
|
||||||
|
cfg.PostgreSQL.HostWrite,
|
||||||
|
cfg.PostgreSQL.UserWrite,
|
||||||
|
cfg.PostgreSQL.PasswordWrite,
|
||||||
|
cfg.PostgreSQL.NameWrite,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
|
||||||
|
}
|
||||||
|
var dbRead *sqlx.DB
|
||||||
|
if cfg.PostgreSQL.HostRead == "" {
|
||||||
|
dbRead = dbWrite
|
||||||
|
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf(
|
||||||
|
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
dbRead, err = dbUtils.InitSQLDB(
|
||||||
|
cfg.PostgreSQL.PortRead,
|
||||||
|
cfg.PostgreSQL.HostRead,
|
||||||
|
cfg.PostgreSQL.UserRead,
|
||||||
|
cfg.PostgreSQL.PasswordRead,
|
||||||
|
cfg.PostgreSQL.NameRead,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
apiConnCon := dbUtils.NewAPIConnectionController(
|
||||||
|
cfg.API.MaxSQLConnections,
|
||||||
|
cfg.API.SQLConnectionTimeout.Duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
|
||||||
|
|
||||||
|
var l2DB *l2db.L2DB
|
||||||
|
if mode == ModeCoordinator {
|
||||||
|
l2DB = l2db.NewL2DB(
|
||||||
|
dbRead, dbWrite,
|
||||||
|
0,
|
||||||
|
cfg.Coordinator.L2DB.MaxTxs,
|
||||||
|
cfg.Coordinator.L2DB.MinFeeUSD,
|
||||||
|
0,
|
||||||
|
apiConnCon,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Debug.GinDebugMode {
|
||||||
|
gin.SetMode(gin.DebugMode)
|
||||||
|
} else {
|
||||||
|
gin.SetMode(gin.ReleaseMode)
|
||||||
|
}
|
||||||
|
server := gin.Default()
|
||||||
|
coord := false
|
||||||
|
if mode == ModeCoordinator {
|
||||||
|
coord = cfg.Coordinator.API.Coordinator
|
||||||
|
}
|
||||||
|
nodeAPI, err := NewNodeAPI(
|
||||||
|
cfg.API.Address,
|
||||||
|
coord, cfg.API.Explorer,
|
||||||
|
server,
|
||||||
|
historyDB,
|
||||||
|
l2DB,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
return &APIServer{
|
||||||
|
nodeAPI: nodeAPI,
|
||||||
|
mode: mode,
|
||||||
|
ctx: ctx,
|
||||||
|
cancel: cancel,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the APIServer
|
||||||
|
func (s *APIServer) Start() {
|
||||||
|
log.Infow("Starting api server...", "mode", s.mode)
|
||||||
|
log.Info("Starting NodeAPI...")
|
||||||
|
s.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer func() {
|
||||||
|
log.Info("NodeAPI routine stopped")
|
||||||
|
s.wg.Done()
|
||||||
|
}()
|
||||||
|
if err := s.nodeAPI.Run(s.ctx); err != nil {
|
||||||
|
if s.ctx.Err() != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Fatalw("NodeAPI.Run", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop the APIServer
|
||||||
|
func (s *APIServer) Stop() {
|
||||||
|
log.Infow("Stopping NodeAPI...")
|
||||||
|
s.cancel()
|
||||||
|
s.wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
// NodeAPI holds the node http API
|
// NodeAPI holds the node http API
|
||||||
type NodeAPI struct { //nolint:golint
|
type NodeAPI struct { //nolint:golint
|
||||||
api *api.API
|
api *api.API
|
||||||
@@ -465,10 +601,7 @@ func NewNodeAPI(
|
|||||||
coordinatorEndpoints, explorerEndpoints bool,
|
coordinatorEndpoints, explorerEndpoints bool,
|
||||||
server *gin.Engine,
|
server *gin.Engine,
|
||||||
hdb *historydb.HistoryDB,
|
hdb *historydb.HistoryDB,
|
||||||
sdb *statedb.StateDB,
|
|
||||||
l2db *l2db.L2DB,
|
l2db *l2db.L2DB,
|
||||||
config *api.Config,
|
|
||||||
forgeDelay time.Duration,
|
|
||||||
) (*NodeAPI, error) {
|
) (*NodeAPI, error) {
|
||||||
engine := gin.Default()
|
engine := gin.Default()
|
||||||
engine.NoRoute(handleNoRoute)
|
engine.NoRoute(handleNoRoute)
|
||||||
@@ -478,10 +611,6 @@ func NewNodeAPI(
|
|||||||
engine,
|
engine,
|
||||||
hdb,
|
hdb,
|
||||||
l2db,
|
l2db,
|
||||||
config,
|
|
||||||
&api.NodeConfig{
|
|
||||||
ForgeDelay: forgeDelay.Seconds(),
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
@@ -527,58 +656,50 @@ func (a *NodeAPI) Run(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats,
|
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats,
|
||||||
vars synchronizer.SCVariablesPtr, batches []common.BatchData) {
|
vars *common.SCVariablesPtr, batches []common.BatchData) error {
|
||||||
if n.mode == ModeCoordinator {
|
if n.mode == ModeCoordinator {
|
||||||
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
|
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
|
||||||
Stats: *stats,
|
Stats: *stats,
|
||||||
Vars: vars,
|
Vars: *vars,
|
||||||
Batches: batches,
|
Batches: batches,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if n.nodeAPI != nil {
|
n.stateAPIUpdater.SetSCVars(vars)
|
||||||
if vars.Rollup != nil {
|
|
||||||
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
|
|
||||||
}
|
|
||||||
if vars.Auction != nil {
|
|
||||||
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
|
|
||||||
}
|
|
||||||
if vars.WDelayer != nil {
|
|
||||||
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
|
|
||||||
}
|
|
||||||
|
|
||||||
if stats.Synced() {
|
if stats.Synced() {
|
||||||
if err := n.nodeAPI.api.UpdateNetworkInfo(
|
if err := n.stateAPIUpdater.UpdateNetworkInfo(
|
||||||
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
||||||
common.BatchNum(stats.Eth.LastBatchNum),
|
common.BatchNum(stats.Eth.LastBatchNum),
|
||||||
stats.Sync.Auction.CurrentSlot.SlotNum,
|
stats.Sync.Auction.CurrentSlot.SlotNum,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
log.Errorw("API.UpdateNetworkInfo", "err", err)
|
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
n.nodeAPI.api.UpdateNetworkInfoBlock(
|
n.stateAPIUpdater.UpdateNetworkInfoBlock(
|
||||||
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
|
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
|
||||||
vars synchronizer.SCVariablesPtr) {
|
vars *common.SCVariables) error {
|
||||||
if n.mode == ModeCoordinator {
|
if n.mode == ModeCoordinator {
|
||||||
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
|
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
|
||||||
Stats: *stats,
|
Stats: *stats,
|
||||||
Vars: vars,
|
Vars: *vars.AsPtr(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if n.nodeAPI != nil {
|
n.stateAPIUpdater.SetSCVars(vars.AsPtr())
|
||||||
vars := n.sync.SCVars()
|
n.stateAPIUpdater.UpdateNetworkInfoBlock(
|
||||||
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
|
|
||||||
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
|
|
||||||
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
|
|
||||||
n.nodeAPI.api.UpdateNetworkInfoBlock(
|
|
||||||
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
stats.Eth.LastBlock, stats.Sync.LastBlock,
|
||||||
)
|
)
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
|
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
|
||||||
@@ -594,16 +715,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
|
|||||||
// case: reorg
|
// case: reorg
|
||||||
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
|
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
|
||||||
vars := n.sync.SCVars()
|
vars := n.sync.SCVars()
|
||||||
n.handleReorg(ctx, stats, vars)
|
if err := n.handleReorg(ctx, stats, vars); err != nil {
|
||||||
|
return nil, time.Duration(0), tracerr.Wrap(err)
|
||||||
|
}
|
||||||
return nil, time.Duration(0), nil
|
return nil, time.Duration(0), nil
|
||||||
} else if blockData != nil {
|
} else if blockData != nil {
|
||||||
// case: new block
|
// case: new block
|
||||||
vars := synchronizer.SCVariablesPtr{
|
vars := common.SCVariablesPtr{
|
||||||
Rollup: blockData.Rollup.Vars,
|
Rollup: blockData.Rollup.Vars,
|
||||||
Auction: blockData.Auction.Vars,
|
Auction: blockData.Auction.Vars,
|
||||||
WDelayer: blockData.WDelayer.Vars,
|
WDelayer: blockData.WDelayer.Vars,
|
||||||
}
|
}
|
||||||
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
|
if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
|
||||||
|
return nil, time.Duration(0), tracerr.Wrap(err)
|
||||||
|
}
|
||||||
return &blockData.Block, time.Duration(0), nil
|
return &blockData.Block, time.Duration(0), nil
|
||||||
} else {
|
} else {
|
||||||
// case: no block
|
// case: no block
|
||||||
@@ -622,7 +747,9 @@ func (n *Node) StartSynchronizer() {
|
|||||||
// the last synced one) is synchronized
|
// the last synced one) is synchronized
|
||||||
stats := n.sync.Stats()
|
stats := n.sync.Stats()
|
||||||
vars := n.sync.SCVars()
|
vars := n.sync.SCVars()
|
||||||
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
|
if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
|
||||||
|
log.Fatalw("Node.handleNewBlock", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
n.wg.Add(1)
|
n.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
@@ -709,18 +836,25 @@ func (n *Node) StartNodeAPI() {
|
|||||||
n.wg.Add(1)
|
n.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
// Do an initial update on startup
|
// Do an initial update on startup
|
||||||
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
|
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
|
||||||
log.Errorw("API.UpdateMetrics", "err", err)
|
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
|
||||||
|
}
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
log.Errorw("ApiStateUpdater.Store", "err", err)
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-n.ctx.Done():
|
case <-n.ctx.Done():
|
||||||
log.Info("API.UpdateMetrics loop done")
|
log.Info("ApiStateUpdater.UpdateMetrics loop done")
|
||||||
n.wg.Done()
|
n.wg.Done()
|
||||||
return
|
return
|
||||||
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
|
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
|
||||||
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
|
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
|
||||||
log.Errorw("API.UpdateMetrics", "err", err)
|
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
log.Errorw("ApiStateUpdater.Store", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -729,18 +863,25 @@ func (n *Node) StartNodeAPI() {
|
|||||||
n.wg.Add(1)
|
n.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
// Do an initial update on startup
|
// Do an initial update on startup
|
||||||
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
|
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
|
||||||
log.Errorw("API.UpdateRecommendedFee", "err", err)
|
log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
|
||||||
|
}
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
log.Errorw("ApiStateUpdater.Store", "err", err)
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-n.ctx.Done():
|
case <-n.ctx.Done():
|
||||||
log.Info("API.UpdateRecommendedFee loop done")
|
log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
|
||||||
n.wg.Done()
|
n.wg.Done()
|
||||||
return
|
return
|
||||||
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
|
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
|
||||||
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
|
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
|
||||||
log.Errorw("API.UpdateRecommendedFee", "err", err)
|
log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := n.stateAPIUpdater.Store(); err != nil {
|
||||||
|
log.Errorw("ApiStateUpdater.Store", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,57 +20,107 @@ const (
|
|||||||
defaultIdleConnTimeout = 2 * time.Second
|
defaultIdleConnTimeout = 2 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
// APIType defines the token exchange API
|
// UpdateMethodType defines the token price update mechanism
|
||||||
type APIType string
|
type UpdateMethodType string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// APITypeBitFinexV2 is the http API used by bitfinex V2
|
// UpdateMethodTypeBitFinexV2 is the http API used by bitfinex V2
|
||||||
APITypeBitFinexV2 APIType = "bitfinexV2"
|
UpdateMethodTypeBitFinexV2 UpdateMethodType = "bitfinexV2"
|
||||||
// APITypeCoingeckoV3 is the http API used by copingecko V3
|
// UpdateMethodTypeCoingeckoV3 is the http API used by copingecko V3
|
||||||
APITypeCoingeckoV3 APIType = "coingeckoV3"
|
UpdateMethodTypeCoingeckoV3 UpdateMethodType = "coingeckoV3"
|
||||||
|
// UpdateMethodTypeStatic is the value given by the configuration
|
||||||
|
UpdateMethodTypeStatic UpdateMethodType = "static"
|
||||||
|
// UpdateMethodTypeIgnore indicates to not update the value, to set value 0
|
||||||
|
// it's better to use UpdateMethodTypeStatic
|
||||||
|
UpdateMethodTypeIgnore UpdateMethodType = "ignore"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (t *APIType) valid() bool {
|
func (t *UpdateMethodType) valid() bool {
|
||||||
switch *t {
|
switch *t {
|
||||||
case APITypeBitFinexV2:
|
case UpdateMethodTypeBitFinexV2:
|
||||||
return true
|
return true
|
||||||
case APITypeCoingeckoV3:
|
case UpdateMethodTypeCoingeckoV3:
|
||||||
|
return true
|
||||||
|
case UpdateMethodTypeStatic:
|
||||||
|
return true
|
||||||
|
case UpdateMethodTypeIgnore:
|
||||||
return true
|
return true
|
||||||
default:
|
default:
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TokenConfig specifies how a single token get its price updated
|
||||||
|
type TokenConfig struct {
|
||||||
|
UpdateMethod UpdateMethodType
|
||||||
|
StaticValue float64 // required by UpdateMethodTypeStatic
|
||||||
|
Symbol string
|
||||||
|
Addr ethCommon.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TokenConfig) valid() bool {
|
||||||
|
if (t.Addr == common.EmptyAddr && t.Symbol != "ETH") ||
|
||||||
|
(t.Symbol == "" && t.UpdateMethod == UpdateMethodTypeBitFinexV2) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.UpdateMethod.valid()
|
||||||
|
}
|
||||||
|
|
||||||
// PriceUpdater definition
|
// PriceUpdater definition
|
||||||
type PriceUpdater struct {
|
type PriceUpdater struct {
|
||||||
db *historydb.HistoryDB
|
db *historydb.HistoryDB
|
||||||
apiURL string
|
defaultUpdateMethod UpdateMethodType
|
||||||
apiType APIType
|
tokensList []historydb.TokenSymbolAndAddr
|
||||||
tokens []historydb.TokenSymbolAndAddr
|
tokensConfig map[ethCommon.Address]TokenConfig
|
||||||
|
clientCoingeckoV3 *sling.Sling
|
||||||
|
clientBitfinexV2 *sling.Sling
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPriceUpdater is the constructor for the updater
|
// NewPriceUpdater is the constructor for the updater
|
||||||
func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater,
|
func NewPriceUpdater(
|
||||||
error) {
|
defaultUpdateMethodType UpdateMethodType,
|
||||||
if !apiType.valid() {
|
tokensConfig []TokenConfig,
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("Invalid apiType: %v", apiType))
|
db *historydb.HistoryDB,
|
||||||
|
bitfinexV2URL, coingeckoV3URL string,
|
||||||
|
) (*PriceUpdater, error) {
|
||||||
|
// Validate params
|
||||||
|
if !defaultUpdateMethodType.valid() || defaultUpdateMethodType == UpdateMethodTypeStatic {
|
||||||
|
return nil, tracerr.Wrap(
|
||||||
|
fmt.Errorf("Invalid defaultUpdateMethodType: %v", defaultUpdateMethodType),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
tokensConfigMap := make(map[ethCommon.Address]TokenConfig)
|
||||||
|
for _, t := range tokensConfig {
|
||||||
|
if !t.valid() {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("Invalid tokensConfig, wrong entry: %+v", t))
|
||||||
|
}
|
||||||
|
tokensConfigMap[t.Addr] = t
|
||||||
|
}
|
||||||
|
// Init
|
||||||
|
tr := &http.Transport{
|
||||||
|
MaxIdleConns: defaultMaxIdleConns,
|
||||||
|
IdleConnTimeout: defaultIdleConnTimeout,
|
||||||
|
DisableCompression: true,
|
||||||
|
}
|
||||||
|
httpClient := &http.Client{Transport: tr}
|
||||||
return &PriceUpdater{
|
return &PriceUpdater{
|
||||||
db: db,
|
db: db,
|
||||||
apiURL: apiURL,
|
defaultUpdateMethod: defaultUpdateMethodType,
|
||||||
apiType: apiType,
|
tokensList: []historydb.TokenSymbolAndAddr{},
|
||||||
tokens: []historydb.TokenSymbolAndAddr{},
|
tokensConfig: tokensConfigMap,
|
||||||
|
clientCoingeckoV3: sling.New().Base(coingeckoV3URL).Client(httpClient),
|
||||||
|
clientBitfinexV2: sling.New().Base(bitfinexV2URL).Client(httpClient),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTokenPriceBitfinex(ctx context.Context, client *sling.Sling,
|
func (p *PriceUpdater) getTokenPriceBitfinex(ctx context.Context, tokenSymbol string) (float64, error) {
|
||||||
tokenSymbol string) (float64, error) {
|
|
||||||
state := [10]float64{}
|
state := [10]float64{}
|
||||||
req, err := client.New().Get("ticker/t" + tokenSymbol + "USD").Request()
|
url := "ticker/t" + tokenSymbol + "USD"
|
||||||
|
req, err := p.clientBitfinexV2.New().Get(url).Request()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, tracerr.Wrap(err)
|
return 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
res, err := client.Do(req.WithContext(ctx), &state, nil)
|
res, err := p.clientBitfinexV2.Do(req.WithContext(ctx), &state, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, tracerr.Wrap(err)
|
return 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
@@ -80,8 +130,7 @@ func getTokenPriceBitfinex(ctx context.Context, client *sling.Sling,
|
|||||||
return state[6], nil
|
return state[6], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling,
|
func (p *PriceUpdater) getTokenPriceCoingecko(ctx context.Context, tokenAddr ethCommon.Address) (float64, error) {
|
||||||
tokenAddr ethCommon.Address) (float64, error) {
|
|
||||||
responseObject := make(map[string]map[string]float64)
|
responseObject := make(map[string]map[string]float64)
|
||||||
var url string
|
var url string
|
||||||
var id string
|
var id string
|
||||||
@@ -93,11 +142,11 @@ func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling,
|
|||||||
url = "simple/token_price/ethereum?contract_addresses=" +
|
url = "simple/token_price/ethereum?contract_addresses=" +
|
||||||
id + "&vs_currencies=usd"
|
id + "&vs_currencies=usd"
|
||||||
}
|
}
|
||||||
req, err := client.New().Get(url).Request()
|
req, err := p.clientCoingeckoV3.New().Get(url).Request()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, tracerr.Wrap(err)
|
return 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
res, err := client.Do(req.WithContext(ctx), &responseObject, nil)
|
res, err := p.clientCoingeckoV3.Do(req.WithContext(ctx), &responseObject, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, tracerr.Wrap(err)
|
return 0, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
@@ -114,43 +163,50 @@ func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling,
|
|||||||
// UpdatePrices is triggered by the Coordinator, and internally will update the
|
// UpdatePrices is triggered by the Coordinator, and internally will update the
|
||||||
// token prices in the db
|
// token prices in the db
|
||||||
func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
|
func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
|
||||||
tr := &http.Transport{
|
for _, token := range p.tokensConfig {
|
||||||
MaxIdleConns: defaultMaxIdleConns,
|
|
||||||
IdleConnTimeout: defaultIdleConnTimeout,
|
|
||||||
DisableCompression: true,
|
|
||||||
}
|
|
||||||
httpClient := &http.Client{Transport: tr}
|
|
||||||
client := sling.New().Base(p.apiURL).Client(httpClient)
|
|
||||||
|
|
||||||
for _, token := range p.tokens {
|
|
||||||
var tokenPrice float64
|
var tokenPrice float64
|
||||||
var err error
|
var err error
|
||||||
switch p.apiType {
|
switch token.UpdateMethod {
|
||||||
case APITypeBitFinexV2:
|
case UpdateMethodTypeBitFinexV2:
|
||||||
tokenPrice, err = getTokenPriceBitfinex(ctx, client, token.Symbol)
|
tokenPrice, err = p.getTokenPriceBitfinex(ctx, token.Symbol)
|
||||||
case APITypeCoingeckoV3:
|
case UpdateMethodTypeCoingeckoV3:
|
||||||
tokenPrice, err = getTokenPriceCoingecko(ctx, client, token.Addr)
|
tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr)
|
||||||
|
case UpdateMethodTypeStatic:
|
||||||
|
tokenPrice = token.StaticValue
|
||||||
|
case UpdateMethodTypeIgnore:
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnw("token price not updated (get error)",
|
log.Warnw("token price not updated (get error)",
|
||||||
"err", err, "token", token.Symbol, "apiType", p.apiType)
|
"err", err, "token", token.Symbol, "updateMethod", token.UpdateMethod)
|
||||||
}
|
}
|
||||||
if err = p.db.UpdateTokenValue(token.Symbol, tokenPrice); err != nil {
|
if err = p.db.UpdateTokenValue(token.Addr, tokenPrice); err != nil {
|
||||||
log.Errorw("token price not updated (db error)",
|
log.Errorw("token price not updated (db error)",
|
||||||
"err", err, "token", token.Symbol, "apiType", p.apiType)
|
"err", err, "token", token.Symbol, "updateMethod", token.UpdateMethod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateTokenList get the registered token symbols from HistoryDB
|
// UpdateTokenList get the registered token symbols from HistoryDB
|
||||||
func (p *PriceUpdater) UpdateTokenList() error {
|
func (p *PriceUpdater) UpdateTokenList() error {
|
||||||
tokens, err := p.db.GetTokenSymbolsAndAddrs()
|
dbTokens, err := p.db.GetTokenSymbolsAndAddrs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
p.tokens = tokens
|
// For each token from the DB
|
||||||
|
for _, dbToken := range dbTokens {
|
||||||
|
// If the token doesn't exists in the config list,
|
||||||
|
// add it with default update emthod
|
||||||
|
if _, ok := p.tokensConfig[dbToken.Addr]; !ok {
|
||||||
|
p.tokensConfig[dbToken.Addr] = TokenConfig{
|
||||||
|
UpdateMethod: p.defaultUpdateMethod,
|
||||||
|
Symbol: dbToken.Symbol,
|
||||||
|
Addr: dbToken.Addr,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,7 +16,9 @@ import (
|
|||||||
|
|
||||||
var historyDB *historydb.HistoryDB
|
var historyDB *historydb.HistoryDB
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
const usdtAddr = "0xdac17f958d2ee523a2206206994597c13d831ec7"
|
||||||
|
|
||||||
|
func TestPriceUpdaterBitfinex(t *testing.T) {
|
||||||
// Init DB
|
// Init DB
|
||||||
pass := os.Getenv("POSTGRES_PASS")
|
pass := os.Getenv("POSTGRES_PASS")
|
||||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||||
@@ -29,60 +31,113 @@ func TestMain(m *testing.M) {
|
|||||||
// Populate DB
|
// Populate DB
|
||||||
// Gen blocks and add them to DB
|
// Gen blocks and add them to DB
|
||||||
blocks := test.GenBlocks(1, 2)
|
blocks := test.GenBlocks(1, 2)
|
||||||
err = historyDB.AddBlocks(blocks)
|
require.NoError(t, historyDB.AddBlocks(blocks))
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
// Gen tokens and add them to DB
|
// Gen tokens and add them to DB
|
||||||
tokens := []common.Token{}
|
tokens := []common.Token{
|
||||||
tokens = append(tokens, common.Token{
|
{
|
||||||
TokenID: 1,
|
TokenID: 1,
|
||||||
EthBlockNum: blocks[0].Num,
|
EthBlockNum: blocks[0].Num,
|
||||||
EthAddr: ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f"),
|
EthAddr: ethCommon.HexToAddress("0x1"),
|
||||||
Name: "DAI",
|
Name: "DAI",
|
||||||
Symbol: "DAI",
|
Symbol: "DAI",
|
||||||
Decimals: 18,
|
Decimals: 18,
|
||||||
})
|
}, // Used to test get by SC addr
|
||||||
err = historyDB.AddTokens(tokens)
|
{
|
||||||
if err != nil {
|
TokenID: 2,
|
||||||
panic(err)
|
EthBlockNum: blocks[0].Num,
|
||||||
|
EthAddr: ethCommon.HexToAddress(usdtAddr),
|
||||||
|
Name: "Tether",
|
||||||
|
Symbol: "USDT",
|
||||||
|
Decimals: 18,
|
||||||
|
}, // Used to test get by token symbol
|
||||||
|
{
|
||||||
|
TokenID: 3,
|
||||||
|
EthBlockNum: blocks[0].Num,
|
||||||
|
EthAddr: ethCommon.HexToAddress("0x2"),
|
||||||
|
Name: "FOO",
|
||||||
|
Symbol: "FOO",
|
||||||
|
Decimals: 18,
|
||||||
|
}, // Used to test ignore
|
||||||
|
{
|
||||||
|
TokenID: 4,
|
||||||
|
EthBlockNum: blocks[0].Num,
|
||||||
|
EthAddr: ethCommon.HexToAddress("0x3"),
|
||||||
|
Name: "BAR",
|
||||||
|
Symbol: "BAR",
|
||||||
|
Decimals: 18,
|
||||||
|
}, // Used to test static
|
||||||
|
{
|
||||||
|
TokenID: 5,
|
||||||
|
EthBlockNum: blocks[0].Num,
|
||||||
|
EthAddr: ethCommon.HexToAddress("0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"),
|
||||||
|
Name: "Uniswap",
|
||||||
|
Symbol: "UNI",
|
||||||
|
Decimals: 18,
|
||||||
|
}, // Used to test default
|
||||||
|
}
|
||||||
|
require.NoError(t, historyDB.AddTokens(tokens)) // ETH token exist in DB by default
|
||||||
|
// Update token price used to test ignore
|
||||||
|
ignoreValue := 44.44
|
||||||
|
require.NoError(t, historyDB.UpdateTokenValue(tokens[2].EthAddr, ignoreValue))
|
||||||
|
|
||||||
|
// Prepare token config
|
||||||
|
staticValue := 0.12345
|
||||||
|
tc := []TokenConfig{
|
||||||
|
// ETH and UNI tokens use default method
|
||||||
|
{ // DAI uses SC addr
|
||||||
|
UpdateMethod: UpdateMethodTypeBitFinexV2,
|
||||||
|
Addr: ethCommon.HexToAddress("0x1"),
|
||||||
|
Symbol: "DAI",
|
||||||
|
},
|
||||||
|
{ // USDT uses symbol
|
||||||
|
UpdateMethod: UpdateMethodTypeCoingeckoV3,
|
||||||
|
Addr: ethCommon.HexToAddress(usdtAddr),
|
||||||
|
},
|
||||||
|
{ // FOO uses ignore
|
||||||
|
UpdateMethod: UpdateMethodTypeIgnore,
|
||||||
|
Addr: ethCommon.HexToAddress("0x2"),
|
||||||
|
},
|
||||||
|
{ // BAR uses static
|
||||||
|
UpdateMethod: UpdateMethodTypeStatic,
|
||||||
|
Addr: ethCommon.HexToAddress("0x3"),
|
||||||
|
StaticValue: staticValue,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
result := m.Run()
|
bitfinexV2URL := "https://api-pub.bitfinex.com/v2/"
|
||||||
os.Exit(result)
|
coingeckoV3URL := "https://api.coingecko.com/api/v3/"
|
||||||
}
|
|
||||||
|
|
||||||
func TestPriceUpdaterBitfinex(t *testing.T) {
|
|
||||||
// Init price updater
|
// Init price updater
|
||||||
pu, err := NewPriceUpdater("https://api-pub.bitfinex.com/v2/", APITypeBitFinexV2, historyDB)
|
pu, err := NewPriceUpdater(
|
||||||
|
UpdateMethodTypeCoingeckoV3,
|
||||||
|
tc,
|
||||||
|
historyDB,
|
||||||
|
bitfinexV2URL,
|
||||||
|
coingeckoV3URL,
|
||||||
|
)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// Update token list
|
// Update token list
|
||||||
assert.NoError(t, pu.UpdateTokenList())
|
require.NoError(t, pu.UpdateTokenList())
|
||||||
// Update prices
|
// Update prices
|
||||||
pu.UpdatePrices(context.Background())
|
pu.UpdatePrices(context.Background())
|
||||||
assertTokenHasPriceAndClean(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPriceUpdaterCoingecko(t *testing.T) {
|
// Check results: get tokens from DB
|
||||||
// Init price updater
|
|
||||||
pu, err := NewPriceUpdater("https://api.coingecko.com/api/v3/", APITypeCoingeckoV3, historyDB)
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Update token list
|
|
||||||
assert.NoError(t, pu.UpdateTokenList())
|
|
||||||
// Update prices
|
|
||||||
pu.UpdatePrices(context.Background())
|
|
||||||
assertTokenHasPriceAndClean(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func assertTokenHasPriceAndClean(t *testing.T) {
|
|
||||||
// Check that prices have been updated
|
|
||||||
fetchedTokens, err := historyDB.GetTokensTest()
|
fetchedTokens, err := historyDB.GetTokensTest()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// TokenID 0 (ETH) is always on the DB
|
// Check that tokens that are updated via API have value:
|
||||||
assert.Equal(t, 2, len(fetchedTokens))
|
// ETH
|
||||||
for _, token := range fetchedTokens {
|
require.NotNil(t, fetchedTokens[0].USDUpdate)
|
||||||
require.NotNil(t, token.USD)
|
assert.Greater(t, *fetchedTokens[0].USD, 0.0)
|
||||||
require.NotNil(t, token.USDUpdate)
|
// DAI
|
||||||
assert.Greater(t, *token.USD, 0.0)
|
require.NotNil(t, fetchedTokens[1].USDUpdate)
|
||||||
}
|
assert.Greater(t, *fetchedTokens[1].USD, 0.0)
|
||||||
|
// USDT
|
||||||
|
require.NotNil(t, fetchedTokens[2].USDUpdate)
|
||||||
|
assert.Greater(t, *fetchedTokens[2].USD, 0.0)
|
||||||
|
// UNI
|
||||||
|
require.NotNil(t, fetchedTokens[5].USDUpdate)
|
||||||
|
assert.Greater(t, *fetchedTokens[5].USD, 0.0)
|
||||||
|
// Check ignored token
|
||||||
|
assert.Equal(t, ignoreValue, *fetchedTokens[3].USD)
|
||||||
|
// Check static value
|
||||||
|
assert.Equal(t, staticValue, *fetchedTokens[4].USD)
|
||||||
}
|
}
|
||||||
|
|||||||
155
stateapiupdater/stateapiupdater.go
Normal file
155
stateapiupdater/stateapiupdater.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
package stateapiupdater
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
|
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||||
|
"github.com/hermeznetwork/tracerr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Updater is an utility object to facilitate updating the StateAPI
|
||||||
|
type Updater struct {
|
||||||
|
hdb *historydb.HistoryDB
|
||||||
|
state historydb.StateAPI
|
||||||
|
config historydb.NodeConfig
|
||||||
|
vars common.SCVariablesPtr
|
||||||
|
consts historydb.Constants
|
||||||
|
rw sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUpdater creates a new Updater
|
||||||
|
func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
|
||||||
|
consts *historydb.Constants) *Updater {
|
||||||
|
u := Updater{
|
||||||
|
hdb: hdb,
|
||||||
|
config: *config,
|
||||||
|
consts: *consts,
|
||||||
|
state: historydb.StateAPI{
|
||||||
|
NodePublicConfig: historydb.NodePublicConfig{
|
||||||
|
ForgeDelay: config.ForgeDelay,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
u.SetSCVars(vars.AsPtr())
|
||||||
|
return &u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the State in the HistoryDB
|
||||||
|
func (u *Updater) Store() error {
|
||||||
|
u.rw.RLock()
|
||||||
|
defer u.rw.RUnlock()
|
||||||
|
return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSCVars sets the smart contract vars (ony updates those that are not nil)
|
||||||
|
func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
|
||||||
|
u.rw.Lock()
|
||||||
|
defer u.rw.Unlock()
|
||||||
|
if vars.Rollup != nil {
|
||||||
|
u.vars.Rollup = vars.Rollup
|
||||||
|
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
|
||||||
|
u.state.Rollup = *rollupVars
|
||||||
|
}
|
||||||
|
if vars.Auction != nil {
|
||||||
|
u.vars.Auction = vars.Auction
|
||||||
|
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
|
||||||
|
u.state.Auction = *auctionVars
|
||||||
|
}
|
||||||
|
if vars.WDelayer != nil {
|
||||||
|
u.vars.WDelayer = vars.WDelayer
|
||||||
|
u.state.WithdrawalDelayer = *u.vars.WDelayer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRecommendedFee update Status.RecommendedFee information
|
||||||
|
func (u *Updater) UpdateRecommendedFee() error {
|
||||||
|
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
u.rw.Lock()
|
||||||
|
u.state.RecommendedFee = *recommendedFee
|
||||||
|
u.rw.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMetrics update Status.Metrics information
|
||||||
|
func (u *Updater) UpdateMetrics() error {
|
||||||
|
u.rw.RLock()
|
||||||
|
lastBatch := u.state.Network.LastBatch
|
||||||
|
u.rw.RUnlock()
|
||||||
|
if lastBatch == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
lastBatchNum := lastBatch.BatchNum
|
||||||
|
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
|
||||||
|
if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
u.rw.Lock()
|
||||||
|
u.state.Metrics = *metrics
|
||||||
|
u.rw.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNetworkInfoBlock update Status.Network block related information
|
||||||
|
func (u *Updater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
|
||||||
|
u.rw.Lock()
|
||||||
|
u.state.Network.LastSyncBlock = lastSyncBlock.Num
|
||||||
|
u.state.Network.LastEthBlock = lastEthBlock.Num
|
||||||
|
u.rw.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNetworkInfo update Status.Network information
|
||||||
|
func (u *Updater) UpdateNetworkInfo(
|
||||||
|
lastEthBlock, lastSyncBlock common.Block,
|
||||||
|
lastBatchNum common.BatchNum, currentSlot int64,
|
||||||
|
) error {
|
||||||
|
// Get last batch in API format
|
||||||
|
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
|
||||||
|
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||||
|
lastBatch = nil
|
||||||
|
} else if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
u.rw.RLock()
|
||||||
|
auctionVars := u.vars.Auction
|
||||||
|
u.rw.RUnlock()
|
||||||
|
// Get next forgers
|
||||||
|
lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
|
||||||
|
nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
|
||||||
|
lastSyncBlock, currentSlot, lastClosedSlot)
|
||||||
|
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||||
|
nextForgers = nil
|
||||||
|
} else if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
bucketUpdates = nil
|
||||||
|
} else if err != nil {
|
||||||
|
return tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
u.rw.Lock()
|
||||||
|
// Update NodeInfo struct
|
||||||
|
for i, bucketParams := range u.state.Rollup.Buckets {
|
||||||
|
for _, bucketUpdate := range bucketUpdates {
|
||||||
|
if bucketUpdate.NumBucket == i {
|
||||||
|
bucketParams.Withdrawals = bucketUpdate.Withdrawals
|
||||||
|
u.state.Rollup.Buckets[i] = bucketParams
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
u.state.Network.LastSyncBlock = lastSyncBlock.Num
|
||||||
|
u.state.Network.LastEthBlock = lastEthBlock.Num
|
||||||
|
u.state.Network.LastBatch = lastBatch
|
||||||
|
u.state.Network.CurrentSlot = currentSlot
|
||||||
|
u.state.Network.NextForgers = nextForgers
|
||||||
|
u.rw.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -183,28 +183,6 @@ type StartBlockNums struct {
|
|||||||
WDelayer int64
|
WDelayer int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// SCVariables joins all the smart contract variables in a single struct
|
|
||||||
type SCVariables struct {
|
|
||||||
Rollup common.RollupVariables `validate:"required"`
|
|
||||||
Auction common.AuctionVariables `validate:"required"`
|
|
||||||
WDelayer common.WDelayerVariables `validate:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SCVariablesPtr joins all the smart contract variables as pointers in a single
|
|
||||||
// struct
|
|
||||||
type SCVariablesPtr struct {
|
|
||||||
Rollup *common.RollupVariables `validate:"required"`
|
|
||||||
Auction *common.AuctionVariables `validate:"required"`
|
|
||||||
WDelayer *common.WDelayerVariables `validate:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SCConsts joins all the smart contract constants in a single struct
|
|
||||||
type SCConsts struct {
|
|
||||||
Rollup common.RollupConstants
|
|
||||||
Auction common.AuctionConstants
|
|
||||||
WDelayer common.WDelayerConstants
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config is the Synchronizer configuration
|
// Config is the Synchronizer configuration
|
||||||
type Config struct {
|
type Config struct {
|
||||||
StatsRefreshPeriod time.Duration
|
StatsRefreshPeriod time.Duration
|
||||||
@@ -214,14 +192,14 @@ type Config struct {
|
|||||||
// Synchronizer implements the Synchronizer type
|
// Synchronizer implements the Synchronizer type
|
||||||
type Synchronizer struct {
|
type Synchronizer struct {
|
||||||
ethClient eth.ClientInterface
|
ethClient eth.ClientInterface
|
||||||
consts SCConsts
|
consts common.SCConsts
|
||||||
historyDB *historydb.HistoryDB
|
historyDB *historydb.HistoryDB
|
||||||
l2DB *l2db.L2DB
|
l2DB *l2db.L2DB
|
||||||
stateDB *statedb.StateDB
|
stateDB *statedb.StateDB
|
||||||
cfg Config
|
cfg Config
|
||||||
initVars SCVariables
|
initVars common.SCVariables
|
||||||
startBlockNum int64
|
startBlockNum int64
|
||||||
vars SCVariables
|
vars common.SCVariables
|
||||||
stats *StatsHolder
|
stats *StatsHolder
|
||||||
resetStateFailed bool
|
resetStateFailed bool
|
||||||
}
|
}
|
||||||
@@ -244,7 +222,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
|
|||||||
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
|
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
|
||||||
err))
|
err))
|
||||||
}
|
}
|
||||||
consts := SCConsts{
|
consts := common.SCConsts{
|
||||||
Rollup: *rollupConstants,
|
Rollup: *rollupConstants,
|
||||||
Auction: *auctionConstants,
|
Auction: *auctionConstants,
|
||||||
WDelayer: *wDelayerConstants,
|
WDelayer: *wDelayerConstants,
|
||||||
@@ -310,11 +288,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SCVars returns a copy of the Smart Contract Variables
|
// SCVars returns a copy of the Smart Contract Variables
|
||||||
func (s *Synchronizer) SCVars() SCVariablesPtr {
|
func (s *Synchronizer) SCVars() *common.SCVariables {
|
||||||
return SCVariablesPtr{
|
return &common.SCVariables{
|
||||||
Rollup: s.vars.Rollup.Copy(),
|
Rollup: *s.vars.Rollup.Copy(),
|
||||||
Auction: s.vars.Auction.Copy(),
|
Auction: *s.vars.Auction.Copy(),
|
||||||
WDelayer: s.vars.WDelayer.Copy(),
|
WDelayer: *s.vars.WDelayer.Copy(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -357,7 +335,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
|
|||||||
// We want the next block because the current one is already mined
|
// We want the next block because the current one is already mined
|
||||||
blockNum := s.stats.Sync.LastBlock.Num + 1
|
blockNum := s.stats.Sync.LastBlock.Num + 1
|
||||||
slotNum := s.consts.Auction.SlotNum(blockNum)
|
slotNum := s.consts.Auction.SlotNum(blockNum)
|
||||||
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
|
syncLastBlockNum := s.stats.Sync.LastBlock.Num
|
||||||
if reset {
|
if reset {
|
||||||
// Using this query only to know if there
|
// Using this query only to know if there
|
||||||
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
|
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
|
||||||
@@ -367,7 +345,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
|
|||||||
hasBatch = false
|
hasBatch = false
|
||||||
} else {
|
} else {
|
||||||
hasBatch = true
|
hasBatch = true
|
||||||
firstBatchBlockNum = dbFirstBatchBlockNum
|
syncLastBlockNum = dbFirstBatchBlockNum
|
||||||
}
|
}
|
||||||
slot.ForgerCommitment = false
|
slot.ForgerCommitment = false
|
||||||
} else if slotNum > slot.SlotNum {
|
} else if slotNum > slot.SlotNum {
|
||||||
@@ -376,16 +354,14 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
|
|||||||
}
|
}
|
||||||
slot.SlotNum = slotNum
|
slot.SlotNum = slotNum
|
||||||
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
||||||
|
if hasBatch && s.consts.Auction.RelativeBlock(syncLastBlockNum) < int64(s.vars.Auction.SlotDeadline) {
|
||||||
|
slot.ForgerCommitment = true
|
||||||
|
}
|
||||||
// If Synced, update the current coordinator
|
// If Synced, update the current coordinator
|
||||||
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
||||||
if err := s.setSlotCoordinator(slot); err != nil {
|
if err := s.setSlotCoordinator(slot); err != nil {
|
||||||
return tracerr.Wrap(err)
|
return tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if hasBatch &&
|
|
||||||
s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
|
|
||||||
int64(s.vars.Auction.SlotDeadline) {
|
|
||||||
slot.ForgerCommitment = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Remove this SANITY CHECK once this code is tested enough
|
// TODO: Remove this SANITY CHECK once this code is tested enough
|
||||||
// BEGIN SANITY CHECK
|
// BEGIN SANITY CHECK
|
||||||
@@ -727,7 +703,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getInitialVariables(ethClient eth.ClientInterface,
|
func getInitialVariables(ethClient eth.ClientInterface,
|
||||||
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
|
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
|
||||||
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
|
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
|
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
|
||||||
@@ -743,7 +719,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
|
|||||||
rollupVars := rollupInit.RollupVariables()
|
rollupVars := rollupInit.RollupVariables()
|
||||||
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
|
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
|
||||||
wDelayerVars := wDelayerInit.WDelayerVariables()
|
wDelayerVars := wDelayerInit.WDelayerVariables()
|
||||||
return &SCVariables{
|
return &common.SCVariables{
|
||||||
Rollup: *rollupVars,
|
Rollup: *rollupVars,
|
||||||
Auction: *auctionVars,
|
Auction: *auctionVars,
|
||||||
WDelayer: *wDelayerVars,
|
WDelayer: *wDelayerVars,
|
||||||
|
|||||||
@@ -378,9 +378,9 @@ func TestSyncGeneral(t *testing.T) {
|
|||||||
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
|
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
|
||||||
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
|
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
|
||||||
vars := s.SCVars()
|
vars := s.SCVars()
|
||||||
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
|
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
|
||||||
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
|
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
|
||||||
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
|
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
|
||||||
|
|
||||||
dbBlocks, err := s.historyDB.GetAllBlocks()
|
dbBlocks, err := s.historyDB.GetAllBlocks()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -541,9 +541,9 @@ func TestSyncGeneral(t *testing.T) {
|
|||||||
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
|
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
|
||||||
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
|
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
|
||||||
vars = s.SCVars()
|
vars = s.SCVars()
|
||||||
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
|
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
|
||||||
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
|
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
|
||||||
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
|
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
|
||||||
|
|
||||||
dbExits, err := s.historyDB.GetAllExits()
|
dbExits, err := s.historyDB.GetAllExits()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -673,9 +673,9 @@ func TestSyncGeneral(t *testing.T) {
|
|||||||
assert.Equal(t, false, stats.Synced())
|
assert.Equal(t, false, stats.Synced())
|
||||||
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
|
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
|
||||||
vars = s.SCVars()
|
vars = s.SCVars()
|
||||||
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
|
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
|
||||||
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
|
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
|
||||||
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
|
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
|
||||||
|
|
||||||
// At this point, the DB only has data up to block 1
|
// At this point, the DB only has data up to block 1
|
||||||
dbBlock, err := s.historyDB.GetLastBlock()
|
dbBlock, err := s.historyDB.GetLastBlock()
|
||||||
@@ -712,9 +712,9 @@ func TestSyncGeneral(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
vars = s.SCVars()
|
vars = s.SCVars()
|
||||||
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
|
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
|
||||||
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
|
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
|
||||||
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
|
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
|
||||||
}
|
}
|
||||||
|
|
||||||
dbBlock, err = s.historyDB.GetLastBlock()
|
dbBlock, err = s.historyDB.GetLastBlock()
|
||||||
|
|||||||
@@ -212,6 +212,17 @@ PoolTransferToBJJ(1) A-C: 3 (1)
|
|||||||
// SetBlockchainMinimumFlow0 contains a set of transactions with a minimal flow
|
// SetBlockchainMinimumFlow0 contains a set of transactions with a minimal flow
|
||||||
var SetBlockchainMinimumFlow0 = `
|
var SetBlockchainMinimumFlow0 = `
|
||||||
Type: Blockchain
|
Type: Blockchain
|
||||||
|
// Idxs:
|
||||||
|
// 256: A(0)
|
||||||
|
// 257: C(1)
|
||||||
|
// 258: A(1)
|
||||||
|
// 259: B(0)
|
||||||
|
// 260: D(0)
|
||||||
|
// 261: Coord(1)
|
||||||
|
// 262: Coord(0)
|
||||||
|
// 263: B(1)
|
||||||
|
// 264: C(0)
|
||||||
|
// 265: F(0)
|
||||||
|
|
||||||
AddToken(1)
|
AddToken(1)
|
||||||
|
|
||||||
@@ -255,10 +266,11 @@ CreateAccountDeposit(0) D: 800
|
|||||||
// C(0): 0
|
// C(0): 0
|
||||||
|
|
||||||
// Coordinator creates needed accounts to receive Fees
|
// Coordinator creates needed accounts to receive Fees
|
||||||
CreateAccountCoordinator(1) Coord
|
|
||||||
CreateAccountCoordinator(0) Coord
|
|
||||||
// Coordinator creates needed 'To' accounts for the L2Txs
|
// Coordinator creates needed 'To' accounts for the L2Txs
|
||||||
|
// sorted in the way that the TxSelector creates them
|
||||||
|
CreateAccountCoordinator(1) Coord
|
||||||
CreateAccountCoordinator(1) B
|
CreateAccountCoordinator(1) B
|
||||||
|
CreateAccountCoordinator(0) Coord
|
||||||
CreateAccountCoordinator(0) C
|
CreateAccountCoordinator(0) C
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -186,7 +186,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
|||||||
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"3844339393304253264418296322137281996442345663805792718218845145754742722151",
|
"4392049343656836675348565048374261353937130287163762821533580216441778455298",
|
||||||
bb.LocalStateDB().MT.Root().BigInt().String())
|
bb.LocalStateDB().MT.Root().BigInt().String())
|
||||||
sendProofAndCheckResp(t, zki)
|
sendProofAndCheckResp(t, zki)
|
||||||
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
||||||
@@ -215,7 +215,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
|||||||
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"2537294203394018451170116789946369404362093672592091326351037700505720139801",
|
"8905191229562583213069132470917469035834300549892959854483573322676101624713",
|
||||||
bb.LocalStateDB().MT.Root().BigInt().String())
|
bb.LocalStateDB().MT.Root().BigInt().String())
|
||||||
sendProofAndCheckResp(t, zki)
|
sendProofAndCheckResp(t, zki)
|
||||||
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
||||||
@@ -242,7 +242,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
|||||||
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
|
"20593679664586247774284790801579542411781976279024409415159440382607791042723",
|
||||||
bb.LocalStateDB().MT.Root().BigInt().String())
|
bb.LocalStateDB().MT.Root().BigInt().String())
|
||||||
sendProofAndCheckResp(t, zki)
|
sendProofAndCheckResp(t, zki)
|
||||||
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
||||||
@@ -264,7 +264,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
|||||||
// same root as previous batch, as the L1CoordinatorTxs created by the
|
// same root as previous batch, as the L1CoordinatorTxs created by the
|
||||||
// Til set is not created by the TxSelector in this test
|
// Til set is not created by the TxSelector in this test
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
|
"20593679664586247774284790801579542411781976279024409415159440382607791042723",
|
||||||
bb.LocalStateDB().MT.Root().BigInt().String())
|
bb.LocalStateDB().MT.Root().BigInt().String())
|
||||||
sendProofAndCheckResp(t, zki)
|
sendProofAndCheckResp(t, zki)
|
||||||
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs),
|
||||||
|
|||||||
@@ -732,13 +732,13 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
|
|||||||
// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
|
// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
|
||||||
if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
|
if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
|
||||||
if tp.s.Type() == statedb.TypeSynchronizer {
|
if tp.s.Type() == statedb.TypeSynchronizer {
|
||||||
// thisTypeould never be reached
|
// this in TypeSynchronizer should never be reached
|
||||||
log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
|
log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
|
||||||
return nil, nil, false,
|
return nil, nil, false,
|
||||||
tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
|
tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
|
||||||
}
|
}
|
||||||
// case when tx.Type== common.TxTypeTransferToEthAddr or common.TxTypeTransferToBJJ
|
// case when tx.Type == common.TxTypeTransferToEthAddr or
|
||||||
|
// common.TxTypeTransferToBJJ:
|
||||||
accSender, err := tp.s.GetAccount(tx.FromIdx)
|
accSender, err := tp.s.GetAccount(tx.FromIdx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, false, tracerr.Wrap(err)
|
return nil, nil, false, tracerr.Wrap(err)
|
||||||
|
|||||||
@@ -218,7 +218,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
chainID := uint16(0)
|
chainID := uint16(0)
|
||||||
// generate test transactions from test.SetBlockchain0 code
|
// generate test transactions from test.SetBlockchainMinimumFlow0 code
|
||||||
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
||||||
blocks, err := tc.GenerateBlocks(txsets.SetBlockchainMinimumFlow0)
|
blocks, err := tc.GenerateBlocks(txsets.SetBlockchainMinimumFlow0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -288,7 +288,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
"9061858435528794221929846392270405504056106238451760714188625065949729889651",
|
"9061858435528794221929846392270405504056106238451760714188625065949729889651",
|
||||||
tp.s.MT.Root().BigInt().String())
|
tp.s.MT.Root().BigInt().String())
|
||||||
|
|
||||||
coordIdxs := []common.Idx{261, 262}
|
coordIdxs := []common.Idx{261, 263}
|
||||||
log.Debug("block:0 batch:7")
|
log.Debug("block:0 batch:7")
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
||||||
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
|
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
|
||||||
@@ -303,7 +303,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
checkBalance(t, tc, sdb, "C", 0, "100")
|
checkBalance(t, tc, sdb, "C", 0, "100")
|
||||||
checkBalance(t, tc, sdb, "D", 0, "800")
|
checkBalance(t, tc, sdb, "D", 0, "800")
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"3844339393304253264418296322137281996442345663805792718218845145754742722151",
|
"4392049343656836675348565048374261353937130287163762821533580216441778455298",
|
||||||
tp.s.MT.Root().BigInt().String())
|
tp.s.MT.Root().BigInt().String())
|
||||||
|
|
||||||
log.Debug("block:0 batch:8")
|
log.Debug("block:0 batch:8")
|
||||||
@@ -321,7 +321,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
checkBalance(t, tc, sdb, "C", 1, "100")
|
checkBalance(t, tc, sdb, "C", 1, "100")
|
||||||
checkBalance(t, tc, sdb, "D", 0, "800")
|
checkBalance(t, tc, sdb, "D", 0, "800")
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"2537294203394018451170116789946369404362093672592091326351037700505720139801",
|
"8905191229562583213069132470917469035834300549892959854483573322676101624713",
|
||||||
tp.s.MT.Root().BigInt().String())
|
tp.s.MT.Root().BigInt().String())
|
||||||
|
|
||||||
coordIdxs = []common.Idx{262}
|
coordIdxs = []common.Idx{262}
|
||||||
@@ -330,8 +330,8 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
|
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
|
||||||
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
|
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkBalance(t, tc, sdb, "Coord", 0, "75")
|
|
||||||
checkBalance(t, tc, sdb, "Coord", 1, "30")
|
checkBalance(t, tc, sdb, "Coord", 1, "30")
|
||||||
|
checkBalance(t, tc, sdb, "Coord", 0, "35")
|
||||||
checkBalance(t, tc, sdb, "A", 0, "730")
|
checkBalance(t, tc, sdb, "A", 0, "730")
|
||||||
checkBalance(t, tc, sdb, "A", 1, "280")
|
checkBalance(t, tc, sdb, "A", 1, "280")
|
||||||
checkBalance(t, tc, sdb, "B", 0, "380")
|
checkBalance(t, tc, sdb, "B", 0, "380")
|
||||||
@@ -340,7 +340,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
checkBalance(t, tc, sdb, "C", 1, "100")
|
checkBalance(t, tc, sdb, "C", 1, "100")
|
||||||
checkBalance(t, tc, sdb, "D", 0, "470")
|
checkBalance(t, tc, sdb, "D", 0, "470")
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"13463929859122729344499006353544877221550995454069650137270994940730475267399",
|
"12063160053709941400160547588624831667157042937323422396363359123696668555050",
|
||||||
tp.s.MT.Root().BigInt().String())
|
tp.s.MT.Root().BigInt().String())
|
||||||
|
|
||||||
coordIdxs = []common.Idx{}
|
coordIdxs = []common.Idx{}
|
||||||
@@ -350,7 +350,7 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
|
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
"21058792089669864857092637997959333050678445584244682889041632034478049099916",
|
"20375835796927052406196249140510136992262283055544831070430919054949353249481",
|
||||||
tp.s.MT.Root().BigInt().String())
|
tp.s.MT.Root().BigInt().String())
|
||||||
|
|
||||||
// use Set of PoolL2 txs
|
// use Set of PoolL2 txs
|
||||||
@@ -359,8 +359,8 @@ func TestProcessTxsBalances(t *testing.T) {
|
|||||||
|
|
||||||
_, err = tp.ProcessTxs(coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
|
_, err = tp.ProcessTxs(coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkBalance(t, tc, sdb, "Coord", 0, "75")
|
|
||||||
checkBalance(t, tc, sdb, "Coord", 1, "30")
|
checkBalance(t, tc, sdb, "Coord", 1, "30")
|
||||||
|
checkBalance(t, tc, sdb, "Coord", 0, "35")
|
||||||
checkBalance(t, tc, sdb, "A", 0, "510")
|
checkBalance(t, tc, sdb, "A", 0, "510")
|
||||||
checkBalance(t, tc, sdb, "A", 1, "170")
|
checkBalance(t, tc, sdb, "A", 1, "170")
|
||||||
checkBalance(t, tc, sdb, "B", 0, "480")
|
checkBalance(t, tc, sdb, "B", 0, "480")
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
@@ -148,17 +148,37 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
discardedL2Txs, tracerr.Wrap(err)
|
discardedL2Txs, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getL1L2TxSelection returns the selection of L1 + L2 txs.
|
||||||
|
// It returns: the CoordinatorIdxs used to receive the fees of the selected
|
||||||
|
// L2Txs. An array of bytearrays with the signatures of the
|
||||||
|
// AccountCreationAuthorization of the accounts of the users created by the
|
||||||
|
// Coordinator with L1CoordinatorTxs of those accounts that does not exist yet
|
||||||
|
// but there is a transactions to them and the authorization of account
|
||||||
|
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
|
||||||
|
// included in the next batch.
|
||||||
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
||||||
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
|
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
|
||||||
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
|
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
|
||||||
// WIP.0: the TxSelector is not optimized and will need a redesign. The
|
// WIP.0: the TxSelector is not optimized and will need a redesign. The
|
||||||
// current version is implemented in order to have a functional
|
// current version is implemented in order to have a functional
|
||||||
// implementation that can be used asap.
|
// implementation that can be used ASAP.
|
||||||
//
|
|
||||||
// WIP.1: this method uses a 'cherry-pick' of internal calls of the
|
// Steps of this method:
|
||||||
// StateDB, a refactor of the StateDB to reorganize it internally is
|
// - getPendingTxs
|
||||||
// planned once the main functionallities are covered, with that
|
// - ProcessL1Txs
|
||||||
// refactor the TxSelector will be updated also.
|
// - getProfitable (sort by fee & nonce)
|
||||||
|
// - loop over l2Txs
|
||||||
|
// - Fill tx.TokenID tx.Nonce
|
||||||
|
// - Check enough Balance on sender
|
||||||
|
// - Check Nonce
|
||||||
|
// - Create CoordAccount L1CoordTx for TokenID if needed
|
||||||
|
// - & ProcessL1Tx of L1CoordTx
|
||||||
|
// - Check validity of receiver Account for ToEthAddr / ToBJJ
|
||||||
|
// - Create UserAccount L1CoordTx if needed (and possible)
|
||||||
|
// - If everything is fine, store l2Tx to validTxs & update NoncesMap
|
||||||
|
// - Prepare coordIdxsMap & AccumulatedFees
|
||||||
|
// - Distribute AccumulatedFees to CoordIdxs
|
||||||
|
// - MakeCheckpoint
|
||||||
|
|
||||||
// get pending l2-tx from tx-pool
|
// get pending l2-tx from tx-pool
|
||||||
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
|
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
|
||||||
@@ -185,25 +205,62 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
// Sort l2TxsRaw (cropping at MaxTx at this point).
|
// Sort l2TxsRaw (cropping at MaxTx at this point).
|
||||||
// discardedL2Txs contains an array of the L2Txs that have not been
|
// discardedL2Txs contains an array of the L2Txs that have not been
|
||||||
// selected in this Batch.
|
// selected in this Batch.
|
||||||
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx)
|
l2Txs, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx-uint32(len(l1UserTxs)))
|
||||||
for i := range discardedL2Txs {
|
for i := range discardedL2Txs {
|
||||||
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee (does not fit inside the profitable set)"
|
discardedL2Txs[i].Info =
|
||||||
|
"Tx not selected due to low absolute fee (does not fit inside the profitable set)"
|
||||||
}
|
}
|
||||||
|
|
||||||
noncesMap := make(map[common.Idx]common.Nonce)
|
var validTxs []common.PoolL2Tx
|
||||||
var l2Txs []common.PoolL2Tx
|
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
|
||||||
// iterate over l2Txs
|
// Iterate over l2Txs
|
||||||
// - if tx.TokenID does not exist at CoordsIdxDB
|
// - check Nonces
|
||||||
// - create new L1CoordinatorTx creating a CoordAccount, for
|
// - check enough Balance for the Amount+Fee
|
||||||
// Coordinator to receive the fee of the new TokenID
|
// - if needed, create new L1CoordinatorTxs for unexisting ToIdx
|
||||||
for i := 0; i < len(l2Txs0); i++ {
|
// - keep used accAuths
|
||||||
accSender, err := tp.StateDB().GetAccount(l2Txs0[i].FromIdx)
|
// - put the valid txs into validTxs array
|
||||||
|
for i := 0; i < len(l2Txs); i++ {
|
||||||
|
// Check if there is space for more L2Txs in the selection
|
||||||
|
maxL2Txs := int(selectionConfig.MaxTx) -
|
||||||
|
len(l1UserTxs) - len(l1CoordinatorTxs)
|
||||||
|
if len(validTxs) >= maxL2Txs {
|
||||||
|
// no more available slots for L2Txs
|
||||||
|
l2Txs[i].Info =
|
||||||
|
"Tx not selected due not available slots for L2Txs"
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// get Nonce & TokenID from the Account by l2Tx.FromIdx
|
||||||
|
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
l2Txs0[i].TokenID = accSender.TokenID
|
l2Txs[i].TokenID = accSender.TokenID
|
||||||
// populate the noncesMap used at the next iteration
|
|
||||||
noncesMap[l2Txs0[i].FromIdx] = accSender.Nonce
|
// Check enough Balance on sender
|
||||||
|
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
|
||||||
|
if !enoughBalance {
|
||||||
|
// not valid Amount with current Balance. Discard L2Tx,
|
||||||
|
// and update Info parameter of the tx, and add it to
|
||||||
|
// the discardedTxs array
|
||||||
|
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
|
||||||
|
"Current sender account Balance: %s, Amount+Fee: %s",
|
||||||
|
balance.String(), feeAndAmount.String())
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if Nonce is correct
|
||||||
|
if l2Txs[i].Nonce != accSender.Nonce {
|
||||||
|
// not valid Nonce at tx. Discard L2Tx, and update Info
|
||||||
|
// parameter of the tx, and add it to the discardedTxs
|
||||||
|
// array
|
||||||
|
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
|
||||||
|
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, accSender.Nonce)
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
// if TokenID does not exist yet, create new L1CoordinatorTx to
|
// if TokenID does not exist yet, create new L1CoordinatorTx to
|
||||||
// create the CoordinatorAccount for that TokenID, to receive
|
// create the CoordinatorAccount for that TokenID, to receive
|
||||||
@@ -218,52 +275,27 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if newL1CoordTx != nil {
|
if newL1CoordTx != nil {
|
||||||
// if there is no space for the L1CoordinatorTx, discard the L2Tx
|
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
|
||||||
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) {
|
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
|
||||||
|
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
|
||||||
|
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
|
||||||
// discard L2Tx, and update Info parameter of
|
// discard L2Tx, and update Info parameter of
|
||||||
// the tx, and add it to the discardedTxs array
|
// the tx, and add it to the discardedTxs array
|
||||||
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
|
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
|
||||||
"L1CoordinatorTx and there is not enough space for L1Coordinator"
|
"L1CoordinatorTx and there is not enough space for L1Coordinator"
|
||||||
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// increase positionL1
|
// increase positionL1
|
||||||
positionL1++
|
positionL1++
|
||||||
l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
|
l1CoordinatorTxs = append(l1CoordinatorTxs, *newL1CoordTx)
|
||||||
accAuths = append(accAuths, txsel.coordAccount.AccountCreationAuth)
|
accAuths = append(accAuths, txsel.coordAccount.AccountCreationAuth)
|
||||||
}
|
|
||||||
l2Txs = append(l2Txs, l2Txs0[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
var validTxs []common.PoolL2Tx
|
// process the L1CoordTx
|
||||||
// iterate over l2TxsRaw
|
_, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx)
|
||||||
// - check Nonces
|
if err != nil {
|
||||||
// - check enough Balance for the Amount+Fee
|
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||||
// - if needed, create new L1CoordinatorTxs for unexisting ToIdx
|
|
||||||
// - keep used accAuths
|
|
||||||
// - put the valid txs into validTxs array
|
|
||||||
for i := 0; i < len(l2Txs); i++ {
|
|
||||||
enoughBalance, balance, feeAndAmount := tp.CheckEnoughBalance(l2Txs[i])
|
|
||||||
if !enoughBalance {
|
|
||||||
// not valid Amount with current Balance. Discard L2Tx,
|
|
||||||
// and update Info parameter of the tx, and add it to
|
|
||||||
// the discardedTxs array
|
|
||||||
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
|
|
||||||
"Current sender account Balance: %s, Amount+Fee: %s",
|
|
||||||
balance.String(), feeAndAmount.String())
|
|
||||||
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
// check if Nonce is correct
|
|
||||||
nonce := noncesMap[l2Txs[i].FromIdx]
|
|
||||||
if l2Txs[i].Nonce != nonce {
|
|
||||||
// not valid Nonce at tx. Discard L2Tx, and update Info
|
|
||||||
// parameter of the tx, and add it to the discardedTxs
|
|
||||||
// array
|
|
||||||
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
|
|
||||||
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
|
|
||||||
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB,
|
// If tx.ToIdx>=256, tx.ToIdx should exist to localAccountsDB,
|
||||||
@@ -287,7 +319,19 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if l1CoordinatorTx != nil {
|
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
|
||||||
|
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
|
||||||
|
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) ||
|
||||||
|
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) {
|
||||||
|
// discard L2Tx, and update Info parameter of
|
||||||
|
// the tx, and add it to the discardedTxs array
|
||||||
|
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
|
||||||
|
"L1CoordinatorTx and there is not enough space for L1Coordinator"
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if l1CoordinatorTx != nil && validL2Tx != nil {
|
||||||
// If ToEthAddr == 0xff.. this means that we
|
// If ToEthAddr == 0xff.. this means that we
|
||||||
// are handling a TransferToBJJ, which doesn't
|
// are handling a TransferToBJJ, which doesn't
|
||||||
// require an authorization because it doesn't
|
// require an authorization because it doesn't
|
||||||
@@ -303,9 +347,16 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
|
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
|
||||||
positionL1++
|
positionL1++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// process the L1CoordTx
|
||||||
|
_, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if validL2Tx != nil {
|
}
|
||||||
validTxs = append(validTxs, *validL2Tx)
|
if validL2Tx == nil {
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
} else if l2Txs[i].ToIdx >= common.IdxUserThreshold {
|
} else if l2Txs[i].ToIdx >= common.IdxUserThreshold {
|
||||||
receiverAcc, err := txsel.localAccountsDB.GetAccount(l2Txs[i].ToIdx)
|
receiverAcc, err := txsel.localAccountsDB.GetAccount(l2Txs[i].ToIdx)
|
||||||
@@ -352,24 +403,42 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Account found in the DB, include the l2Tx in the selection
|
|
||||||
validTxs = append(validTxs, l2Txs[i])
|
|
||||||
} else if l2Txs[i].ToIdx == common.Idx(1) {
|
|
||||||
// valid txs (of Exit type)
|
|
||||||
validTxs = append(validTxs, l2Txs[i])
|
|
||||||
}
|
|
||||||
noncesMap[l2Txs[i].FromIdx]++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process L1CoordinatorTxs
|
// get CoordIdxsMap for the TokenID of the current l2Txs[i]
|
||||||
for i := 0; i < len(l1CoordinatorTxs); i++ {
|
// get TokenID from tx.Sender account
|
||||||
_, _, _, _, err := tp.ProcessL1Tx(nil, &l1CoordinatorTxs[i])
|
tokenID := accSender.TokenID
|
||||||
|
coordIdx, err := txsel.getCoordIdx(tokenID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
// if err is db.ErrNotFound, should not happen, as all
|
||||||
|
// the validTxs.TokenID should have a CoordinatorIdx
|
||||||
|
// created in the DB at this point
|
||||||
|
return nil, nil, nil, nil, nil, nil,
|
||||||
|
tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+
|
||||||
|
"due: %s", tokenID, err))
|
||||||
}
|
}
|
||||||
|
// prepare temp coordIdxsMap & AccumulatedFees for the call to
|
||||||
|
// ProcessL2Tx
|
||||||
|
coordIdxsMap := map[common.TokenID]common.Idx{tokenID: coordIdx}
|
||||||
|
// tp.AccumulatedFees = make(map[common.Idx]*big.Int)
|
||||||
|
if _, ok := tp.AccumulatedFees[coordIdx]; !ok {
|
||||||
|
tp.AccumulatedFees[coordIdx] = big.NewInt(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &l2Txs[i])
|
||||||
|
if err != nil {
|
||||||
|
log.Debugw("txselector.getL1L2TxSelection at ProcessL2Tx", "err", err)
|
||||||
|
// Discard L2Tx, and update Info parameter of the tx,
|
||||||
|
// and add it to the discardedTxs array
|
||||||
|
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s",
|
||||||
|
err.Error())
|
||||||
|
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
validTxs = append(validTxs, l2Txs[i])
|
||||||
|
} // after this loop, no checks to discard txs should be done
|
||||||
|
|
||||||
// get CoordIdxsMap for the TokenIDs
|
// get CoordIdxsMap for the TokenIDs
|
||||||
coordIdxsMap := make(map[common.TokenID]common.Idx)
|
coordIdxsMap := make(map[common.TokenID]common.Idx)
|
||||||
for i := 0; i < len(validTxs); i++ {
|
for i := 0; i < len(validTxs); i++ {
|
||||||
@@ -391,9 +460,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
}
|
}
|
||||||
|
|
||||||
var coordIdxs []common.Idx
|
var coordIdxs []common.Idx
|
||||||
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
|
|
||||||
for _, idx := range coordIdxsMap {
|
for _, idx := range coordIdxsMap {
|
||||||
tp.AccumulatedFees[idx] = big.NewInt(0)
|
|
||||||
coordIdxs = append(coordIdxs, idx)
|
coordIdxs = append(coordIdxs, idx)
|
||||||
}
|
}
|
||||||
// sort CoordIdxs
|
// sort CoordIdxs
|
||||||
@@ -401,34 +468,6 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
return coordIdxs[i] < coordIdxs[j]
|
return coordIdxs[i] < coordIdxs[j]
|
||||||
})
|
})
|
||||||
|
|
||||||
// get most profitable L2-tx
|
|
||||||
maxL2Txs := int(selectionConfig.MaxTx) -
|
|
||||||
len(l1UserTxs) - len(l1CoordinatorTxs)
|
|
||||||
|
|
||||||
selectedL2Txs := validTxs
|
|
||||||
if len(validTxs) > maxL2Txs {
|
|
||||||
selectedL2Txs = selectedL2Txs[:maxL2Txs]
|
|
||||||
}
|
|
||||||
var finalL2Txs []common.PoolL2Tx
|
|
||||||
for i := 0; i < len(selectedL2Txs); i++ {
|
|
||||||
_, _, _, err = tp.ProcessL2Tx(coordIdxsMap, nil, nil, &selectedL2Txs[i])
|
|
||||||
if err != nil {
|
|
||||||
// the error can be due not valid tx data, or due other
|
|
||||||
// cases (such as StateDB error). At this initial
|
|
||||||
// version of the TxSelector, we discard the L2Tx and
|
|
||||||
// log the error, assuming that this will be iterated in
|
|
||||||
// a near future.
|
|
||||||
log.Error(err)
|
|
||||||
// Discard L2Tx, and update Info parameter of the tx,
|
|
||||||
// and add it to the discardedTxs array
|
|
||||||
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
|
|
||||||
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
|
|
||||||
noncesMap[selectedL2Txs[i].FromIdx]--
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
finalL2Txs = append(finalL2Txs, selectedL2Txs[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// distribute the AccumulatedFees from the processed L2Txs into the
|
// distribute the AccumulatedFees from the processed L2Txs into the
|
||||||
// Coordinator Idxs
|
// Coordinator Idxs
|
||||||
for idx, accumulatedFee := range tp.AccumulatedFees {
|
for idx, accumulatedFee := range tp.AccumulatedFees {
|
||||||
@@ -457,10 +496,11 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
|
|||||||
|
|
||||||
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
|
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
|
||||||
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
|
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
|
||||||
metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
|
metricSelectedL2Txs.Set(float64(len(validTxs)))
|
||||||
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
|
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
|
||||||
|
|
||||||
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
|
// return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
|
||||||
|
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
|
// processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
|
||||||
@@ -572,7 +612,10 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
|
|||||||
Type: common.TxTypeCreateAccountDeposit,
|
Type: common.TxTypeCreateAccountDeposit,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs {
|
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
|
||||||
|
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
|
||||||
|
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1UserTxs ||
|
||||||
|
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1UserTxs {
|
||||||
// L2Tx discarded
|
// L2Tx discarded
|
||||||
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
|
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
|
||||||
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
|
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
|
||||||
|
|||||||
@@ -276,22 +276,23 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.True(t, l2TxsFromDB[0].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[0].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
||||||
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
|
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
||||||
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
|
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
|
||||||
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[1])
|
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[2])
|
||||||
assert.Equal(t, accAuthSig0, accAuths[2])
|
assert.Equal(t, accAuthSig0, accAuths[1])
|
||||||
assert.Equal(t, accAuthSig1, accAuths[3])
|
assert.Equal(t, accAuthSig1, accAuths[3])
|
||||||
assert.Equal(t, 1, len(oL1UserTxs))
|
assert.Equal(t, 1, len(oL1UserTxs))
|
||||||
assert.Equal(t, 4, len(oL1CoordTxs))
|
assert.Equal(t, 4, len(oL1CoordTxs))
|
||||||
assert.Equal(t, 2, len(oL2Txs))
|
assert.Equal(t, 2, len(oL2Txs))
|
||||||
|
assert.Equal(t, 0, len(discardedL2Txs))
|
||||||
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
|
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
|
||||||
assert.Equal(t, common.BatchNum(7), txsel.localAccountsDB.CurrentBatch())
|
assert.Equal(t, common.BatchNum(7), txsel.localAccountsDB.CurrentBatch())
|
||||||
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
||||||
checkBalanceByIdx(t, txsel, 261, "20") // CoordIdx for TokenID=1
|
checkBalance(t, tc, txsel, "Coord", 1, "20") // CoordIdx for TokenID=1
|
||||||
checkBalanceByIdx(t, txsel, 262, "10") // CoordIdx for TokenID=0
|
checkBalance(t, tc, txsel, "Coord", 0, "10") // CoordIdx for TokenID=1
|
||||||
checkBalance(t, tc, txsel, "A", 0, "600")
|
checkBalance(t, tc, txsel, "A", 0, "600")
|
||||||
checkBalance(t, tc, txsel, "A", 1, "280")
|
checkBalance(t, tc, txsel, "A", 1, "280")
|
||||||
checkBalance(t, tc, txsel, "B", 0, "290")
|
checkBalance(t, tc, txsel, "B", 0, "290")
|
||||||
@@ -324,19 +325,20 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.True(t, l2TxsFromDB[2].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[2].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
||||||
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
|
||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{261, 262}, coordIdxs)
|
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
||||||
assert.Equal(t, 0, len(accAuths))
|
assert.Equal(t, 0, len(accAuths))
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
assert.Equal(t, 4, len(oL2Txs))
|
assert.Equal(t, 4, len(oL2Txs))
|
||||||
|
assert.Equal(t, 0, len(discardedL2Txs))
|
||||||
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
|
assert.Equal(t, len(oL1CoordTxs), len(accAuths))
|
||||||
assert.Equal(t, common.BatchNum(8), txsel.localAccountsDB.CurrentBatch())
|
assert.Equal(t, common.BatchNum(8), txsel.localAccountsDB.CurrentBatch())
|
||||||
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
||||||
checkBalanceByIdx(t, txsel, 261, "30")
|
checkBalance(t, tc, txsel, "Coord", 1, "30") // CoordIdx for TokenID=1
|
||||||
checkBalanceByIdx(t, txsel, 262, "35")
|
checkBalance(t, tc, txsel, "Coord", 0, "35") // CoordIdx for TokenID=1
|
||||||
checkBalance(t, tc, txsel, "A", 0, "430")
|
checkBalance(t, tc, txsel, "A", 0, "430")
|
||||||
checkBalance(t, tc, txsel, "A", 1, "280")
|
checkBalance(t, tc, txsel, "A", 1, "280")
|
||||||
checkBalance(t, tc, txsel, "B", 0, "390")
|
checkBalance(t, tc, txsel, "B", 0, "390")
|
||||||
@@ -370,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{262}, coordIdxs)
|
assert.Equal(t, []common.Idx{263}, coordIdxs)
|
||||||
assert.Equal(t, 0, len(accAuths))
|
assert.Equal(t, 0, len(accAuths))
|
||||||
assert.Equal(t, 4, len(oL1UserTxs))
|
assert.Equal(t, 4, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -379,7 +381,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.Equal(t, common.BatchNum(9), txsel.localAccountsDB.CurrentBatch())
|
assert.Equal(t, common.BatchNum(9), txsel.localAccountsDB.CurrentBatch())
|
||||||
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
assert.Equal(t, common.Idx(264), txsel.localAccountsDB.CurrentIdx())
|
||||||
checkBalanceByIdx(t, txsel, 261, "30")
|
checkBalanceByIdx(t, txsel, 261, "30")
|
||||||
checkBalanceByIdx(t, txsel, 262, "75")
|
checkBalanceByIdx(t, txsel, 263, "75")
|
||||||
checkBalance(t, tc, txsel, "A", 0, "730")
|
checkBalance(t, tc, txsel, "A", 0, "730")
|
||||||
checkBalance(t, tc, txsel, "A", 1, "280")
|
checkBalance(t, tc, txsel, "A", 1, "280")
|
||||||
checkBalance(t, tc, txsel, "B", 0, "380")
|
checkBalance(t, tc, txsel, "B", 0, "380")
|
||||||
@@ -580,7 +582,6 @@ func TestTransferToBjj(t *testing.T) {
|
|||||||
require.Equal(t, 1, len(oL1CoordTxs))
|
require.Equal(t, 1, len(oL1CoordTxs))
|
||||||
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
|
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
|
||||||
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
|
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
|
||||||
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
|
|
||||||
assert.Equal(t, 1, len(oL2Txs))
|
assert.Equal(t, 1, len(oL2Txs))
|
||||||
assert.Equal(t, 0, len(discardedL2Txs))
|
assert.Equal(t, 0, len(discardedL2Txs))
|
||||||
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
||||||
@@ -712,7 +713,8 @@ func TestTransferManyFromSameAccount(t *testing.T) {
|
|||||||
|
|
||||||
// add the PoolL2Txs to the l2DB
|
// add the PoolL2Txs to the l2DB
|
||||||
addL2Txs(t, txsel, poolL2Txs)
|
addL2Txs(t, txsel, poolL2Txs)
|
||||||
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
|
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx
|
||||||
|
// transfers from account A
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
@@ -720,7 +722,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
|
|||||||
assert.Equal(t, 3, len(oL1UserTxs))
|
assert.Equal(t, 3, len(oL1UserTxs))
|
||||||
require.Equal(t, 0, len(oL1CoordTxs))
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
assert.Equal(t, 7, len(oL2Txs))
|
assert.Equal(t, 7, len(oL2Txs))
|
||||||
assert.Equal(t, 1, len(discardedL2Txs))
|
assert.Equal(t, 4, len(discardedL2Txs))
|
||||||
|
|
||||||
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
||||||
txsel.localAccountsDB.CurrentBatch())
|
txsel.localAccountsDB.CurrentBatch())
|
||||||
@@ -803,8 +805,8 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 3, len(oL1UserTxs))
|
require.Equal(t, 3, len(oL1UserTxs))
|
||||||
require.Equal(t, 0, len(oL1CoordTxs))
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
require.Equal(t, 2, len(oL2Txs))
|
require.Equal(t, 0, len(oL2Txs))
|
||||||
require.Equal(t, 8, len(discardedL2Txs))
|
require.Equal(t, 10, len(discardedL2Txs))
|
||||||
require.Equal(t, 0, len(accAuths))
|
require.Equal(t, 0, len(accAuths))
|
||||||
|
|
||||||
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
||||||
@@ -819,7 +821,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
|
|
||||||
require.Equal(t, 0, len(oL1UserTxs))
|
require.Equal(t, 0, len(oL1UserTxs))
|
||||||
require.Equal(t, 3, len(oL1CoordTxs))
|
require.Equal(t, 3, len(oL1CoordTxs))
|
||||||
require.Equal(t, 6, len(oL2Txs))
|
require.Equal(t, 8, len(oL2Txs))
|
||||||
require.Equal(t, 2, len(discardedL2Txs))
|
require.Equal(t, 2, len(discardedL2Txs))
|
||||||
require.Equal(t, 3, len(accAuths))
|
require.Equal(t, 3, len(accAuths))
|
||||||
|
|
||||||
@@ -843,3 +845,79 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
txsel.localAccountsDB.CurrentBatch())
|
txsel.localAccountsDB.CurrentBatch())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestProcessL2Selection(t *testing.T) {
|
||||||
|
set := `
|
||||||
|
Type: Blockchain
|
||||||
|
|
||||||
|
CreateAccountDeposit(0) Coord: 0
|
||||||
|
CreateAccountDeposit(0) A: 18
|
||||||
|
CreateAccountDeposit(0) B: 0
|
||||||
|
|
||||||
|
> batchL1 // freeze L1User{3}
|
||||||
|
> batchL1 // forge L1User{3}
|
||||||
|
> block
|
||||||
|
`
|
||||||
|
|
||||||
|
chainID := uint16(0)
|
||||||
|
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
||||||
|
blocks, err := tc.GenerateBlocks(set)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
|
||||||
|
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
|
||||||
|
|
||||||
|
// restart nonces of TilContext, as will be set by generating directly
|
||||||
|
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
|
||||||
|
tc.RestartNonces()
|
||||||
|
|
||||||
|
tpc := txprocessor.Config{
|
||||||
|
NLevels: 16,
|
||||||
|
MaxFeeTx: 10,
|
||||||
|
MaxTx: 10,
|
||||||
|
MaxL1Tx: 10,
|
||||||
|
ChainID: chainID,
|
||||||
|
}
|
||||||
|
// batch1 to freeze L1UserTxs
|
||||||
|
l1UserTxs := []common.L1Tx{}
|
||||||
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// 8 transfers from the same account
|
||||||
|
batchPoolL2 := `
|
||||||
|
Type: PoolL2
|
||||||
|
PoolTransfer(0) A-B: 10 (126)
|
||||||
|
PoolTransfer(0) A-B: 10 (126) // not enough funds
|
||||||
|
PoolTransfer(0) A-B: 5 (126) // enough funds
|
||||||
|
`
|
||||||
|
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 3, len(poolL2Txs))
|
||||||
|
|
||||||
|
// add the PoolL2Txs to the l2DB
|
||||||
|
addL2Txs(t, txsel, poolL2Txs)
|
||||||
|
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
|
||||||
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 3, len(oL1UserTxs))
|
||||||
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
|
// only 1st L2Tx should be accepted, as:
|
||||||
|
// - 2nd will not be selected as has not enough funds
|
||||||
|
// - 3rd will not be selected as has Nonce=2, and the account Nonce==1
|
||||||
|
// (due the 2nd txs not being accepted)
|
||||||
|
assert.Equal(t, 1, len(oL2Txs))
|
||||||
|
assert.Equal(t, 2, len(discardedL2Txs))
|
||||||
|
assert.Equal(t, common.Nonce(0), oL2Txs[0].Nonce)
|
||||||
|
assert.Equal(t, common.Nonce(1), discardedL2Txs[0].Nonce)
|
||||||
|
assert.Equal(t, common.Nonce(2), discardedL2Txs[1].Nonce)
|
||||||
|
assert.Equal(t, "Tx not selected due to not enough Balance at the sender. "+
|
||||||
|
"Current sender account Balance: 7, Amount+Fee: 11", discardedL2Txs[0].Info)
|
||||||
|
assert.Equal(t, "Tx not selected due to not current Nonce. Tx.Nonce: 2, "+
|
||||||
|
"Account.Nonce: 1", discardedL2Txs[1].Info)
|
||||||
|
|
||||||
|
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
|
||||||
|
txsel.localAccountsDB.CurrentBatch())
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user