hermez-node — mirror of https://github.com/arnaucube/hermez-node.git

Compare commits: feature/fa...feature/ge (17 commits)
Commits:

- 0511acd5e6
- 8d087e2727
- 22ffd93292
- b565c9da1a
- 7901705dfd
- 3a706e7775
- c84b3a4d0f
- 3f643f022a
- b8d339d568
- 6c1c157bc3
- f9ddf88c93
- a1eea43443
- 2125812e90
- f07fd82822
- d465d51e78
- e23d0a07d2
- 88b17cbe99
.github/workflows/release.yml (vendored, new file, +29)

```diff
@@ -0,0 +1,29 @@
+name: goreleaser
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  goreleaser:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      -
+        name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.16
+      -
+        name: Run GoReleaser
+        uses: goreleaser/goreleaser-action@v2
+        with:
+          version: latest
+          args: release --rm-dist
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (vendored, +1)

```diff
@@ -1 +1,2 @@
 bin/
+dist/
```
.goreleaser.yml (new file, +35)

```diff
@@ -0,0 +1,35 @@
+before:
+  hooks:
+    - go mod download
+    - make migration-pack
+
+builds:
+  - main: ./cli/node/main.go
+    binary: node
+    id: node
+    goos:
+      - linux
+      - darwin
+    goarch:
+      - amd64
+
+archives:
+  - replacements:
+      darwin: Darwin
+      linux: Linux
+      windows: Windows
+      386: i386
+      amd64: x86_64
+
+checksum:
+  name_template: 'checksums.txt'
+
+snapshot:
+  name_template: "{{ .Tag }}-next"
+
+changelog:
+  sort: asc
+  filters:
+    exclude:
+      - '^docs:'
+      - '^test:'
```
Makefile (12 changed lines)

```diff
@@ -3,8 +3,8 @@
 # Project variables.
 PACKAGE := github.com/hermeznetwork/hermez-node
 VERSION := $(shell git describe --tags --always)
-BUILD := $(shell git rev-parse --short HEAD)
-BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
+COMMIT := $(shell git rev-parse --short HEAD)
+DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
 PROJECT_NAME := $(shell basename "$(PWD)")

 # Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
 POSTGRES_PASS ?= yourpasswordhere

 # Use linker flags to provide version/build settings.
-LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
+LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"

 # PID file will keep the process id of the server.
 PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
@@ -94,11 +94,11 @@ install:
 	@echo " > Checking if there is any missing dependencies..."
 	$(GOENVVARS) go get $(GOCMD)/... $(get)

-## run: Run Hermez node.
-run:
+## run-node: Run Hermez node.
+run-node:
 	@bash -c "$(MAKE) clean build"
 	@echo " > Running $(PROJECT_NAME)"
-	@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
+	@$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)

 ## run-proof-mock: Run proof server mock API.
 run-proof-mock: stop-proof-mock
```
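The LDFLAGS rename lines up the Makefile with the lowercase variable names that goreleaser injects by default (`main.version`, `main.commit`, `main.date`); the `cli/node/main.go` hunk further down makes the same rename on the Go side. A minimal sketch of how the linker flags and the Go variables meet (illustrative, not the project's actual main):

```go
// Sketch: package-level variables overridden at link time, e.g.
//   go build -ldflags "-X main.version=v1.2.3 -X main.commit=abc1234 -X main.date=2021-05-01T00:00:00+0000"
package main

import "fmt"

var (
	version = "v0.1.0" // replaced by -X main.version=$(VERSION)
	commit  = "dev"    // replaced by -X main.commit=$(COMMIT)
	date    = ""       // replaced by -X main.date=$(DATE)
)

func main() {
	fmt.Printf("Version = %q\nBuild = %q\nDate = %q\n", version, commit, date)
}
```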
````diff
@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
 After setting the config, you can build and run the Hermez Node as a synchronizer:

 ```shell
-$ make run
+$ make run-node
 ```

 Or build and run as a coordinator, and also passing the config file from other location:

 ```shell
-$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
+$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
 ```

 To check the useful make commands:
````
```diff
@@ -60,6 +60,7 @@ func NewAPI(
 	// Transaction
 	v1.POST("/transactions-pool", a.postPoolTx)
 	v1.GET("/transactions-pool/:id", a.getPoolTx)
+	v1.GET("/transactions-pool", a.getPoolTxs)
 }

 // Add explorer endpoints
```
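For context, the new route exposes a filtered listing of the transaction pool. A throwaway client call could look like the sketch below; the base URL, port and `/v1` prefix are assumptions about a locally running API server, while the path and the query parameters (`state`, `fromAccountIndex`, `toAccountIndex`) come from the handler and OpenAPI changes in this comparison.

```go
// Illustrative only: query the new GET /transactions-pool endpoint.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("state", "pend")                   // PoolL2TransactionState
	q.Set("fromAccountIndex", "hez:ETH:263") // AccountIndex
	// Base URL is an assumption; adjust to wherever the API server listens.
	resp, err := http.Get("http://localhost:8086/v1/transactions-pool?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```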
```diff
@@ -522,11 +522,16 @@ func TestMain(m *testing.M) {
 		WithdrawalDelay: uint64(3000),
 	}

-	stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
+	stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
 		Rollup:   rollupVars,
 		Auction:  auctionVars,
 		WDelayer: wdelayerVars,
-	}, constants)
+	}, constants, &stateapiupdater.RecommendedFeePolicy{
+		PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
+	})
+	if err != nil {
+		panic(err)
+	}

 	// Generate test data, as expected to be received/sended from/to the API
 	testCoords := genTestCoordinators(commonCoords)
```
```diff
@@ -109,7 +109,7 @@ func (a *API) getFullBatch(c *gin.Context) {
 	// Fetch txs forged in the batch from historyDB
 	maxTxsPerBatch := uint(2048) //nolint:gomnd
 	txs, _, err := a.h.GetTxsAPI(
-		nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
+		nil, nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
 	)
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
 		retSQLErr(err, c)
```
```diff
@@ -96,6 +96,32 @@ func parseQueryBJJ(c querier) (*babyjub.PublicKeyComp, error) {
 	return hezStringToBJJ(bjjStr, name)
 }

+func parseQueryPoolL2TxState(c querier) (*common.PoolL2TxState, error) {
+	const name = "state"
+	stateStr := c.Query(name)
+	if stateStr == "" {
+		return nil, nil
+	}
+	switch common.PoolL2TxState(stateStr) {
+	case common.PoolL2TxStatePending:
+		ret := common.PoolL2TxStatePending
+		return &ret, nil
+	case common.PoolL2TxStateForged:
+		ret := common.PoolL2TxStateForged
+		return &ret, nil
+	case common.PoolL2TxStateForging:
+		ret := common.PoolL2TxStateForging
+		return &ret, nil
+	case common.PoolL2TxStateInvalid:
+		ret := common.PoolL2TxStateInvalid
+		return &ret, nil
+	}
+	return nil, tracerr.Wrap(fmt.Errorf(
+		"invalid %s, %s is not a valid option. Check the valid options in the docmentation",
+		name, stateStr,
+	))
+}
+
 func parseQueryTxType(c querier) (*common.TxType, error) {
 	const name = "type"
 	typeStr := c.Query(name)
@@ -146,6 +172,18 @@ func parseIdx(c querier) (*common.Idx, error) {
 	return stringToIdx(idxStr, name)
 }

+func parseFromIdx(c querier) (*common.Idx, error) {
+	const name = "fromAccountIndex"
+	idxStr := c.Query(name)
+	return stringToIdx(idxStr, name)
+}
+
+func parseToIdx(c querier) (*common.Idx, error) {
+	const name = "toAccountIndex"
+	idxStr := c.Query(name)
+	return stringToIdx(idxStr, name)
+}
+
 func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.PublicKeyComp, *common.Idx, error) {
 	// TokenID
 	tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
@@ -181,6 +219,47 @@ func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.
 	return tokenID, addr, bjj, idx, nil
 }

+func parseTxsHistoryFilters(c querier) (*common.TokenID, *ethCommon.Address,
+	*babyjub.PublicKeyComp, *common.Idx, *common.Idx, error) {
+	// TokenID
+	tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+	var tokenID *common.TokenID
+	if tid != nil {
+		tokenID = new(common.TokenID)
+		*tokenID = common.TokenID(*tid)
+	}
+	// Hez Eth addr
+	addr, err := parseQueryHezEthAddr(c)
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+	// BJJ
+	bjj, err := parseQueryBJJ(c)
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+	if addr != nil && bjj != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(errors.New("bjj and hezEthereumAddress params are incompatible"))
+	}
+	// from Idx
+	fromIdx, err := parseFromIdx(c)
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+	// to Idx
+	toIdx, err := parseToIdx(c)
+	if err != nil {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
+	}
+	if (fromIdx != nil || toIdx != nil) && (addr != nil || bjj != nil || tokenID != nil) {
+		return nil, nil, nil, nil, nil, tracerr.Wrap(errors.New("accountIndex is incompatible with BJJ, hezEthereumAddress and tokenId"))
+	}
+	return tokenID, addr, bjj, fromIdx, toIdx, nil
+}
+
 func parseTokenFilters(c querier) ([]common.TokenID, []string, string, error) {
 	idsStr := c.Query("ids")
 	symbolsStr := c.Query("symbols")
```
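The new history-transaction filters enforce two compatibility rules: `bjj` cannot be combined with `hezEthereumAddress`, and the account-index filters cannot be combined with `tokenId`, `hezEthereumAddress` or `BJJ`. Restated as a standalone predicate (a sketch for illustration, not the package's code):

```go
package main

import (
	"errors"
	"fmt"
)

// historyFiltersCompatible mirrors the validation added in parseTxsHistoryFilters.
func historyFiltersCompatible(hasAddr, hasBJJ, hasTokenID, hasFromIdx, hasToIdx bool) error {
	if hasAddr && hasBJJ {
		return errors.New("bjj and hezEthereumAddress params are incompatible")
	}
	if (hasFromIdx || hasToIdx) && (hasAddr || hasBJJ || hasTokenID) {
		return errors.New("accountIndex is incompatible with BJJ, hezEthereumAddress and tokenId")
	}
	return nil
}

func main() {
	// fromAccountIndex together with hezEthereumAddress -> rejected
	// (the history-txs test below expects a 400 for exactly this combination).
	fmt.Println(historyFiltersCompatible(true, false, false, true, false))
	// fromAccountIndex and toAccountIndex alone -> accepted.
	fmt.Println(historyFiltersCompatible(false, false, false, true, true))
}
```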
```diff
@@ -2,10 +2,12 @@ package stateapiupdater

 import (
 	"database/sql"
+	"fmt"
 	"sync"

 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 )

@@ -17,11 +19,45 @@ type Updater struct {
 	vars   common.SCVariablesPtr
 	consts historydb.Constants
 	rw     sync.RWMutex
+	rfp    *RecommendedFeePolicy
+}
+
+// RecommendedFeePolicy describes how the recommended fee is calculated
+type RecommendedFeePolicy struct {
+	PolicyType  RecommendedFeePolicyType `validate:"required"`
+	StaticValue float64
+}
+
+// RecommendedFeePolicyType describes the different available recommended fee strategies
+type RecommendedFeePolicyType string
+
+const (
+	// RecommendedFeePolicyTypeStatic always give the same StaticValue as recommended fee
+	RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
+	// RecommendedFeePolicyTypeAvgLastHour set the recommended fee using the average fee of the last hour
+	RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
+)
+
+func (rfp *RecommendedFeePolicy) valid() bool {
+	switch rfp.PolicyType {
+	case RecommendedFeePolicyTypeStatic:
+		if rfp.StaticValue == 0 {
+			log.Warn("RcommendedFee is set to 0 USD, and the policy is static")
+		}
+		return true
+	case RecommendedFeePolicyTypeAvgLastHour:
+		return true
+	default:
+		return false
+	}
 }

 // NewUpdater creates a new Updater
 func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
-	consts *historydb.Constants) *Updater {
+	consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
+	if ok := rfp.valid(); !ok {
+		return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
+	}
 	u := Updater{
 		hdb:    hdb,
 		config: *config,
@@ -31,9 +67,10 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
 			ForgeDelay: config.ForgeDelay,
 			},
 		},
+		rfp: rfp,
 	}
 	u.SetSCVars(vars.AsPtr())
-	return &u
+	return &u, nil
 }

 // Store the State in the HistoryDB
@@ -65,13 +102,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {

 // UpdateRecommendedFee update Status.RecommendedFee information
 func (u *Updater) UpdateRecommendedFee() error {
-	recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
-	if err != nil {
-		return tracerr.Wrap(err)
+	switch u.rfp.PolicyType {
+	case RecommendedFeePolicyTypeStatic:
+		u.rw.Lock()
+		u.state.RecommendedFee = common.RecommendedFee{
+			ExistingAccount:        u.rfp.StaticValue,
+			CreatesAccount:         u.rfp.StaticValue,
+			CreatesAccountInternal: u.rfp.StaticValue,
+		}
+		u.rw.Unlock()
+	case RecommendedFeePolicyTypeAvgLastHour:
+		recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
+		if err != nil {
+			return tracerr.Wrap(err)
+		}
+		u.rw.Lock()
+		u.state.RecommendedFee = *recommendedFee
+		u.rw.Unlock()
+	default:
+		return tracerr.New("Invalid recommende fee policy")
 	}
-	u.rw.Lock()
-	u.state.RecommendedFee = *recommendedFee
-	u.rw.Unlock()
 	return nil
 }

```
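The effect of the two recommended-fee policies can be summarized with a small standalone sketch; `RecommendedFee` is simplified here to its three float fields and the history-DB lookup is passed in as a callback, so this is illustrative rather than the package's actual code:

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-in for common.RecommendedFee.
type RecommendedFee struct {
	ExistingAccount, CreatesAccount, CreatesAccountInternal float64
}

// recommendedFeeFor mirrors the switch in UpdateRecommendedFee: a Static policy
// publishes the configured value for every fee kind, AvgLastHour publishes
// whatever the history DB computed, anything else is an error.
func recommendedFeeFor(policy string, staticValue float64,
	avgLastHour func() (*RecommendedFee, error)) (*RecommendedFee, error) {
	switch policy {
	case "Static":
		return &RecommendedFee{staticValue, staticValue, staticValue}, nil
	case "AvgLastHour":
		return avgLastHour()
	default:
		return nil, errors.New("invalid recommended fee policy")
	}
}

func main() {
	fee, _ := recommendedFeeFor("Static", 0.99, nil)
	fmt.Printf("%+v\n", *fee)
}
```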
```diff
@@ -415,6 +415,55 @@ paths:
             application/json:
               schema:
                 $ref: '#/components/schemas/Error500'
+    get:
+      tags:
+        - Coordinator
+      summary: Get transactions that are in the pool.
+      operationId: getPoolTxs
+      parameters:
+        - name: state
+          in: query
+          required: false
+          description: State of the transactions, e.g. "pend"
+          schema:
+            $ref: '#/components/schemas/PoolL2TransactionState'
+        - name: fromAccountIndex
+          in: query
+          required: false
+          description: Id of the from account
+          schema:
+            $ref: '#/components/schemas/AccountIndex'
+        - name: toAccountIndex
+          in: query
+          required: false
+          description: Id of the to account
+          schema:
+            $ref: '#/components/schemas/AccountIndex'
+      responses:
+        '200':
+          description: Successful operation.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/PoolL2Transactions'
+        '400':
+          description: Bad request.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Error400'
+        '404':
+          description: Not found.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Error404'
+        '500':
+          description: Internal server error
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Error500'
   '/transactions-pool/{id}':
     get:
       tags:
@@ -487,10 +536,16 @@ paths:
          required: false
          schema:
            $ref: '#/components/schemas/BJJ'
-        - name: accountIndex
+        - name: fromAccountIndex
          in: query
          required: false
-          description: Only get transactions sent from or to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
+          description: Only get transactions sent from a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
+          schema:
+            $ref: '#/components/schemas/AccountIndex'
+        - name: toAccountIndex
+          in: query
+          required: false
+          description: Only get transactions sent to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
          schema:
            $ref: '#/components/schemas/AccountIndex'
        - name: batchNum
@@ -1439,6 +1494,14 @@ components:
        - requestFee
        - requestNonce
        - token
+    PoolL2Transactions:
+      type: object
+      properties:
+        transactions:
+          type: array
+          description: List of pool l2 transactions
+          items:
+            $ref: '#/components/schemas/PoolL2Transaction'
     TransactionId:
       type: string
       description: Identifier for transactions. Used for any kind of transaction (both L1 and L2). More info on how the identifiers are built [here](https://idocs.hermez.io/#/spec/architecture/db/README?id=txid)
```
```diff
@@ -9,7 +9,7 @@ import (

 func (a *API) getHistoryTxs(c *gin.Context) {
 	// Get query parameters
-	tokenID, addr, bjj, idx, err := parseExitFilters(c)
+	tokenID, addr, bjj, fromIdx, toIdx, err := parseTxsHistoryFilters(c)
 	if err != nil {
 		retBadReq(err, c)
 		return
@@ -35,7 +35,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {

 	// Fetch txs from historyDB
 	txs, pendingItems, err := a.h.GetTxsAPI(
-		addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
+		addr, bjj, tokenID, fromIdx, toIdx, batchNum, txType, fromItem, limit, order,
 	)
 	if err != nil {
 		retSQLErr(err, c)
```
@@ -324,8 +324,8 @@ func TestGetHistoryTxs(t *testing.T) {
|
|||||||
idx, err := stringToIdx(idxStr, "")
|
idx, err := stringToIdx(idxStr, "")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
path = fmt.Sprintf(
|
path = fmt.Sprintf(
|
||||||
"%s?accountIndex=%s&limit=%d",
|
"%s?fromAccountIndex=%s&toAccountIndex=%s&limit=%d",
|
||||||
endpoint, idxStr, limit,
|
endpoint, idxStr, idxStr, limit,
|
||||||
)
|
)
|
||||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testTxsResponse{}, appendIter)
|
err = doGoodReqPaginated(path, historydb.OrderAsc, &testTxsResponse{}, appendIter)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -431,8 +431,8 @@ func TestGetHistoryTxs(t *testing.T) {
|
|||||||
assertTxs(t, []testTx{}, fetchedTxs)
|
assertTxs(t, []testTx{}, fetchedTxs)
|
||||||
// 400
|
// 400
|
||||||
path = fmt.Sprintf(
|
path = fmt.Sprintf(
|
||||||
"%s?accountIndex=%s&hezEthereumAddress=%s",
|
"%s?fromAccountIndex=%s&toAccountIndex=%s&hezEthereumAddress=%s",
|
||||||
endpoint, idx, account.EthAddr,
|
endpoint, idx, idx, account.EthAddr,
|
||||||
)
|
)
|
||||||
err = doBadReq("GET", path, nil, 400)
|
err = doBadReq("GET", path, nil, 400)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
```diff
@@ -55,6 +55,41 @@ func (a *API) getPoolTx(c *gin.Context) {
 	c.JSON(http.StatusOK, tx)
 }

+func (a *API) getPoolTxs(c *gin.Context) {
+	// Get from idx
+	fromIdx, err := parseFromIdx(c)
+	if err != nil {
+		retBadReq(err, c)
+		return
+	}
+	// Get to idx
+	toIdx, err := parseToIdx(c)
+	if err != nil {
+		retBadReq(err, c)
+		return
+	}
+	// Get state
+	state, err := parseQueryPoolL2TxState(c)
+	if err != nil {
+		retBadReq(err, c)
+		return
+	}
+	// Fetch txs from l2DB
+	txs, err := a.l2.GetPoolTxs(fromIdx, toIdx, state)
+	if err != nil {
+		retSQLErr(err, c)
+		return
+	}
+
+	// Build successful response
+	type txsResponse struct {
+		Txs []*l2db.PoolTxAPI `json:"transactions"`
+	}
+	c.JSON(http.StatusOK, &txsResponse{
+		Txs: txs,
+	})
+}
+
 type receivedPoolTx struct {
 	TxID common.TxID   `json:"id" binding:"required"`
 	Type common.TxType `json:"type" binding:"required"`
```
```diff
@@ -47,6 +47,10 @@ type testPoolTxReceive struct {
 	Token historydb.TokenWithUSD `json:"token"`
 }

+type testPoolTxsResponse struct {
+	Txs []testPoolTxReceive `json:"transactions"`
+}
+
 // testPoolTxSend is a struct to be used as a JSON body
 // when testing POST /transactions-pool
 type testPoolTxSend struct {
@@ -225,6 +229,24 @@ func TestPoolTxs(t *testing.T) {
 	err = doBadReq("POST", endpoint, jsonTxReader, 400)
 	require.NoError(t, err)
 	// GET
+	// get by idx
+	fetchedTxs := testPoolTxsResponse{}
+	require.NoError(t, doGoodReq(
+		"GET",
+		endpoint+"?fromAccountIndex=hez:ETH:263",
+		nil, &fetchedTxs))
+	assert.Equal(t, 1, len(fetchedTxs.Txs))
+	assert.Equal(t, "hez:ETH:263", fetchedTxs.Txs[0].FromIdx)
+	// get by state
+	require.NoError(t, doGoodReq(
+		"GET",
+		endpoint+"?state=pend",
+		nil, &fetchedTxs))
+	assert.Equal(t, 4, len(fetchedTxs.Txs))
+	for _, v := range fetchedTxs.Txs {
+		assert.Equal(t, common.PoolL2TxStatePending, v.State)
+	}
+	// GET
 	endpoint += "/"
 	for _, tx := range tc.poolTxsToReceive {
 		fetchedTx := testPoolTxReceive{}
```
```diff
@@ -73,6 +73,9 @@ of the node configuration. Please, check the `type APIServer` at
   monitor the size of the folder to avoid running out of space.
 - The node requires a PostgreSQL database. The parameters of the server and
   database must be set in the `PostgreSQL` section.
+- The node requires a web3 RPC server to work. The node has only been tested
+  with geth and may not work correctly with other ethereum nodes
+  implementations.

 ## Building

```
```diff
@@ -145,3 +145,11 @@ Coordinator = true
 BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
 LightScrypt = true
 # RollupVerifierIndex = 0
+
+[RecommendedFeePolicy]
+# Strategy used to calculate the recommended fee that the API will expose.
+# Available options:
+# - Static: always return the same value (StaticValue) in USD
+# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
+PolicyType = "Static"
+StaticValue = 0.99
```
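This TOML block is decoded into the `stateapiupdater.RecommendedFeePolicy` struct added earlier (the node config embeds it as the `RecommendedFeePolicy` field, per the `config.go` hunk below). A minimal decoding sketch using the BurntSushi/toml package the config already imports; the local struct here is a simplified stand-in for the real one:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Simplified stand-in for stateapiupdater.RecommendedFeePolicy.
type RecommendedFeePolicy struct {
	PolicyType  string
	StaticValue float64
}

type nodeConfig struct {
	RecommendedFeePolicy RecommendedFeePolicy
}

func main() {
	const cfg = `
[RecommendedFeePolicy]
PolicyType = "Static"
StaticValue = 0.99
`
	var c nodeConfig
	if _, err := toml.Decode(cfg, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.RecommendedFeePolicy)
}
```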
```diff
@@ -35,18 +35,18 @@ const (
 )

 var (
-	// Version represents the program based on the git tag
-	Version = "v0.1.0"
-	// Build represents the program based on the git commit
-	Build = "dev"
-	// Date represents the date of application was built
-	Date = ""
+	// version represents the program based on the git tag
+	version = "v0.1.0"
+	// commit represents the program based on the git commit
+	commit = "dev"
+	// date represents the date of application was built
+	date = ""
 )

 func cmdVersion(c *cli.Context) error {
-	fmt.Printf("Version = \"%v\"\n", Version)
-	fmt.Printf("Build = \"%v\"\n", Build)
-	fmt.Printf("Date = \"%v\"\n", Date)
+	fmt.Printf("Version = \"%v\"\n", version)
+	fmt.Printf("Build = \"%v\"\n", commit)
+	fmt.Printf("Date = \"%v\"\n", date)
 	return nil
 }

@@ -421,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
 func main() {
 	app := cli.NewApp()
 	app.Name = "hermez-node"
-	app.Version = Version
+	app.Version = version
 	flags := []cli.Flag{
 		&cli.StringFlag{
 			Name: flagMode,
```
```diff
@@ -8,6 +8,7 @@ import (

 	"github.com/BurntSushi/toml"
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/priceupdater"
 	"github.com/hermeznetwork/tracerr"
@@ -299,7 +300,8 @@ type Node struct {
 	} `validate:"required"`
 	PostgreSQL PostgreSQL `validate:"required"`
 	Web3 struct {
-		// URL is the URL of the web3 ethereum-node RPC server
+		// URL is the URL of the web3 ethereum-node RPC server. Only
+		// geth is officially supported.
 		URL string `validate:"required"`
 	} `validate:"required"`
 	Synchronizer struct {
@@ -346,8 +348,9 @@ type Node struct {
 		// can wait to stablish a SQL connection
 		SQLConnectionTimeout Duration
 	} `validate:"required"`
+	RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
 	Debug NodeDebug `validate:"required"`
 	Coordinator Coordinator `validate:"-"`
 }

 // APIServer is the api server configuration parameters
```
```diff
@@ -1,3 +1,43 @@
+/*
+Package coordinator handles all the logic related to forging batches as a
+coordinator in the hermez network.
+
+The forging of batches is done with a pipeline in order to allow multiple
+batches being forged in parallel. The maximum number of batches that can be
+forged in parallel is determined by the number of available proof servers.
+
+The Coordinator begins with the pipeline stopped. The main Coordinator
+goroutine keeps listening for synchronizer events sent by the node package,
+which allow the coordinator to determine if the configured forger address is
+allowed to forge at the current block or not. When the forger address becomes
+allowed to forge, the pipeline is started, and when it terminates being allowed
+to forge, the pipeline is stopped.
+
+The Pipeline consists of two goroutines. The first one is in charge of
+preparing a batch internally, which involves making a selection of transactions
+and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
+an idle proof server. This goroutine will keep preparing batches while there
+are idle proof servers, if the forging policy determines that a batch should be
+forged in the current state. The second goroutine is in charge of waiting for
+the proof server to finish computing the proof, retreiving it, prepare the
+arguments for the `forgeBatch` Rollup transaction, and sending the result to
+the TxManager. All the batch information moves between functions and
+goroutines via the BatchInfo struct.
+
+Finally, the TxManager contains a single goroutine that makes forgeBatch
+ethereum transactions for the batches sent by the Pipeline, and keeps them in a
+list to check them periodically. In the periodic checks, the ethereum
+transaction is checked for successfulness, and it's only forgotten after a
+number of confirmation blocks have passed after being successfully mined. At
+any point if a transaction failure is detected, the TxManager can signal the
+Coordinator to reset the Pipeline in order to reforge the failed batches.
+
+The Coordinator goroutine acts as a manager. The synchronizer events (which
+notify about new blocks and associated new state) that it receives are
+broadcasted to the Pipeline and the TxManager. This allows the Coordinator,
+Pipeline and TxManager to have a copy of the current hermez network state
+required to perform their duties.
+*/
 package coordinator

 import (
```
```diff
@@ -456,7 +456,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
 // and pagination info
 func (hdb *HistoryDB) GetTxsAPI(
 	ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp,
-	tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
+	tokenID *common.TokenID, fromIdx, toIdx *common.Idx, batchNum *uint, txType *common.TxType,
 	fromItem, limit *uint, order string,
 ) ([]TxAPI, uint64, error) {
 	// Warning: amount_success and deposit_amount_success have true as default for
@@ -508,14 +508,32 @@ func (hdb *HistoryDB) GetTxsAPI(
 		nextIsAnd = true
 	}
 	// idx filter
-	if idx != nil {
+	if fromIdx != nil && toIdx != nil {
 		if nextIsAnd {
 			queryStr += "AND "
 		} else {
 			queryStr += "WHERE "
 		}
-		queryStr += "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
-		args = append(args, idx, idx)
+		queryStr += "(tx.effective_from_idx = ? "
+		queryStr += "OR tx.to_idx = ?) "
+		args = append(args, fromIdx, toIdx)
+		nextIsAnd = true
+	} else if fromIdx != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "tx.effective_from_idx = ? "
+		nextIsAnd = true
+	} else if toIdx != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "tx.to_idx = ? "
+		args = append(args, toIdx)
 		nextIsAnd = true
 	}
 	// batchNum filter
```
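As a quick summary of the new branching, the account-index filter contributes one of three WHERE fragments depending on which indexes are set. A sketch only, with the fragments taken verbatim from the hunk above and bind arguments omitted:

```go
package main

import "fmt"

// whereIdxFilter restates the account-index branching in GetTxsAPI:
// both indexes -> OR of the two columns, otherwise filter on the one given.
func whereIdxFilter(fromIdx, toIdx *string) string {
	switch {
	case fromIdx != nil && toIdx != nil:
		return "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
	case fromIdx != nil:
		return "tx.effective_from_idx = ? "
	case toIdx != nil:
		return "tx.to_idx = ? "
	default:
		return ""
	}
}

func main() {
	from, to := "hez:ETH:256", "hez:ETH:257"
	fmt.Printf("%q\n", whereIdxFilter(&from, &to))
	fmt.Printf("%q\n", whereIdxFilter(&from, nil))
	fmt.Printf("%q\n", whereIdxFilter(nil, &to))
}
```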
```diff
@@ -127,3 +127,57 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
 		txID,
 	))
 }
+
+// GetPoolTxs return Txs from the pool
+func (l2db *L2DB) GetPoolTxs(fromIdx, toIdx *common.Idx, state *common.PoolL2TxState) ([]*PoolTxAPI, error) {
+	cancel, err := l2db.apiConnCon.Acquire()
+	defer cancel()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	defer l2db.apiConnCon.Release()
+	// Apply filters
+	nextIsAnd := false
+	queryStr := selectPoolTxAPI
+	var args []interface{}
+	if state != nil {
+		queryStr += "WHERE state = ? "
+		args = append(args, state)
+		nextIsAnd = true
+	}
+
+	if fromIdx != nil && toIdx != nil {
+		if nextIsAnd {
+			queryStr += "AND ("
+		} else {
+			queryStr += "WHERE ("
+		}
+		queryStr += "tx_pool.from_idx = ? "
+		queryStr += "OR tx_pool.to_idx = ?) "
+		args = append(args, fromIdx, toIdx)
+	} else if fromIdx != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "tx_pool.from_idx = ? "
+		args = append(args, fromIdx)
+	} else if toIdx != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "tx_pool.to_idx = ? "
+		args = append(args, toIdx)
+	}
+	queryStr += "AND NOT external_delete;"
+	query := l2db.dbRead.Rebind(queryStr)
+	txs := []*PoolTxAPI{}
+	err = meddler.QueryAll(
+		l2db.dbRead, &txs,
+		query,
+		args...)
+	return txs, tracerr.Wrap(err)
+}
```
```diff
@@ -311,6 +311,28 @@ func TestGetPending(t *testing.T) {
 	}
 }

+func TestL2DB_GetPoolTxs(t *testing.T) {
+	err := prepareHistoryDB(historyDB)
+	if err != nil {
+		log.Error("Error prepare historyDB", err)
+	}
+	poolL2Txs, err := generatePoolL2Txs()
+	require.NoError(t, err)
+	state := common.PoolL2TxState("pend")
+	idx := common.Idx(256)
+	var pendingTxs []*common.PoolL2Tx
+	for i := range poolL2Txs {
+		if poolL2Txs[i].FromIdx == idx || poolL2Txs[i].ToIdx == idx {
+			err := l2DB.AddTxTest(&poolL2Txs[i])
+			require.NoError(t, err)
+			pendingTxs = append(pendingTxs, &poolL2Txs[i])
+		}
+	}
+	fetchedTxs, err := l2DBWithACC.GetPoolTxs(&idx, &idx, &state)
+	require.NoError(t, err)
+	assert.Equal(t, len(pendingTxs), len(fetchedTxs))
+}
+
 func TestStartForging(t *testing.T) {
 	// Generate txs
 	var fakeBatchNum common.BatchNum = 33
```
```diff
@@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c
 	if number == -1 {
 		blockNum = nil
 	}
-	block, err := c.client.BlockByNumber(ctx, blockNum)
+	header, err := c.client.HeaderByNumber(ctx, blockNum)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
 	b := &common.Block{
-		Num:        block.Number().Int64(),
-		Timestamp:  time.Unix(int64(block.Time()), 0),
-		ParentHash: block.ParentHash(),
-		Hash:       block.Hash(),
+		Num:        header.Number.Int64(),
+		Timestamp:  time.Unix(int64(header.Time), 0),
+		ParentHash: header.ParentHash,
+		Hash:       header.Hash(),
 	}
 	return b, nil
 }
```
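Switching from `BlockByNumber` to `HeaderByNumber` means only the block header is fetched over RPC; the header already carries everything this function needs (number, timestamp, parent hash, hash), so the full block body with all its transactions no longer has to be downloaded. A minimal standalone sketch of the same call against go-ethereum's ethclient (the endpoint URL is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; point this at your geth node.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	// nil block number means "latest".
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("num:      ", header.Number.Int64())
	fmt.Println("timestamp:", time.Unix(int64(header.Time), 0))
	fmt.Println("parent:   ", header.ParentHash)
	fmt.Println("hash:     ", header.Hash())
}
```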
node/node.go (26 changed lines)

```diff
@@ -1,3 +1,18 @@
+/*
+Package node does the initialization of all the required objects to either run
+as a synchronizer or as a coordinator.
+
+The Node contains several goroutines that run in the background or that
+periodically perform tasks. One of this goroutines periodically calls the
+`Synchronizer.Sync` function, allowing the synchronization of one block at a
+time. After every call to `Synchronizer.Sync`, the Node sends a message to the
+Coordinator to notify it about the new synced block (and associated state) or
+reorg (and resetted state) in case one happens.
+
+Other goroutines perform tasks such as: updating the token prices, update
+metrics stored in the historyDB, update recommended fee stored in the
+historyDB, run the http API server, run the debug http API server, etc.
+*/
 package node

 import (
@@ -273,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		return nil, tracerr.Wrap(err)
 	}

-	stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
+	stateAPIUpdater, err := stateapiupdater.NewUpdater(
+		historyDB,
+		&hdbNodeCfg,
+		initSCVars,
+		&hdbConsts,
+		&cfg.RecommendedFeePolicy,
+	)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}

 	var coord *coordinator.Coordinator
 	if mode == ModeCoordinator {
```
```diff
@@ -1,3 +1,35 @@
+/*
+Package synchronizer synchronizes the hermez network state by querying events
+emitted by the three smart contracts: `Hermez.sol` (referred as Rollup here),
+`HermezAuctionProtocol.sol` (referred as Auction here) and
+`WithdrawalDelayer.sol` (referred as WDelayer here).
+
+The main entry point for synchronization is the `Sync` function, which at most
+will synchronize one ethereum block, and all the hermez events that happened in
+that block. During a `Sync` call, a reorg can be detected; in such case, uncle
+blocks will be discarded, and only in a future `Sync` call correct blocks will
+be synced.
+
+The synchronization of the events in each smart contracts are done
+in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
+use the interface code to read each smart contract state and events found in
+"github.com/hermeznetwork/hermez-node/eth". After these three methods are
+called, an object of type `common.BlockData` is built containing all the
+updates and events that happened in that block, and it is inserted in the
+HistoryDB in a single SQL transaction.
+
+`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
+transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
+state is updated in the StateDB by processing all transactions that have been
+forged in that batch.
+
+The consistency of the stored data is guaranteed by the HistoryDB: All the
+block information is inserted in a single SQL transaction at the end of the
+`Sync` method, once the StateDB has been updated. And every time the
+Synchronizer starts, it continues from the last block in the HistoryDB. The
+StateDB stores updates organized by checkpoints for every batch, and each batch
+is only accessed if it appears in the HistoryDB.
+*/
 package synchronizer

 import (
```