Compare commits

..

16 Commits

Author SHA1 Message Date
Mikelle
0511acd5e6 fixed txshistory_test.go 2021-03-31 12:48:49 +03:00
Mikelle
8d087e2727 added fromIdx and toIdx to transactionHistory 2021-03-31 12:33:57 +03:00
Mikelle
22ffd93292 implemented fromIdx and toIdx in transaction-pool request 2021-03-30 12:10:13 +03:00
Mikelle
b565c9da1a fixed lint errors 2021-03-29 14:01:01 +03:00
Mikelle
7901705dfd Merge branch 'develop' into feature/getPoolTxs 2021-03-29 13:42:01 +03:00
Mikelle
3a706e7775 implemented get pool txs endpoint 2021-03-29 13:41:36 +03:00
Mikelle
c84b3a4d0f implemented get pool txs endpoint 2021-03-29 13:39:05 +03:00
Eduard S
3f643f022a Merge pull request #677 from hermeznetwork/feature/fastsync-get-headerByNumber
Faster synchronization by fetching only block headers
2021-03-29 11:10:24 +02:00
Danilo Pantani
b8d339d568 Merge pull request #670 from hermeznetwork/fix/remove-release-os
fix the invalid goarch build
2021-03-26 16:16:29 -03:00
Eduard S
6c1c157bc3 Merge pull request #672 from hermeznetwork/feature/configurable-recommendedfee-strategy
Add configuration option to choose recommended fee strategy, and add …
2021-03-25 12:22:57 +01:00
arnaubennassar
f9ddf88c93 Add configuration option to choose recommended fee strategy, and add static strategy 2021-03-25 12:13:04 +01:00
Pantani
a1eea43443 fix the invalid goarch build and avoid calling the migration-pack each build 2021-03-24 10:41:32 -03:00
Danilo Pantani
f07fd82822 Merge pull request #653 from hermeznetwork/feature/goreleaser-integration
Generating automatically releases with Goreleaser
2021-03-23 13:52:41 -03:00
arnau
d465d51e78 Merge pull request #667 from hermeznetwork/feature/documentpackages
Document synchronizer, node and coordinator
2021-03-23 13:33:40 +01:00
Eduard S
e23d0a07d2 Document synchronizer, node and coordinator 2021-03-23 13:19:23 +01:00
Pantani
88b17cbe99 add release distribution with Goreleaser (https://goreleaser.com) 2021-03-22 20:44:27 -03:00
29 changed files with 577 additions and 169 deletions

29
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: goreleaser
on:
push:
tags:
- '*'
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
-
name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

3
.gitignore vendored
View File

@@ -1 +1,2 @@
bin/
bin/
dist/

35
.goreleaser.yml Normal file
View File

@@ -0,0 +1,35 @@
before:
hooks:
- go mod download
- make migration-pack
builds:
- main: ./cli/node/main.go
binary: node
id: node
goos:
- linux
- darwin
goarch:
- amd64
archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'

View File

@@ -3,8 +3,8 @@
# Project variables.
PACKAGE := github.com/hermeznetwork/hermez-node
VERSION := $(shell git describe --tags --always)
BUILD := $(shell git rev-parse --short HEAD)
BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
COMMIT := $(shell git rev-parse --short HEAD)
DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
PROJECT_NAME := $(shell basename "$(PWD)")
# Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
POSTGRES_PASS ?= yourpasswordhere
# Use linker flags to provide version/build settings.
LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"
# PID file will keep the process id of the server.
PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
@@ -94,11 +94,11 @@ install:
@echo " > Checking if there is any missing dependencies..."
$(GOENVVARS) go get $(GOCMD)/... $(get)
## run: Run Hermez node.
run:
## run-node: Run Hermez node.
run-node:
@bash -c "$(MAKE) clean build"
@echo " > Running $(PROJECT_NAME)"
@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
@$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)
## run-proof-mock: Run proof server mock API.
run-proof-mock: stop-proof-mock

View File

@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
After setting the config, you can build and run the Hermez Node as a synchronizer:
```shell
$ make run
$ make run-node
```
Or build and run as a coordinator, and also passing the config file from other location:
```shell
$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
```
To check the useful make commands:

View File

@@ -60,6 +60,7 @@ func NewAPI(
// Transaction
v1.POST("/transactions-pool", a.postPoolTx)
v1.GET("/transactions-pool/:id", a.getPoolTx)
v1.GET("/transactions-pool", a.getPoolTxs)
}
// Add explorer endpoints

View File

@@ -522,11 +522,16 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000),
}
stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
}, constants, &stateapiupdater.RecommendedFeePolicy{
PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
})
if err != nil {
panic(err)
}
// Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords)

View File

@@ -109,7 +109,7 @@ func (a *API) getFullBatch(c *gin.Context) {
// Fetch txs forged in the batch from historyDB
maxTxsPerBatch := uint(2048) //nolint:gomnd
txs, _, err := a.h.GetTxsAPI(
nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
nil, nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
retSQLErr(err, c)

View File

@@ -96,6 +96,32 @@ func parseQueryBJJ(c querier) (*babyjub.PublicKeyComp, error) {
return hezStringToBJJ(bjjStr, name)
}
// parseQueryPoolL2TxState parses the "state" query parameter into a pool L2
// transaction state. It returns (nil, nil) when the parameter is absent, a
// pointer to the state when it is one of the valid options, and a wrapped
// error for any other value. Note: the returned error message previously
// contained a typo ("docmentation"); it is fixed here.
func parseQueryPoolL2TxState(c querier) (*common.PoolL2TxState, error) {
	const name = "state"
	stateStr := c.Query(name)
	if stateStr == "" {
		return nil, nil
	}
	// Accept only the known states; a single multi-value case replaces the
	// original repetitive per-state cases.
	state := common.PoolL2TxState(stateStr)
	switch state {
	case common.PoolL2TxStatePending, common.PoolL2TxStateForged,
		common.PoolL2TxStateForging, common.PoolL2TxStateInvalid:
		return &state, nil
	}
	return nil, tracerr.Wrap(fmt.Errorf(
		"invalid %s, %s is not a valid option. Check the valid options in the documentation",
		name, stateStr,
	))
}
func parseQueryTxType(c querier) (*common.TxType, error) {
const name = "type"
typeStr := c.Query(name)
@@ -146,6 +172,18 @@ func parseIdx(c querier) (*common.Idx, error) {
return stringToIdx(idxStr, name)
}
// parseFromIdx reads the "fromAccountIndex" query parameter and delegates
// its conversion to an account index to stringToIdx.
func parseFromIdx(c querier) (*common.Idx, error) {
	const name = "fromAccountIndex"
	return stringToIdx(c.Query(name), name)
}
// parseToIdx reads the "toAccountIndex" query parameter and delegates its
// conversion to an account index to stringToIdx.
func parseToIdx(c querier) (*common.Idx, error) {
	const name = "toAccountIndex"
	return stringToIdx(c.Query(name), name)
}
func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.PublicKeyComp, *common.Idx, error) {
// TokenID
tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
@@ -181,6 +219,47 @@ func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.
return tokenID, addr, bjj, idx, nil
}
// parseTxsHistoryFilters extracts the filters accepted by the transaction
// history endpoint from the query string: tokenId, hezEthereumAddress, BJJ,
// fromAccountIndex and toAccountIndex. hezEthereumAddress and BJJ are
// mutually exclusive, and the account-index filters cannot be combined with
// the token/address/BJJ filters.
func parseTxsHistoryFilters(c querier) (*common.TokenID, *ethCommon.Address,
	*babyjub.PublicKeyComp, *common.Idx, *common.Idx, error) {
	// fail wraps an error into the 6-value error return shape used below.
	fail := func(err error) (*common.TokenID, *ethCommon.Address,
		*babyjub.PublicKeyComp, *common.Idx, *common.Idx, error) {
		return nil, nil, nil, nil, nil, tracerr.Wrap(err)
	}
	// TokenID
	tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
	if err != nil {
		return fail(err)
	}
	var tokenID *common.TokenID
	if tid != nil {
		tokenID = new(common.TokenID)
		*tokenID = common.TokenID(*tid)
	}
	// Hez Eth addr
	addr, err := parseQueryHezEthAddr(c)
	if err != nil {
		return fail(err)
	}
	// BJJ
	bjj, err := parseQueryBJJ(c)
	if err != nil {
		return fail(err)
	}
	if addr != nil && bjj != nil {
		return fail(errors.New("bjj and hezEthereumAddress params are incompatible"))
	}
	// from Idx
	fromIdx, err := parseFromIdx(c)
	if err != nil {
		return fail(err)
	}
	// to Idx
	toIdx, err := parseToIdx(c)
	if err != nil {
		return fail(err)
	}
	if (fromIdx != nil || toIdx != nil) && (addr != nil || bjj != nil || tokenID != nil) {
		return fail(errors.New("accountIndex is incompatible with BJJ, hezEthereumAddress and tokenId"))
	}
	return tokenID, addr, bjj, fromIdx, toIdx, nil
}
func parseTokenFilters(c querier) ([]common.TokenID, []string, string, error) {
idsStr := c.Query("ids")
symbolsStr := c.Query("symbols")

View File

@@ -2,10 +2,12 @@ package stateapiupdater
import (
"database/sql"
"fmt"
"sync"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
)
@@ -17,11 +19,45 @@ type Updater struct {
vars common.SCVariablesPtr
consts historydb.Constants
rw sync.RWMutex
rfp *RecommendedFeePolicy
}
// RecommendedFeePolicy describes how the recommended fee exposed by the
// API is calculated.
type RecommendedFeePolicy struct {
// PolicyType selects the strategy used to compute the recommended fee.
PolicyType RecommendedFeePolicyType `validate:"required"`
// StaticValue is the fee (in USD) used when PolicyType is Static.
StaticValue float64
}
// RecommendedFeePolicyType describes the different available recommended fee strategies
type RecommendedFeePolicyType string
const (
// RecommendedFeePolicyTypeStatic always gives the same StaticValue as recommended fee
RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
// RecommendedFeePolicyTypeAvgLastHour sets the recommended fee using the average fee of the last hour
RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
)
// valid reports whether the recommended fee policy configuration is one of
// the supported strategies. A Static policy with a zero StaticValue is
// accepted but logged, since it is likely a configuration oversight.
// The log message typo ("RcommendedFee") is fixed here.
func (rfp *RecommendedFeePolicy) valid() bool {
	switch rfp.PolicyType {
	case RecommendedFeePolicyTypeStatic:
		if rfp.StaticValue == 0 {
			log.Warn("RecommendedFee is set to 0 USD, and the policy is static")
		}
		return true
	case RecommendedFeePolicyTypeAvgLastHour:
		return true
	default:
		return false
	}
}
// NewUpdater creates a new Updater
func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *Updater {
consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
if ok := rfp.valid(); !ok {
return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
}
u := Updater{
hdb: hdb,
config: *config,
@@ -31,9 +67,10 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
ForgeDelay: config.ForgeDelay,
},
},
rfp: rfp,
}
u.SetSCVars(vars.AsPtr())
return &u
return &u, nil
}
// Store the State in the HistoryDB
@@ -65,13 +102,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *Updater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
switch u.rfp.PolicyType {
case RecommendedFeePolicyTypeStatic:
u.rw.Lock()
u.state.RecommendedFee = common.RecommendedFee{
ExistingAccount: u.rfp.StaticValue,
CreatesAccount: u.rfp.StaticValue,
CreatesAccountInternal: u.rfp.StaticValue,
}
u.rw.Unlock()
case RecommendedFeePolicyTypeAvgLastHour:
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
default:
return tracerr.New("Invalid recommende fee policy")
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
}

View File

@@ -415,6 +415,55 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/Error500'
get:
tags:
- Coordinator
summary: Get transactions that are in the pool.
operationId: getPoolTxs
parameters:
- name: state
in: query
required: false
description: State of the transactions, e.g. "pend"
schema:
$ref: '#/components/schemas/PoolL2TransactionState'
- name: fromAccountIndex
in: query
required: false
description: Id of the from account
schema:
$ref: '#/components/schemas/AccountIndex'
- name: toAccountIndex
in: query
required: false
description: Id of the to account
schema:
$ref: '#/components/schemas/AccountIndex'
responses:
'200':
description: Successful operation.
content:
application/json:
schema:
$ref: '#/components/schemas/PoolL2Transactions'
'400':
description: Bad request.
content:
application/json:
schema:
$ref: '#/components/schemas/Error400'
'404':
description: Not found.
content:
application/json:
schema:
$ref: '#/components/schemas/Error404'
'500':
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/Error500'
'/transactions-pool/{id}':
get:
tags:
@@ -487,10 +536,16 @@ paths:
required: false
schema:
$ref: '#/components/schemas/BJJ'
- name: accountIndex
- name: fromAccountIndex
in: query
required: false
description: Only get transactions sent from or to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
description: Only get transactions sent from a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
schema:
$ref: '#/components/schemas/AccountIndex'
- name: toAccountIndex
in: query
required: false
description: Only get transactions sent to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
schema:
$ref: '#/components/schemas/AccountIndex'
- name: batchNum
@@ -1439,6 +1494,14 @@ components:
- requestFee
- requestNonce
- token
PoolL2Transactions:
type: object
properties:
transactions:
type: array
description: List of pool l2 transactions
items:
$ref: '#/components/schemas/PoolL2Transaction'
TransactionId:
type: string
description: Identifier for transactions. Used for any kind of transaction (both L1 and L2). More info on how the identifiers are built [here](https://idocs.hermez.io/#/spec/architecture/db/README?id=txid)

View File

@@ -9,7 +9,7 @@ import (
func (a *API) getHistoryTxs(c *gin.Context) {
// Get query parameters
tokenID, addr, bjj, idx, err := parseExitFilters(c)
tokenID, addr, bjj, fromIdx, toIdx, err := parseTxsHistoryFilters(c)
if err != nil {
retBadReq(err, c)
return
@@ -35,7 +35,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
// Fetch txs from historyDB
txs, pendingItems, err := a.h.GetTxsAPI(
addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
addr, bjj, tokenID, fromIdx, toIdx, batchNum, txType, fromItem, limit, order,
)
if err != nil {
retSQLErr(err, c)

View File

@@ -324,8 +324,8 @@ func TestGetHistoryTxs(t *testing.T) {
idx, err := stringToIdx(idxStr, "")
assert.NoError(t, err)
path = fmt.Sprintf(
"%s?accountIndex=%s&limit=%d",
endpoint, idxStr, limit,
"%s?fromAccountIndex=%s&toAccountIndex=%s&limit=%d",
endpoint, idxStr, idxStr, limit,
)
err = doGoodReqPaginated(path, historydb.OrderAsc, &testTxsResponse{}, appendIter)
assert.NoError(t, err)
@@ -431,8 +431,8 @@ func TestGetHistoryTxs(t *testing.T) {
assertTxs(t, []testTx{}, fetchedTxs)
// 400
path = fmt.Sprintf(
"%s?accountIndex=%s&hezEthereumAddress=%s",
endpoint, idx, account.EthAddr,
"%s?fromAccountIndex=%s&toAccountIndex=%s&hezEthereumAddress=%s",
endpoint, idx, idx, account.EthAddr,
)
err = doBadReq("GET", path, nil, 400)
assert.NoError(t, err)

View File

@@ -55,6 +55,41 @@ func (a *API) getPoolTx(c *gin.Context) {
c.JSON(http.StatusOK, tx)
}
// getPoolTxs handles GET /transactions-pool: it returns the pool
// transactions matching the optional fromAccountIndex, toAccountIndex and
// state query filters.
func (a *API) getPoolTxs(c *gin.Context) {
	// Parse the optional query filters, rejecting the request on any
	// malformed value.
	fromIdx, err := parseFromIdx(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	toIdx, err := parseToIdx(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	state, err := parseQueryPoolL2TxState(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// Query the L2DB with the parsed filters.
	poolTxs, err := a.l2.GetPoolTxs(fromIdx, toIdx, state)
	if err != nil {
		retSQLErr(err, c)
		return
	}
	// Wrap the matched transactions in a "transactions" JSON object.
	type txsResponse struct {
		Txs []*l2db.PoolTxAPI `json:"transactions"`
	}
	c.JSON(http.StatusOK, &txsResponse{Txs: poolTxs})
}
type receivedPoolTx struct {
TxID common.TxID `json:"id" binding:"required"`
Type common.TxType `json:"type" binding:"required"`

View File

@@ -47,6 +47,10 @@ type testPoolTxReceive struct {
Token historydb.TokenWithUSD `json:"token"`
}
// testPoolTxsResponse mirrors the JSON body returned by
// GET /transactions-pool: the matched pool transactions wrapped in a
// "transactions" field.
type testPoolTxsResponse struct {
Txs []testPoolTxReceive `json:"transactions"`
}
// testPoolTxSend is a struct to be used as a JSON body
// when testing POST /transactions-pool
type testPoolTxSend struct {
@@ -225,6 +229,24 @@ func TestPoolTxs(t *testing.T) {
err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err)
// GET
// get by idx
fetchedTxs := testPoolTxsResponse{}
require.NoError(t, doGoodReq(
"GET",
endpoint+"?fromAccountIndex=hez:ETH:263",
nil, &fetchedTxs))
assert.Equal(t, 1, len(fetchedTxs.Txs))
assert.Equal(t, "hez:ETH:263", fetchedTxs.Txs[0].FromIdx)
// get by state
require.NoError(t, doGoodReq(
"GET",
endpoint+"?state=pend",
nil, &fetchedTxs))
assert.Equal(t, 4, len(fetchedTxs.Txs))
for _, v := range fetchedTxs.Txs {
assert.Equal(t, common.PoolL2TxStatePending, v.State)
}
// GET
endpoint += "/"
for _, tx := range tc.poolTxsToReceive {
fetchedTx := testPoolTxReceive{}

View File

@@ -73,6 +73,9 @@ of the node configuration. Please, check the `type APIServer` at
monitor the size of the folder to avoid running out of space.
- The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section.
- The node requires a web3 RPC server to work. The node has only been tested
with geth and may not work correctly with other Ethereum node
implementations.
## Building

View File

@@ -145,3 +145,11 @@ Coordinator = true
BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
LightScrypt = true
# RollupVerifierIndex = 0
[RecommendedFeePolicy]
# Strategy used to calculate the recommended fee that the API will expose.
# Available options:
# - Static: always return the same value (StaticValue) in USD
# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
PolicyType = "Static"
StaticValue = 0.99

View File

@@ -35,18 +35,18 @@ const (
)
var (
// Version represents the program based on the git tag
Version = "v0.1.0"
// Build represents the program based on the git commit
Build = "dev"
// Date represents the date of application was built
Date = ""
// version represents the program based on the git tag
version = "v0.1.0"
// commit represents the program based on the git commit
commit = "dev"
// date represents the date of application was built
date = ""
)
func cmdVersion(c *cli.Context) error {
fmt.Printf("Version = \"%v\"\n", Version)
fmt.Printf("Build = \"%v\"\n", Build)
fmt.Printf("Date = \"%v\"\n", Date)
fmt.Printf("Version = \"%v\"\n", version)
fmt.Printf("Build = \"%v\"\n", commit)
fmt.Printf("Date = \"%v\"\n", date)
return nil
}
@@ -421,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
func main() {
app := cli.NewApp()
app.Name = "hermez-node"
app.Version = Version
app.Version = version
flags := []cli.Flag{
&cli.StringFlag{
Name: flagMode,

View File

@@ -8,6 +8,7 @@ import (
"github.com/BurntSushi/toml"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/tracerr"
@@ -299,7 +300,8 @@ type Node struct {
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server
// URL is the URL of the web3 ethereum-node RPC server. Only
// geth is officially supported.
URL string `validate:"required"`
} `validate:"required"`
Synchronizer struct {
@@ -346,8 +348,9 @@ type Node struct {
// can wait to stablish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
}
// APIServer is the api server configuration parameters

View File

@@ -1,3 +1,43 @@
/*
Package coordinator handles all the logic related to forging batches as a
coordinator in the hermez network.
The forging of batches is done with a pipeline in order to allow multiple
batches being forged in parallel. The maximum number of batches that can be
forged in parallel is determined by the number of available proof servers.
The Coordinator begins with the pipeline stopped. The main Coordinator
goroutine keeps listening for synchronizer events sent by the node package,
which allow the coordinator to determine if the configured forger address is
allowed to forge at the current block or not. When the forger address becomes
allowed to forge, the pipeline is started, and when it terminates being allowed
to forge, the pipeline is stopped.
The Pipeline consists of two goroutines. The first one is in charge of
preparing a batch internally, which involves making a selection of transactions
and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
an idle proof server. This goroutine will keep preparing batches while there
are idle proof servers, if the forging policy determines that a batch should be
forged in the current state. The second goroutine is in charge of waiting for
the proof server to finish computing the proof, retrieving it, preparing the
arguments for the `forgeBatch` Rollup transaction, and sending the result to
the TxManager. All the batch information moves between functions and
goroutines via the BatchInfo struct.
Finally, the TxManager contains a single goroutine that makes forgeBatch
ethereum transactions for the batches sent by the Pipeline, and keeps them in a
list to check them periodically. In the periodic checks, the ethereum
transaction is checked for successfulness, and it's only forgotten after a
number of confirmation blocks have passed after being successfully mined. At
any point if a transaction failure is detected, the TxManager can signal the
Coordinator to reset the Pipeline in order to reforge the failed batches.
The Coordinator goroutine acts as a manager. The synchronizer events (which
notify about new blocks and associated new state) that it receives are
broadcasted to the Pipeline and the TxManager. This allows the Coordinator,
Pipeline and TxManager to have a copy of the current hermez network state
required to perform their duties.
*/
package coordinator
import (

View File

@@ -456,7 +456,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
// and pagination info
func (hdb *HistoryDB) GetTxsAPI(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp,
tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
tokenID *common.TokenID, fromIdx, toIdx *common.Idx, batchNum *uint, txType *common.TxType,
fromItem, limit *uint, order string,
) ([]TxAPI, uint64, error) {
// Warning: amount_success and deposit_amount_success have true as default for
@@ -508,14 +508,32 @@ func (hdb *HistoryDB) GetTxsAPI(
nextIsAnd = true
}
// idx filter
if idx != nil {
if fromIdx != nil && toIdx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
args = append(args, idx, idx)
queryStr += "(tx.effective_from_idx = ? "
queryStr += "OR tx.to_idx = ?) "
args = append(args, fromIdx, toIdx)
nextIsAnd = true
} else if fromIdx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.effective_from_idx = ? "
nextIsAnd = true
} else if toIdx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.to_idx = ? "
args = append(args, toIdx)
nextIsAnd = true
}
// batchNum filter

View File

@@ -49,8 +49,6 @@ type KVDB struct {
CurrentIdx common.Idx
CurrentBatch common.BatchNum
m sync.Mutex
mutexDelOld sync.Mutex
wg sync.WaitGroup
last *Last
}
@@ -446,15 +444,10 @@ func (k *KVDB) MakeCheckpoint() error {
return tracerr.Wrap(err)
}
}
k.wg.Add(1)
go func() {
delErr := k.DeleteOldCheckpoints()
if delErr != nil {
log.Errorw("delete old checkpoints failed", "err", delErr)
}
k.wg.Done()
}()
// delete old checkpoints
if err := k.deleteOldCheckpoints(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
@@ -516,12 +509,9 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
return checkpoints, nil
}
// DeleteOldCheckpoints deletes old checkpoints when there are more than
// deleteOldCheckpoints deletes old checkpoints when there are more than
// `s.keep` checkpoints
func (k *KVDB) DeleteOldCheckpoints() error {
k.mutexDelOld.Lock()
defer k.mutexDelOld.Unlock()
func (k *KVDB) deleteOldCheckpoints() error {
list, err := k.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
@@ -594,6 +584,4 @@ func (k *KVDB) Close() {
if k.last != nil {
k.last.close()
}
// wait for deletion of old checkpoints
k.wg.Wait()
}

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"os"
"sync"
"testing"
"github.com/hermeznetwork/hermez-node/common"
@@ -191,67 +190,12 @@ func TestDeleteOldCheckpoints(t *testing.T) {
for i := 0; i < numCheckpoints; i++ {
err = db.MakeCheckpoint()
require.NoError(t, err)
err = db.DeleteOldCheckpoints()
require.NoError(t, err)
checkpoints, err := db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
}
}
func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)
numCheckpoints := 32
var wg sync.WaitGroup
wg.Add(numCheckpoints)
// do checkpoints and check that we never have more than `keep`
// checkpoints.
// 1 async DeleteOldCheckpoint after 1 MakeCheckpoint
for i := 0; i < numCheckpoints; i++ {
err = db.MakeCheckpoint()
require.NoError(t, err)
go func() {
err = db.DeleteOldCheckpoints()
require.NoError(t, err)
wg.Done()
}()
}
wg.Wait()
checkpoints, err := db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
wg.Add(numCheckpoints)
// do checkpoints and check that we never have more than `keep`
// checkpoints
// 32 concurrent DeleteOldCheckpoint after 32 MakeCheckpoint
for i := 0; i < numCheckpoints; i++ {
err = db.MakeCheckpoint()
require.NoError(t, err)
}
for i := 0; i < numCheckpoints; i++ {
go func() {
err = db.DeleteOldCheckpoints()
require.NoError(t, err)
wg.Done()
}()
}
wg.Wait()
checkpoints, err = db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
}
func TestGetCurrentIdx(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)

View File

@@ -127,3 +127,57 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
txID,
))
}
// GetPoolTxs returns the L2 transactions of the pool, optionally filtered by
// sender account index, receiver account index and/or transaction state.
// Transactions flagged as externally deleted are never returned.
//
// Bug fix: the original unconditionally appended "AND NOT external_delete",
// which produced invalid SQL (an AND with no preceding WHERE) when no filter
// was supplied — all three filters are optional on this endpoint. The final
// clause now uses WHERE when no filter added one.
func (l2db *L2DB) GetPoolTxs(fromIdx, toIdx *common.Idx, state *common.PoolL2TxState) ([]*PoolTxAPI, error) {
	cancel, err := l2db.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	defer l2db.apiConnCon.Release()
	// Apply filters
	nextIsAnd := false
	queryStr := selectPoolTxAPI
	var args []interface{}
	if state != nil {
		queryStr += "WHERE state = ? "
		args = append(args, state)
		nextIsAnd = true
	}
	if fromIdx != nil && toIdx != nil {
		// When both indexes are given, match transactions sent from OR to them.
		if nextIsAnd {
			queryStr += "AND ("
		} else {
			queryStr += "WHERE ("
		}
		queryStr += "tx_pool.from_idx = ? "
		queryStr += "OR tx_pool.to_idx = ?) "
		args = append(args, fromIdx, toIdx)
		nextIsAnd = true
	} else if fromIdx != nil {
		if nextIsAnd {
			queryStr += "AND "
		} else {
			queryStr += "WHERE "
		}
		queryStr += "tx_pool.from_idx = ? "
		args = append(args, fromIdx)
		nextIsAnd = true
	} else if toIdx != nil {
		if nextIsAnd {
			queryStr += "AND "
		} else {
			queryStr += "WHERE "
		}
		queryStr += "tx_pool.to_idx = ? "
		args = append(args, toIdx)
		nextIsAnd = true
	}
	// Always exclude externally deleted transactions; start the clause with
	// WHERE when no filter has added one yet, otherwise the SQL is invalid.
	if nextIsAnd {
		queryStr += "AND NOT external_delete;"
	} else {
		queryStr += "WHERE NOT external_delete;"
	}
	query := l2db.dbRead.Rebind(queryStr)
	txs := []*PoolTxAPI{}
	err = meddler.QueryAll(
		l2db.dbRead, &txs,
		query,
		args...)
	return txs, tracerr.Wrap(err)
}

View File

@@ -311,6 +311,28 @@ func TestGetPending(t *testing.T) {
}
}
// TestL2DB_GetPoolTxs checks that GetPoolTxs returns exactly the inserted
// pool transactions that match the from/to account index filters.
func TestL2DB_GetPoolTxs(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
// best-effort setup: only logged; later assertions will fail if the
// history DB was not prepared correctly
log.Error("Error prepare historyDB", err)
}
poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err)
state := common.PoolL2TxState("pend")
idx := common.Idx(256)
var pendingTxs []*common.PoolL2Tx
for i := range poolL2Txs {
// insert (and expect back) only the txs that touch the filtered index
if poolL2Txs[i].FromIdx == idx || poolL2Txs[i].ToIdx == idx {
err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err)
pendingTxs = append(pendingTxs, &poolL2Txs[i])
}
}
fetchedTxs, err := l2DBWithACC.GetPoolTxs(&idx, &idx, &state)
require.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
}
func TestStartForging(t *testing.T) {
// Generate txs
var fakeBatchNum common.BatchNum = 33

View File

@@ -227,12 +227,6 @@ func (s *StateDB) MakeCheckpoint() error {
return s.db.MakeCheckpoint()
}
// DeleteOldCheckpoints deletes old checkpoints when there are more than
// `cfg.keep` checkpoints
func (s *StateDB) DeleteOldCheckpoints() error {
return s.db.DeleteOldCheckpoints()
}
// CurrentBatch returns the current in-memory CurrentBatch of the StateDB.db
func (s *StateDB) CurrentBatch() common.BatchNum {
return s.db.CurrentBatch

View File

@@ -7,7 +7,6 @@ import (
"math/big"
"os"
"strings"
"sync"
"testing"
ethCommon "github.com/ethereum/go-ethereum/common"
@@ -589,48 +588,6 @@ func TestDeleteOldCheckpoints(t *testing.T) {
for i := 0; i < numCheckpoints; i++ {
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err := sdb.DeleteOldCheckpoints()
require.NoError(t, err)
checkpoints, err := sdb.db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
}
}
// TestConcurrentDeleteOldCheckpoints performs almost the same test than
// kvdb/kvdb_test.go TestConcurrentDeleteOldCheckpoints, but over the StateDB
func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)
numCheckpoints := 32
// do checkpoints and check that we never have more than `keep`
// checkpoints
for i := 0; i < numCheckpoints; i++ {
err = sdb.MakeCheckpoint()
require.NoError(t, err)
wg := sync.WaitGroup{}
n := 10
wg.Add(n)
for j := 0; j < n; j++ {
go func() {
err := sdb.DeleteOldCheckpoints()
require.NoError(t, err)
checkpoints, err := sdb.db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)
wg.Done()
}()
_, err := sdb.db.ListCheckpoints()
// only checking here for absence of errors, not the count of checkpoints
require.NoError(t, err)
}
wg.Wait()
checkpoints, err := sdb.db.ListCheckpoints()
require.NoError(t, err)
assert.LessOrEqual(t, len(checkpoints), keep)

View File

@@ -1,3 +1,18 @@
/*
Package node does the initialization of all the required objects to either run
as a synchronizer or as a coordinator.
The Node contains several goroutines that run in the background or that
periodically perform tasks. One of this goroutines periodically calls the
`Synchronizer.Sync` function, allowing the synchronization of one block at a
time. After every call to `Synchronizer.Sync`, the Node sends a message to the
Coordinator to notify it about the new synced block (and associated state) or
reorg (and reset state) in case one happens.
Other goroutines perform tasks such as: updating the token prices, update
metrics stored in the historyDB, update recommended fee stored in the
historyDB, run the http API server, run the debug http API server, etc.
*/
package node
import (
@@ -273,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
stateAPIUpdater, err := stateapiupdater.NewUpdater(
historyDB,
&hdbNodeCfg,
initSCVars,
&hdbConsts,
&cfg.RecommendedFeePolicy,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
var coord *coordinator.Coordinator
if mode == ModeCoordinator {

View File

@@ -1,3 +1,35 @@
/*
Package synchronizer synchronizes the hermez network state by querying events
emitted by the three smart contracts: `Hermez.sol` (referred as Rollup here),
`HermezAuctionProtocol.sol` (referred as Auction here) and
`WithdrawalDelayer.sol` (referred as WDelayer here).
The main entry point for synchronization is the `Sync` function, which at most
will synchronize one ethereum block, and all the hermez events that happened in
that block. During a `Sync` call, a reorg can be detected; in such case, uncle
blocks will be discarded, and only in a future `Sync` call correct blocks will
be synced.
The synchronization of the events in each smart contracts are done
in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
use the interface code to read each smart contract state and events found in
"github.com/hermeznetwork/hermez-node/eth". After these three methods are
called, an object of type `common.BlockData` is built containing all the
updates and events that happened in that block, and it is inserted in the
HistoryDB in a single SQL transaction.
`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
state is updated in the StateDB by processing all transactions that have been
forged in that batch.
The consistency of the stored data is guaranteed by the HistoryDB: All the
block information is inserted in a single SQL transaction at the end of the
`Sync` method, once the StateDB has been updated. And every time the
Synchronizer starts, it continues from the last block in the HistoryDB. The
StateDB stores updates organized by checkpoints for every batch, and each batch
is only accessed if it appears in the HistoryDB.
*/
package synchronizer
import (