Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 11:26:44 +01:00)

Compare commits (2 commits)
Comparing fix/packr- ... feature/fa
| Author | SHA1 | Date |
|---|---|---|
|  | 5aa2b0e977 |  |
|  | 2125812e90 |  |
30  .github/workflows/release.yml (vendored)
@@ -1,30 +0,0 @@
-name: goreleaser
-
-on:
-  push:
-    tags:
-      - '*'
-
-jobs:
-  goreleaser:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-      - name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
-      - name: Get packr
-        run: go get -u github.com/gobuffalo/packr
-      - name: Prepare
-        run: git reset --hard
-      - name: Run GoReleaser
-        uses: goreleaser/goreleaser-action@v2
-        with:
-          version: latest
-          args: release --rm-dist
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -1,26 +0,0 @@
-before:
-  hooks:
-    - go mod download
-
-builds:
-  - main: ./cli/node/main.go
-    binary: node
-    id: node
-    goos:
-      - linux
-      - darwin
-    goarch:
-      - amd64
-
-checksum:
-  name_template: 'checksums.txt'
-
-snapshot:
-  name_template: "{{ .Tag }}-next"
-
-changelog:
-  sort: asc
-  filters:
-    exclude:
-      - '^docs:'
-      - '^test:'
12  Makefile
@@ -3,8 +3,8 @@
 # Project variables.
 PACKAGE := github.com/hermeznetwork/hermez-node
 VERSION := $(shell git describe --tags --always)
-COMMIT := $(shell git rev-parse --short HEAD)
-DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
+BUILD := $(shell git rev-parse --short HEAD)
+BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
 PROJECT_NAME := $(shell basename "$(PWD)")
 
 # Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
 POSTGRES_PASS ?= yourpasswordhere
 
 # Use linker flags to provide version/build settings.
-LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"
+LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
 
 # PID file will keep the process id of the server.
 PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
@@ -94,11 +94,11 @@ install:
	@echo " > Checking if there is any missing dependencies..."
	$(GOENVVARS) go get $(GOCMD)/... $(get)
 
-## run-node: Run Hermez node.
-run-node:
+## run: Run Hermez node.
+run:
	@bash -c "$(MAKE) clean build"
	@echo " > Running $(PROJECT_NAME)"
-	@$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)
+	@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
 
 ## run-proof-mock: Run proof server mock API.
 run-proof-mock: stop-proof-mock
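The reordered invocation in the `run` target (global `--mode`/`--cfg` flags before the `run` subcommand) matches how urfave/cli parses global flags: they must precede the command name. A minimal sketch of that behaviour, assuming the CLI is built on github.com/urfave/cli/v2 (the `cli.NewApp`/`cli.StringFlag` usage in `cli/node/main.go` suggests this, but it is an assumption); the flag set and `run` action here are illustrative, not the node's actual implementation:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2" // assumed CLI library
)

func main() {
	app := cli.NewApp()
	app.Name = "hermez-node"
	// Global flags are parsed before the command name, so the binary is
	// invoked as `hermez-node --mode sync --cfg cfg.toml run`, which is the
	// ordering the updated Makefile target uses.
	app.Flags = []cli.Flag{
		&cli.StringFlag{Name: "mode", Required: true},
		&cli.StringFlag{Name: "cfg", Required: true},
	}
	app.Commands = []*cli.Command{{
		Name: "run",
		Action: func(c *cli.Context) error {
			fmt.Printf("running with mode=%s cfg=%s\n", c.String("mode"), c.String("cfg"))
			return nil
		},
	}}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```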
@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
 After setting the config, you can build and run the Hermez Node as a synchronizer:
 
 ```shell
-$ make run-node
+$ make run
 ```
 
 Or build and run as a coordinator, and also passing the config file from other location:
 
 ```shell
-$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
+$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
 ```
 
 To check the useful make commands:
@@ -522,16 +522,11 @@ func TestMain(m *testing.M) {
 		WithdrawalDelay: uint64(3000),
 	}
 
-	stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
+	stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
 		Rollup:   rollupVars,
 		Auction:  auctionVars,
 		WDelayer: wdelayerVars,
-	}, constants, &stateapiupdater.RecommendedFeePolicy{
-		PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
-	})
-	if err != nil {
-		panic(err)
-	}
+	}, constants)
 
 	// Generate test data, as expected to be received/sended from/to the API
 	testCoords := genTestCoordinators(commonCoords)
@@ -2,12 +2,10 @@ package stateapiupdater
 
 import (
 	"database/sql"
-	"fmt"
 	"sync"
 
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
-	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 )
 
@@ -19,45 +17,11 @@ type Updater struct {
 	vars   common.SCVariablesPtr
 	consts historydb.Constants
 	rw     sync.RWMutex
-	rfp    *RecommendedFeePolicy
 }
 
-// RecommendedFeePolicy describes how the recommended fee is calculated
-type RecommendedFeePolicy struct {
-	PolicyType  RecommendedFeePolicyType `validate:"required"`
-	StaticValue float64
-}
-
-// RecommendedFeePolicyType describes the different available recommended fee strategies
-type RecommendedFeePolicyType string
-
-const (
-	// RecommendedFeePolicyTypeStatic always give the same StaticValue as recommended fee
-	RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
-	// RecommendedFeePolicyTypeAvgLastHour set the recommended fee using the average fee of the last hour
-	RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
-)
-
-func (rfp *RecommendedFeePolicy) valid() bool {
-	switch rfp.PolicyType {
-	case RecommendedFeePolicyTypeStatic:
-		if rfp.StaticValue == 0 {
-			log.Warn("RcommendedFee is set to 0 USD, and the policy is static")
-		}
-		return true
-	case RecommendedFeePolicyTypeAvgLastHour:
-		return true
-	default:
-		return false
-	}
-}
-
 // NewUpdater creates a new Updater
 func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
-	consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
-	if ok := rfp.valid(); !ok {
-		return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
-	}
+	consts *historydb.Constants) *Updater {
 	u := Updater{
 		hdb:    hdb,
 		config: *config,
@@ -67,10 +31,9 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
 				ForgeDelay: config.ForgeDelay,
 			},
 		},
-		rfp: rfp,
 	}
 	u.SetSCVars(vars.AsPtr())
-	return &u, nil
+	return &u
 }
 
 // Store the State in the HistoryDB
@@ -102,16 +65,6 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
 
 // UpdateRecommendedFee update Status.RecommendedFee information
 func (u *Updater) UpdateRecommendedFee() error {
-	switch u.rfp.PolicyType {
-	case RecommendedFeePolicyTypeStatic:
-		u.rw.Lock()
-		u.state.RecommendedFee = common.RecommendedFee{
-			ExistingAccount:        u.rfp.StaticValue,
-			CreatesAccount:         u.rfp.StaticValue,
-			CreatesAccountInternal: u.rfp.StaticValue,
-		}
-		u.rw.Unlock()
-	case RecommendedFeePolicyTypeAvgLastHour:
 	recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
 	if err != nil {
 		return tracerr.Wrap(err)
@@ -119,10 +72,6 @@ func (u *Updater) UpdateRecommendedFee() error {
 	u.rw.Lock()
 	u.state.RecommendedFee = *recommendedFee
 	u.rw.Unlock()
-	default:
-		return tracerr.New("Invalid recommende fee policy")
-	}
-
 	return nil
 }
 
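In both versions of `UpdateRecommendedFee`, the write to `u.state.RecommendedFee` is wrapped in `u.rw.Lock()`/`u.rw.Unlock()` because the state snapshot is read concurrently elsewhere. A minimal, self-contained sketch of that read/write pattern with `sync.RWMutex`; the types and names below are illustrative, not the hermez-node ones:

```go
package main

import (
	"fmt"
	"sync"
)

// feeState mimics an updater that owns a snapshot read by many goroutines.
type feeState struct {
	rw  sync.RWMutex
	fee float64
}

// set takes the write lock, as UpdateRecommendedFee does around its state write.
func (s *feeState) set(v float64) {
	s.rw.Lock()
	s.fee = v
	s.rw.Unlock()
}

// get takes the read lock so concurrent readers do not block each other.
func (s *feeState) get() float64 {
	s.rw.RLock()
	defer s.rw.RUnlock()
	return s.fee
}

func main() {
	s := &feeState{}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); s.set(0.99) }()
	go func() { defer wg.Done(); _ = s.get() }()
	wg.Wait()
	fmt.Println("final fee:", s.get())
}
```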
@@ -73,9 +73,6 @@ of the node configuration. Please, check the `type APIServer` at
   monitor the size of the folder to avoid running out of space.
 - The node requires a PostgreSQL database. The parameters of the server and
   database must be set in the `PostgreSQL` section.
-- The node requires a web3 RPC server to work. The node has only been tested
-  with geth and may not work correctly with other ethereum nodes
-  implementations.
 
 ## Building
 
@@ -145,11 +145,3 @@ Coordinator = true
 BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
 LightScrypt = true
 # RollupVerifierIndex = 0
-
-[RecommendedFeePolicy]
-# Strategy used to calculate the recommended fee that the API will expose.
-# Available options:
-# - Static: always return the same value (StaticValue) in USD
-# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
-PolicyType = "Static"
-StaticValue = 0.99
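For reference, the removed `[RecommendedFeePolicy]` block is the kind of section that the node's config package decodes with BurntSushi/toml (the import appears in the config.go hunk further down). A minimal sketch of decoding such a section; the struct names and the standalone `main` are illustrative assumptions, not the node's actual config loader:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// recommendedFeePolicy mirrors the removed TOML section; the struct name is
// illustrative, not the type used by hermez-node.
type recommendedFeePolicy struct {
	PolicyType  string
	StaticValue float64
}

type config struct {
	RecommendedFeePolicy recommendedFeePolicy
}

const raw = `
[RecommendedFeePolicy]
PolicyType = "Static"
StaticValue = 0.99
`

func main() {
	var cfg config
	// toml.Decode fills the struct from the TOML document in `raw`.
	if _, err := toml.Decode(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg.RecommendedFeePolicy)
}
```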
@@ -35,18 +35,18 @@ const (
 )
 
 var (
-	// version represents the program based on the git tag
-	version = "v0.1.0"
-	// commit represents the program based on the git commit
-	commit = "dev"
-	// date represents the date of application was built
-	date = ""
+	// Version represents the program based on the git tag
+	Version = "v0.1.0"
+	// Build represents the program based on the git commit
+	Build = "dev"
+	// Date represents the date of application was built
+	Date = ""
 )
 
 func cmdVersion(c *cli.Context) error {
-	fmt.Printf("Version = \"%v\"\n", version)
-	fmt.Printf("Build = \"%v\"\n", commit)
-	fmt.Printf("Date = \"%v\"\n", date)
+	fmt.Printf("Version = \"%v\"\n", Version)
+	fmt.Printf("Build = \"%v\"\n", Build)
+	fmt.Printf("Date = \"%v\"\n", Date)
 	return nil
 }
 
@@ -421,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
 func main() {
 	app := cli.NewApp()
 	app.Name = "hermez-node"
-	app.Version = version
+	app.Version = Version
 	flags := []cli.Flag{
 		&cli.StringFlag{
 			Name: flagMode,
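The variable rename above only works together with the Makefile's LDFLAGS change: the `-X` names passed to the linker and the package-level string variables in `main` must stay in sync, and a mismatched name simply leaves the default value in place. A minimal sketch of the mechanism as a hypothetical standalone program, not the node binary itself:

```go
package main

import "fmt"

// These defaults are overwritten at link time, e.g.:
//   go build -ldflags "-X=main.Version=v1.2.3 -X=main.Build=abc1234 -X=main.Date=2021-06-01T00:00:00+0000"
// The -X names must match the package path and variable names exactly and the
// variables must be strings; otherwise the defaults below are kept.
var (
	Version = "v0.1.0"
	Build   = "dev"
	Date    = ""
)

func main() {
	fmt.Printf("Version = %q\n", Version)
	fmt.Printf("Build = %q\n", Build)
	fmt.Printf("Date = %q\n", Date)
}
```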
@@ -8,7 +8,6 @@ import (
 
 	"github.com/BurntSushi/toml"
 	ethCommon "github.com/ethereum/go-ethereum/common"
-	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/priceupdater"
 	"github.com/hermeznetwork/tracerr"
@@ -300,8 +299,7 @@ type Node struct {
 	} `validate:"required"`
 	PostgreSQL PostgreSQL `validate:"required"`
 	Web3       struct {
-		// URL is the URL of the web3 ethereum-node RPC server. Only
-		// geth is officially supported.
+		// URL is the URL of the web3 ethereum-node RPC server
 		URL string `validate:"required"`
 	} `validate:"required"`
 	Synchronizer struct {
@@ -348,7 +346,6 @@ type Node struct {
 		// can wait to stablish a SQL connection
 		SQLConnectionTimeout Duration
 	} `validate:"required"`
-	RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
 	Debug       NodeDebug   `validate:"required"`
 	Coordinator Coordinator `validate:"-"`
 }
@@ -1,43 +1,3 @@
-/*
-Package coordinator handles all the logic related to forging batches as a
-coordinator in the hermez network.
-
-The forging of batches is done with a pipeline in order to allow multiple
-batches being forged in parallel. The maximum number of batches that can be
-forged in parallel is determined by the number of available proof servers.
-
-The Coordinator begins with the pipeline stopped. The main Coordinator
-goroutine keeps listening for synchronizer events sent by the node package,
-which allow the coordinator to determine if the configured forger address is
-allowed to forge at the current block or not. When the forger address becomes
-allowed to forge, the pipeline is started, and when it terminates being allowed
-to forge, the pipeline is stopped.
-
-The Pipeline consists of two goroutines. The first one is in charge of
-preparing a batch internally, which involves making a selection of transactions
-and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
-an idle proof server. This goroutine will keep preparing batches while there
-are idle proof servers, if the forging policy determines that a batch should be
-forged in the current state. The second goroutine is in charge of waiting for
-the proof server to finish computing the proof, retreiving it, prepare the
-arguments for the `forgeBatch` Rollup transaction, and sending the result to
-the TxManager. All the batch information moves between functions and
-goroutines via the BatchInfo struct.
-
-Finally, the TxManager contains a single goroutine that makes forgeBatch
-ethereum transactions for the batches sent by the Pipeline, and keeps them in a
-list to check them periodically. In the periodic checks, the ethereum
-transaction is checked for successfulness, and it's only forgotten after a
-number of confirmation blocks have passed after being successfully mined. At
-any point if a transaction failure is detected, the TxManager can signal the
-Coordinator to reset the Pipeline in order to reforge the failed batches.
-
-The Coordinator goroutine acts as a manager. The synchronizer events (which
-notify about new blocks and associated new state) that it receives are
-broadcasted to the Pipeline and the TxManager. This allows the Coordinator,
-Pipeline and TxManager to have a copy of the current hermez network state
-required to perform their duties.
-*/
 package coordinator
 
 import (
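The removed package comment describes a two-stage pipeline: one goroutine prepares batches and feeds ZKInputs to idle proof servers, a second waits for proofs and hands results to a transaction manager. A schematic, self-contained sketch of that shape using channels; it is purely illustrative and does not use the coordinator's actual types or APIs:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// batchInfo stands in for the BatchInfo struct that moves between stages.
type batchInfo struct{ num int }

// prepare stands in for the first goroutine: select txs, compute inputs,
// and hand the batch to the proving stage.
func prepare(ctx context.Context, out chan<- batchInfo) {
	for i := 1; ; i++ {
		select {
		case <-ctx.Done():
			close(out)
			return
		case out <- batchInfo{num: i}:
		}
	}
}

// forge stands in for the second goroutine: wait for the proof and submit
// the result to a tx-manager-like consumer (here just printed).
func forge(in <-chan batchInfo, done *sync.WaitGroup) {
	defer done.Done()
	for b := range in {
		time.Sleep(10 * time.Millisecond) // pretend to wait for a proof server
		fmt.Println("forged batch", b.num)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	batches := make(chan batchInfo) // capacity could model the number of idle proof servers
	var wg sync.WaitGroup
	wg.Add(1)
	go prepare(ctx, batches)
	go forge(batches, &wg)
	wg.Wait()
}
```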
@@ -49,6 +49,8 @@ type KVDB struct {
 	CurrentIdx   common.Idx
 	CurrentBatch common.BatchNum
 	m            sync.Mutex
+	mutexDelOld  sync.Mutex
+	wg           sync.WaitGroup
 	last         *Last
 }
 
@@ -444,10 +446,15 @@ func (k *KVDB) MakeCheckpoint() error {
 			return tracerr.Wrap(err)
 		}
 	}
-	// delete old checkpoints
-	if err := k.deleteOldCheckpoints(); err != nil {
-		return tracerr.Wrap(err)
-	}
+
+	k.wg.Add(1)
+	go func() {
+		delErr := k.DeleteOldCheckpoints()
+		if delErr != nil {
+			log.Errorw("delete old checkpoints failed", "err", delErr)
+		}
+		k.wg.Done()
+	}()
 
 	return nil
 }
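The new `MakeCheckpoint` moves checkpoint pruning off the hot path: deletion now runs in a goroutine tracked by `k.wg`, concurrent runs are serialized by `k.mutexDelOld`, and `Close` waits on the WaitGroup (see the hunks below). A minimal, generic sketch of that pattern, not the KVDB code itself:

```go
package main

import (
	"fmt"
	"log"
	"sync"
	"time"
)

// store sketches an object that triggers background cleanup after each write.
type store struct {
	mutexDelOld sync.Mutex     // serializes concurrent cleanup runs
	wg          sync.WaitGroup // lets close wait for in-flight cleanups
}

func (s *store) checkpoint() {
	// ... write the checkpoint synchronously ...

	// Prune old checkpoints in the background so the caller is not blocked.
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		if err := s.deleteOld(); err != nil {
			log.Println("delete old checkpoints failed:", err)
		}
	}()
}

func (s *store) deleteOld() error {
	s.mutexDelOld.Lock()
	defer s.mutexDelOld.Unlock()
	time.Sleep(5 * time.Millisecond) // stand-in for the actual pruning work
	return nil
}

// close waits for any pending cleanup before shutting down.
func (s *store) close() {
	s.wg.Wait()
	fmt.Println("closed cleanly")
}

func main() {
	s := &store{}
	for i := 0; i < 4; i++ {
		s.checkpoint()
	}
	s.close()
}
```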
@@ -509,9 +516,12 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
 	return checkpoints, nil
 }
 
-// deleteOldCheckpoints deletes old checkpoints when there are more than
+// DeleteOldCheckpoints deletes old checkpoints when there are more than
 // `s.keep` checkpoints
-func (k *KVDB) deleteOldCheckpoints() error {
+func (k *KVDB) DeleteOldCheckpoints() error {
+	k.mutexDelOld.Lock()
+	defer k.mutexDelOld.Unlock()
+
 	list, err := k.ListCheckpoints()
 	if err != nil {
 		return tracerr.Wrap(err)
@@ -584,4 +594,6 @@ func (k *KVDB) Close() {
 	if k.last != nil {
 		k.last.close()
 	}
+	// wait for deletion of old checkpoints
+	k.wg.Wait()
 }
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"sync"
 	"testing"
 
 	"github.com/hermeznetwork/hermez-node/common"
@@ -190,12 +191,67 @@ func TestDeleteOldCheckpoints(t *testing.T) {
 	for i := 0; i < numCheckpoints; i++ {
 		err = db.MakeCheckpoint()
 		require.NoError(t, err)
+		err = db.DeleteOldCheckpoints()
+		require.NoError(t, err)
 		checkpoints, err := db.ListCheckpoints()
 		require.NoError(t, err)
 		assert.LessOrEqual(t, len(checkpoints), keep)
 	}
 }
 
+func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
+	dir, err := ioutil.TempDir("", "tmpdb")
+	require.NoError(t, err)
+	defer require.NoError(t, os.RemoveAll(dir))
+
+	keep := 16
+	db, err := NewKVDB(Config{Path: dir, Keep: keep})
+	require.NoError(t, err)
+
+	numCheckpoints := 32
+
+	var wg sync.WaitGroup
+	wg.Add(numCheckpoints)
+
+	// do checkpoints and check that we never have more than `keep`
+	// checkpoints.
+	// 1 async DeleteOldCheckpoint after 1 MakeCheckpoint
+	for i := 0; i < numCheckpoints; i++ {
+		err = db.MakeCheckpoint()
+		require.NoError(t, err)
+		go func() {
+			err = db.DeleteOldCheckpoints()
+			require.NoError(t, err)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	checkpoints, err := db.ListCheckpoints()
+	require.NoError(t, err)
+	assert.LessOrEqual(t, len(checkpoints), keep)
+
+	wg.Add(numCheckpoints)
+
+	// do checkpoints and check that we never have more than `keep`
+	// checkpoints
+	// 32 concurrent DeleteOldCheckpoint after 32 MakeCheckpoint
+	for i := 0; i < numCheckpoints; i++ {
+		err = db.MakeCheckpoint()
+		require.NoError(t, err)
+	}
+	for i := 0; i < numCheckpoints; i++ {
+		go func() {
+			err = db.DeleteOldCheckpoints()
+			require.NoError(t, err)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	checkpoints, err = db.ListCheckpoints()
+	require.NoError(t, err)
+	assert.LessOrEqual(t, len(checkpoints), keep)
+}
+
 func TestGetCurrentIdx(t *testing.T) {
 	dir, err := ioutil.TempDir("", "tmpdb")
 	require.NoError(t, err)
@@ -227,6 +227,12 @@ func (s *StateDB) MakeCheckpoint() error {
 	return s.db.MakeCheckpoint()
 }
 
+// DeleteOldCheckpoints deletes old checkpoints when there are more than
+// `cfg.keep` checkpoints
+func (s *StateDB) DeleteOldCheckpoints() error {
+	return s.db.DeleteOldCheckpoints()
+}
+
 // CurrentBatch returns the current in-memory CurrentBatch of the StateDB.db
 func (s *StateDB) CurrentBatch() common.BatchNum {
 	return s.db.CurrentBatch
@@ -7,6 +7,7 @@ import (
 	"math/big"
 	"os"
 	"strings"
+	"sync"
 	"testing"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
@@ -588,6 +589,48 @@ func TestDeleteOldCheckpoints(t *testing.T) {
 	for i := 0; i < numCheckpoints; i++ {
 		err = sdb.MakeCheckpoint()
 		require.NoError(t, err)
+		err := sdb.DeleteOldCheckpoints()
+		require.NoError(t, err)
 		checkpoints, err := sdb.db.ListCheckpoints()
 		require.NoError(t, err)
 		assert.LessOrEqual(t, len(checkpoints), keep)
+	}
+}
+
+// TestConcurrentDeleteOldCheckpoints performs almost the same test than
+// kvdb/kvdb_test.go TestConcurrentDeleteOldCheckpoints, but over the StateDB
+func TestConcurrentDeleteOldCheckpoints(t *testing.T) {
+	dir, err := ioutil.TempDir("", "tmpdb")
+	require.NoError(t, err)
+	defer require.NoError(t, os.RemoveAll(dir))
+
+	keep := 16
+	sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
+	require.NoError(t, err)
+
+	numCheckpoints := 32
+	// do checkpoints and check that we never have more than `keep`
+	// checkpoints
+	for i := 0; i < numCheckpoints; i++ {
+		err = sdb.MakeCheckpoint()
+		require.NoError(t, err)
+		wg := sync.WaitGroup{}
+		n := 10
+		wg.Add(n)
+		for j := 0; j < n; j++ {
+			go func() {
+				err := sdb.DeleteOldCheckpoints()
+				require.NoError(t, err)
+				checkpoints, err := sdb.db.ListCheckpoints()
+				require.NoError(t, err)
+				assert.LessOrEqual(t, len(checkpoints), keep)
+				wg.Done()
+			}()
+			_, err := sdb.db.ListCheckpoints()
+			// only checking here for absence of errors, not the count of checkpoints
+			require.NoError(t, err)
+		}
+		wg.Wait()
+		checkpoints, err := sdb.db.ListCheckpoints()
+		require.NoError(t, err)
+		assert.LessOrEqual(t, len(checkpoints), keep)
@@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c
 	if number == -1 {
 		blockNum = nil
 	}
-	block, err := c.client.BlockByNumber(ctx, blockNum)
+	header, err := c.client.HeaderByNumber(ctx, blockNum)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
 	b := &common.Block{
-		Num:        block.Number().Int64(),
-		Timestamp:  time.Unix(int64(block.Time()), 0),
-		ParentHash: block.ParentHash(),
-		Hash:       block.Hash(),
+		Num:        header.Number.Int64(),
+		Timestamp:  time.Unix(int64(header.Time), 0),
+		ParentHash: header.ParentHash,
+		Hash:       header.Hash(),
 	}
 	return b, nil
 }
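Switching from `BlockByNumber` to `HeaderByNumber` avoids fetching full block bodies when only header fields are needed. A standalone sketch using go-ethereum's `ethclient`; the RPC endpoint is a placeholder and the program is not part of the hermez-node code:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; point this at a real JSON-RPC server.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}

	// nil asks for the latest header, mirroring the `number == -1` case above.
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}

	// The same fields the diff now reads from the header instead of the full block.
	fmt.Println("num:       ", header.Number.Int64())
	fmt.Println("timestamp: ", time.Unix(int64(header.Time), 0))
	fmt.Println("parentHash:", header.ParentHash)
	fmt.Println("hash:      ", header.Hash())
}
```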
26  node/node.go
@@ -1,18 +1,3 @@
-/*
-Package node does the initialization of all the required objects to either run
-as a synchronizer or as a coordinator.
-
-The Node contains several goroutines that run in the background or that
-periodically perform tasks. One of this goroutines periodically calls the
-`Synchronizer.Sync` function, allowing the synchronization of one block at a
-time. After every call to `Synchronizer.Sync`, the Node sends a message to the
-Coordinator to notify it about the new synced block (and associated state) or
-reorg (and resetted state) in case one happens.
-
-Other goroutines perform tasks such as: updating the token prices, update
-metrics stored in the historyDB, update recommended fee stored in the
-historyDB, run the http API server, run the debug http API server, etc.
-*/
 package node
 
 import (
@@ -288,16 +273,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		return nil, tracerr.Wrap(err)
 	}
 
-	stateAPIUpdater, err := stateapiupdater.NewUpdater(
-		historyDB,
-		&hdbNodeCfg,
-		initSCVars,
-		&hdbConsts,
-		&cfg.RecommendedFeePolicy,
-	)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
+	stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
 
 	var coord *coordinator.Coordinator
 	if mode == ModeCoordinator {
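With the error return gone, `NewUpdater` can be called inline as above. Separately, the package comment removed earlier in node/node.go describes the node's main loop: periodically call `Synchronizer.Sync` for at most one block and notify the coordinator about the result. A schematic sketch of that loop shape; it is illustrative only, and the real signatures and types in the node package differ:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// syncOnce stands in for Synchronizer.Sync: it advances at most one block.
func syncOnce(block int) (int, error) {
	return block + 1, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	// Channel used to tell a coordinator-like consumer about newly synced blocks.
	synced := make(chan int, 1)
	go func() {
		for b := range synced {
			fmt.Println("coordinator notified of block", b)
		}
	}()

	block := 0
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			close(synced)
			// Give the consumer a moment to drain before exiting the sketch.
			time.Sleep(10 * time.Millisecond)
			return
		case <-ticker.C:
			next, err := syncOnce(block)
			if err != nil {
				fmt.Println("sync error:", err)
				continue
			}
			block = next
			synced <- block
		}
	}
}
```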
@@ -1,35 +1,3 @@
-/*
-Package synchronizer synchronizes the hermez network state by querying events
-emitted by the three smart contracts: `Hermez.sol` (referred as Rollup here),
-`HermezAuctionProtocol.sol` (referred as Auction here) and
-`WithdrawalDelayer.sol` (referred as WDelayer here).
-
-The main entry point for synchronization is the `Sync` function, which at most
-will synchronize one ethereum block, and all the hermez events that happened in
-that block. During a `Sync` call, a reorg can be detected; in such case, uncle
-blocks will be discarded, and only in a future `Sync` call correct blocks will
-be synced.
-
-The synchronization of the events in each smart contracts are done
-in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
-use the interface code to read each smart contract state and events found in
-"github.com/hermeznetwork/hermez-node/eth". After these three methods are
-called, an object of type `common.BlockData` is built containing all the
-updates and events that happened in that block, and it is inserted in the
-HistoryDB in a single SQL transaction.
-
-`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
-transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
-state is updated in the StateDB by processing all transactions that have been
-forged in that batch.
-
-The consistency of the stored data is guaranteed by the HistoryDB: All the
-block information is inserted in a single SQL transaction at the end of the
-`Sync` method, once the StateDB has been updated. And every time the
-Synchronizer starts, it continues from the last block in the HistoryDB. The
-StateDB stores updates organized by checkpoints for every batch, and each batch
-is only accessed if it appears in the HistoryDB.
-*/
 package synchronizer
 
 import (