
Merge remote-tracking branch 'origin/feature/update-smart-contracts' into refactore/rollupUpdateBuckets

# Conflicts:
#	eth/contracts/hermez/Hermez.go
refactore/rollupUpdateBuckets
Mikelle 3 years ago
commit 602e1e7634
20 changed files with 928 additions and 297 deletions
1. .gitignore (+1 -0)
2. Makefile (+135 -0)
3. README.md (+62 -21)
4. apitypes/apitypes.go (+4 -29)
5. cli/node/README.md (+23 -18)
6. cli/node/main.go (+31 -8)
7. config/config.go (+10 -4)
8. coordinator/batch.go (+1 -1)
9. coordinator/coordinator.go (+8 -4)
10. coordinator/pipeline.go (+126 -82)
11. coordinator/pipeline_test.go (+2 -2)
12. coordinator/txmanager.go (+1 -1)
13. db/historydb/historydb.go (+3 -3)
14. db/migrations/0001.sql (+18 -12)
15. db/utils.go (+16 -4)
16. db/utils_test.go (+43 -0)
17. eth/contracts/auction/HermezAuctionProtocol.go (+13 -7)
18. eth/contracts/hermez/Hermez.go (+424 -77)
19. eth/contracts/withdrawdelayer/WithdrawalDelayer.go (+6 -24)
20. node/node.go (+1 -0)

.gitignore (+1 -0)

@ -0,0 +1 @@
bin/

Makefile (+135 -0)

@ -0,0 +1,135 @@
#! /usr/bin/make -f
# Project variables.
PACKAGE := github.com/hermeznetwork/hermez-node
VERSION := $(shell git describe --tags --always)
BUILD := $(shell git rev-parse --short HEAD)
BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
PROJECT_NAME := $(shell basename "$(PWD)")
# Go related variables.
GO_FILES ?= $$(find . -name '*.go' | grep -v vendor)
GOBASE := $(shell pwd)
GOBIN := $(GOBASE)/bin
GOPKG := $(.)
GOENVVARS := GOBIN=$(GOBIN)
GOCMD := $(GOBASE)/cli/node
GOPROOF := $(GOBASE)/test/proofserver/cli
GOBINARY := node
# Project configs.
MODE ?= sync
CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
POSTGRES_PASS ?= yourpasswordhere
# Use linker flags to provide version/build settings.
LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
# PID file will keep the process id of the server.
PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
# Make is verbose in Linux. Make it silent.
MAKEFLAGS += --silent
.PHONY: help
help: Makefile
@echo
@echo " Choose a command run in "$(PROJECT_NAME)":"
@echo
@sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /'
@echo
## test: Run the application check and all tests.
test: govet gocilint test-unit
## test-unit: Run all unit tests.
test-unit:
@echo " > Running unit tests"
$(GOENVVARS) go test -race -p 1 -failfast -timeout 300s -v ./...
## test-api-server: Run the API server using the Go tests.
test-api-server:
 Running unit tests"">
@echo " > Running API server tests"
$(GOENVVARS) FAKE_SERVER=yes go test -timeout 0 ./api -p 1 -count 1 -v
## gofmt: Run `go fmt` for all go files.
gofmt:
@echo " > Format all go files"
$(GOENVVARS) gofmt -w ${GO_FILES}
## govet: Run go vet.
govet:
@echo " > Running go vet"
$(GOENVVARS) go vet ./...
## golint: Run default golint.
golint:
@echo " > Running golint"
$(GOENVVARS) golint -set_exit_status ./...
## gocilint: Run Golang CI Lint.
gocilint:
@echo " > Running Golang CI Lint"
$-golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
## exec: Run given command. e.g.: make exec run="go test ./..."
exec:
GOBIN=$(GOBIN) $(run)
## clean: Clean build files. Runs `go clean` internally.
clean:
@-rm $(GOBIN)/ 2> /dev/null
@echo " > Cleaning build cache"
$(GOENVVARS) go clean
## build: Build the project.
build: install
@echo " > Building Hermez binary..."
@bash -c "$(MAKE) migration-pack"
$(GOENVVARS) go build $(LDFLAGS) -o $(GOBIN)/$(GOBINARY) $(GOCMD)
@bash -c "$(MAKE) migration-clean"
## install: Install missing dependencies. Runs `go get` internally. e.g.: make install get=github.com/foo/bar
install:
 Checking if there is any">
@echo " > Checking if there are any missing dependencies..."
$(GOENVVARS) go get $(GOCMD)/... $(get)
## run: Run Hermez node.
run:
@bash -c "$(MAKE) clean build"
@echo " > Running $(PROJECT_NAME)"
@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
## run-proof-mock: Run proof server mock API.
run-proof-mock: stop-proof-mock
@echo " > Running Proof Server Mock"
$(GOENVVARS) go build -o $(GOBIN)/proof $(GOPROOF)
@$(GOBIN)/proof 2>&1 & echo $$! > $(PID_PROOF_MOCK)
@cat $(PID_PROOF_MOCK) | sed "/^/s/^/ \> Proof Server Mock PID: /"
## stop-proof-mock: Stop proof server mock API.
stop-proof-mock:
@-touch $(PID_PROOF_MOCK)
@-kill -s INT `cat $(PID_PROOF_MOCK)` 2> /dev/null || true
@-rm $(PID_PROOF_MOCK) $(GOBIN)/proof 2> /dev/null || true
## migration-pack: Pack the database migrations into the binary.
migration-pack:
@echo " > Packing the migrations..."
@cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
@cd $(GOBASE)/db && packr2 && cd -
## migration-clean: Clean the database migrations pack.
migration-clean:
@echo " > Cleaning the migrations..."
@cd $(GOBASE)/db && packr2 clean && cd -
## run-database-container: Run the Postgres container
run-database-container:
@echo " > Running the postgreSQL DB..."
@-docker run --rm --name hermez-db -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$(POSTGRES_PASS)" -d postgres
## stop-database-container: Stop the Postgres container
stop-database-container:
@echo " > Stopping the postgreSQL DB..."
@-docker stop hermez-db

README.md (+62 -21)

@ -8,42 +8,75 @@ Go implementation of the Hermez node.
The `hermez-node` has been tested with go version 1.14
### Build
Build the binary and check the current version:
```shell
$ make build
$ bin/node version
```
### Run
First you must edit the default/template config file at [cli/node/cfg.buidler.toml](cli/node/cfg.buidler.toml);
more information about the config file can be found in [cli/node/README.md](cli/node/README.md).
After setting the config, you can build and run the Hermez Node as a synchronizer:
```shell
$ make run
```
Or build and run as a coordinator, also passing the config file from another location:
```shell
$ MODE=coord CONFIG=cli/node/cfg.buidler.toml make run
```
To list the available make commands:
```shell
$ make help
```
### Unit testing
Running the unit tests requires a connection to a PostgreSQL database. You can
start PostgreSQL with docker easily this way (where `yourpasswordhere` should
run PostgreSQL with docker easily this way (where `yourpasswordhere` should
be your password):
```
POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
```shell
$ POSTGRES_PASS="yourpasswordhere" make run-database-container
```
Afterwards, run the tests with the password as env var:
Afterward, run the tests with the password as env var:
```
POSTGRES_PASS=yourpasswordhere go test -p 1 ./...
```shell
$ POSTGRES_PASS="yourpasswordhere" make test
```
NOTE: `-p 1` forces execution of package test in serial. Otherwise they may be
executed in paralel and the test may find unexpected entries in the SQL databse
NOTE: `-p 1` forces execution of package test in serial. Otherwise, they may be
executed in parallel, and the test may find unexpected entries in the SQL database
because it's shared among all tests.
There is an extra temporary option that allows you to run the API server using
the Go tests. This will be removed once the API can be properly initialized,
with data from the synchronizer and so on. To use this, run:
There is an extra temporary option that allows you to run the API server using the
Go tests. It will be removed once the API can be properly initialized with data
from the synchronizer. To use this, run:
```
FAKE_SERVER=yes POSTGRES_PASS=yourpasswordhere go test -timeout 0 ./api -p 1 -count 1 -v`
```shell
$ POSTGRES_PASS="yourpasswordhere" make test-api-server
```
### Lint
All Pull Requests need to pass the configured linter.
To run the linter locally, first install [golangci-lint](https://golangci-lint.run). Afterwards you can check the lints with this command:
To run the linter locally, first, install [golangci-lint](https://golangci-lint.run).
Afterward, you can check the lints with this command:
```
golangci-lint run --timeout=5m -E whitespace -E gosec -E gci -E misspell -E gomnd -E gofmt -E goimports -E golint --exclude-use-default=false --max-same-issues 0
```shell
$ make gocilint
```
## Usage
@ -54,13 +87,13 @@ See [cli/node/README.md](cli/node/README.md)
### Proof Server
The node in mode coordinator requires a proof server (a server that is capable
of calculating proofs from the zkInputs). For testing purposes there is a mock
proof server cli at `test/proofserver/cli`.
The node in mode coordinator requires a proof server (a server capable of
calculating proofs from the zkInputs). There is a mock proof server CLI
at `test/proofserver/cli` for testing purposes.
Usage of `test/proofserver/cli`:
```
```shell
USAGE:
go run ./test/proofserver/cli OPTIONS
@ -71,11 +104,19 @@ OPTIONS:
proving time duration (default 2s)
```
Also, the Makefile commands can be used to run and stop the proof server
in the background:
```shell
$ make run-proof-mock
$ make stop-proof-mock
```
### `/tmp` as tmpfs
For every processed batch, the node builds a temporary exit tree in a key-value
DB stored in `/tmp`. It is highly recommended that `/tmp` is mounted as a RAM
file system in production to avoid unecessary reads an writes to disk. This
file system in production to avoid unnecessary reads and writes to disk. This
can be done by mounting `/tmp` as tmpfs; for example, by having this line in
`/etc/fstab`:
```

apitypes/apitypes.go (+4 -29)

@ -18,7 +18,10 @@ import (
// BigIntStr is used to scan/value *big.Int directly into strings from/to sql DBs.
// It assumes that *big.Int are inserted/fetched to/from the DB using the BigIntMeddler meddler
// defined at github.com/hermeznetwork/hermez-node/db
// defined at github.com/hermeznetwork/hermez-node/db. Since *big.Int is
// stored as DECIMAL in SQL, there's no need to implement Scan()/Value()
// because DECIMALS are encoded/decoded as strings by the sql driver, and
// BigIntStr is already a string.
type BigIntStr string
// NewBigIntStr creates a *BigIntStr from a *big.Int.
@ -31,34 +34,6 @@ func NewBigIntStr(bigInt *big.Int) *BigIntStr {
return &bigIntStr
}
// Scan implements Scanner for database/sql
func (b *BigIntStr) Scan(src interface{}) error {
srcBytes, ok := src.([]byte)
if !ok {
return tracerr.Wrap(fmt.Errorf("can't scan %T into apitypes.BigIntStr", src))
}
// bytes to *big.Int
bigInt := new(big.Int).SetBytes(srcBytes)
// *big.Int to BigIntStr
bigIntStr := NewBigIntStr(bigInt)
if bigIntStr == nil {
return nil
}
*b = *bigIntStr
return nil
}
// Value implements valuer for database/sql
func (b BigIntStr) Value() (driver.Value, error) {
// string to *big.Int
bigInt, ok := new(big.Int).SetString(string(b), 10)
if !ok || bigInt == nil {
return nil, tracerr.Wrap(errors.New("invalid representation of a *big.Int"))
}
// *big.Int to bytes
return bigInt.Bytes(), nil
}
// StrBigInt is used to unmarshal BigIntStr directly into an alias of big.Int
type StrBigInt big.Int
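
As the updated comment above explains, `BigIntStr` keeps its string kind, so a Postgres `DECIMAL` column scans into it through `database/sql`'s default conversions and the deleted `Scan`/`Value` methods are no longer needed. A minimal standalone sketch of that behaviour (not part of the commit; it assumes the development database from the README is running locally with the `hermez` user and the password in `POSTGRES_PASS`):

```go
package main

import (
	"database/sql"
	"fmt"
	"os"

	_ "github.com/lib/pq" // same Postgres driver db/utils.go imports
)

// BigIntStr mirrors apitypes.BigIntStr: a plain string-kind type.
type BigIntStr string

func main() {
	dsn := fmt.Sprintf("host=localhost port=5432 user=hermez password=%s dbname=hermez sslmode=disable",
		os.Getenv("POSTGRES_PASS"))
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var v BigIntStr
	// The driver hands DECIMAL values over as text, so they scan straight
	// into a string-kind type with no custom Scanner/Valuer.
	err = db.QueryRow(`SELECT 115792089237316195423570985008687907853269984665640564039457584007913129639935::DECIMAL(78,0)`).Scan(&v)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // the 78-digit value, unchanged
}
```

The literal used here is 2^256 - 1, the largest value the SNARK field columns ever need to hold.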

cli/node/README.md (+23 -18)

@ -8,7 +8,7 @@ The `hermez-node` has been tested with go version 1.14
## Usage
```
```shell
NAME:
hermez-node - A new cli application
@ -16,18 +16,18 @@ USAGE:
node [global options] command [command options] [arguments...]
VERSION:
0.1.0-alpha
v0.1.0-6-gd8a50c5
COMMANDS:
version Show the application version
importkey Import ethereum private key
genbjj Generate a new BabyJubJub key
wipesql Wipe the SQL DB (HistoryDB and L2DB), leaving the DB in a clean state
wipesql Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state
run Run the hermez-node in the indicated mode
discard Discard blocks up to a specified block number
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--mode MODE Set node MODE (can be "sync" or "coord")
--cfg FILE Node configuration FILE
--help, -h show help (default: false)
--version, -v print the version (default: false)
```
@ -75,7 +75,7 @@ when running the coordinator in sync mode
Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```
```shell
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```
@ -83,7 +83,7 @@ Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.
Now build the node executable:
```
```shell
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
@ -98,35 +98,40 @@ run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer:
```
./node --mode sync --cfg cfg.buidler.toml run
```shell
./node run --mode sync --cfg cfg.buidler.toml
```
Run the node in mode coordinator:
```
./node --mode coord --cfg cfg.buidler.toml run
```shell
./node run --mode coord --cfg cfg.buidler.toml
```
Import an ethereum private key into the keystore:
```
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```shell
./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
Generate a new BabyJubJub key pair:
```shell
./node genbjj
```
./node --mode coord --cfg cfg.buidler.toml genbjj
Check the binary version:
```shell
./node version
```
Wipe the entire SQL database (this will destroy all synchronized and pool
data):
```
./node --mode coord --cfg cfg.buidler.toml wipesql
```shell
./node wipesql --mode coord --cfg cfg.buidler.toml
```
Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
```shell
./node discard --mode coord --cfg cfg.buidler.toml --block 8061330
```

cli/node/main.go (+31 -8)

@ -34,6 +34,22 @@ const (
modeCoord = "coord"
)
var (
// Version represents the program version based on the git tag
Version = "v0.1.0"
// Build represents the program build based on the git commit
Build = "dev"
// Date represents the date the application was built
Date = ""
)
func cmdVersion(c *cli.Context) error {
fmt.Printf("Version = \"%v\"\n", Version)
fmt.Printf("Build = \"%v\"\n", Build)
fmt.Printf("Date = \"%v\"\n", Date)
return nil
}
func cmdGenBJJ(c *cli.Context) error {
sk := babyjub.NewRandPrivKey()
skBuf := [32]byte(sk)
@ -404,8 +420,8 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
func main() {
app := cli.NewApp()
app.Name = "hermez-node"
app.Version = "0.1.0-alpha"
app.Flags = []cli.Flag{
app.Version = Version
flags := []cli.Flag{
&cli.StringFlag{
Name: flagMode,
Usage: fmt.Sprintf("Set node `MODE` (can be \"%v\" or \"%v\")", modeSync, modeCoord),
@ -419,17 +435,23 @@ func main() {
}
app.Commands = []*cli.Command{
{
Name: "version",
Aliases: []string{},
Usage: "Show the application version and build",
Action: cmdVersion,
},
{
Name: "importkey",
Aliases: []string{},
Usage: "Import ethereum private key",
Action: cmdImportKey,
Flags: []cli.Flag{
Flags: append(flags,
&cli.StringFlag{
Name: flagSK,
Usage: "ethereum `PRIVATE_KEY` in hex",
Required: true,
}},
}),
},
{
Name: "genbjj",
@ -443,18 +465,19 @@ func main() {
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " +
"leaving the DB in a clean state",
Action: cmdWipeSQL,
Flags: []cli.Flag{
Flags: append(flags,
&cli.BoolFlag{
Name: flagYes,
Usage: "automatic yes to the prompt",
Required: false,
}},
}),
},
{
Name: "run",
Aliases: []string{},
Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun,
Flags: flags,
},
{
Name: "serveapi",
@ -467,12 +490,12 @@ func main() {
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
Flags: append(flags,
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
}),
},
}
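
The new `Version`, `Build`, and `Date` variables above are plain package-level strings; the Makefile's `LDFLAGS` (see the Makefile diff earlier) fill them in at link time via `-X=main.Version=...` and friends. A tiny standalone sketch of that mechanism (a hypothetical file, not part of this commit):

```go
// versiondemo.go — illustrates the -ldflags "-X" mechanism the Makefile
// uses to stamp cli/node at build time.
package main

import "fmt"

var (
	Version = "dev" // overridden by: -X main.Version=<git tag>
	Build   = ""    // overridden by: -X main.Build=<short commit>
	Date    = ""    // overridden by: -X main.Date=<build date>
)

func main() {
	fmt.Printf("Version = %q\nBuild = %q\nDate = %q\n", Version, Build, Date)
}
```

Building it with `go build -ldflags "-X main.Version=v0.1.0 -X main.Build=602e1e7"` prints the injected values instead of the defaults, which is exactly what `make build` does for `cli/node`.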

config/config.go (+10 -4)

@ -112,11 +112,17 @@ type Coordinator struct {
// MustForgeAtSlotDeadline enables the coordinator to forge slots if
// the empty slots reach the slot deadline.
MustForgeAtSlotDeadline bool
// IgnoreSlotCommitment IgnoreSlotCommitment disables forcing the
// coordinator to forge a slot immediately when the slot is not
// committed. If set to false, the coordinator will immediately forge
// a batch at the beginning of a slot if it's the slot winner.
// IgnoreSlotCommitment disables forcing the coordinator to forge a
// slot immediately when the slot is not committed. If set to false,
// the coordinator will immediately forge a batch at the beginning of a
// slot if it's the slot winner.
IgnoreSlotCommitment bool
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
// batch per slot, only if there are included txs in that batch, or
// pending l1UserTxs in the smart contract. Setting this parameter
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
// and `IgnoreSlotCommitment`.
ForgeOncePerSlotIfTxs bool
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"`

coordinator/batch.go (+1 -1)

@ -85,7 +85,7 @@ type BatchInfo struct {
PublicInputs []*big.Int
L1Batch bool
VerifierIdx uint8
L1UserTxsExtra []common.L1Tx
L1UserTxs []common.L1Tx
L1CoordTxs []common.L1Tx
L1CoordinatorTxsAuths [][]byte
L2Txs []common.L2Tx

coordinator/coordinator.go (+8 -4)

@ -24,10 +24,8 @@ import (
)
var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf(
"no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errSkipBatchByPolicy = fmt.Errorf("skip batch by policy")
)
const (
@ -92,6 +90,12 @@ type Config struct {
// the coordinator will immediately forge a batch at the beginning of
// a slot if it's the slot winner.
IgnoreSlotCommitment bool
// ForgeOncePerSlotIfTxs will make the coordinator forge at most one
// batch per slot, only if there are included txs in that batch, or
// pending l1UserTxs in the smart contract. Setting this parameter
// overrides `ForgeDelay`, `ForgeNoTxsDelay`, `MustForgeAtSlotDeadline`
// and `IgnoreSlotCommitment`.
ForgeOncePerSlotIfTxs bool
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval time.Duration
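
The `ForgeOncePerSlotIfTxs` comment above (and its twin in `config/config.go`) defines a precedence between the forging settings. A condensed, standalone sketch of that decision, for illustration only — the real implementation is split across `forgePolicySkipPreSelection` and `forgePolicySkipPostSelection` in `coordinator/pipeline.go` below, and also involves `MustForgeAtSlotDeadline`:

```go
// policy.go — a reduced illustration of the forge-skip precedence described
// in the Config comments; not the coordinator's actual code.
package policy

import "time"

// cfg is a stand-in for coordinator.Config with only the fields that matter
// for this decision (MustForgeAtSlotDeadline handling is omitted).
type cfg struct {
	ForgeOncePerSlotIfTxs bool
	IgnoreSlotCommitment  bool
	ForgeDelay            time.Duration
	ForgeNoTxsDelay       time.Duration
}

// skipForge reports whether forging should be skipped right now.
func skipForge(c cfg, slotCommitted, pendingTxs bool, sinceLastForge time.Duration) bool {
	if c.ForgeOncePerSlotIfTxs {
		// At most one batch per slot, and only if there is work to do;
		// every delay-based setting is ignored.
		return slotCommitted || !pendingTxs
	}
	if !c.IgnoreSlotCommitment && !slotCommitted {
		// The slot is not committed yet: forge immediately to commit it.
		return false
	}
	if sinceLastForge < c.ForgeDelay {
		return true
	}
	// Nothing to forge: wait for the (typically longer) no-txs delay.
	return !pendingTxs && sinceLastForge < c.ForgeNoTxsDelay
}
```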

coordinator/pipeline.go (+126 -82)

@ -224,8 +224,9 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// 2. Forge the batch internally (make a selection of txs and prepare
// all the smart contract arguments)
var skipReason *string
p.mutexL2DBUpdateDelete.Lock()
batchInfo, err = p.forgeBatch(batchNum)
batchInfo, skipReason, err = p.forgeBatch(batchNum)
p.mutexL2DBUpdateDelete.Unlock()
if ctx.Err() != nil {
return nil, ctx.Err()
@ -234,13 +235,13 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else {
log.Errorw("forgeBatch", "err", err)
}
return nil, tracerr.Wrap(err)
} else if skipReason != nil {
log.Debugw("skipping batch", "batch", batchNum, "reason", *skipReason)
return nil, tracerr.Wrap(errSkipBatchByPolicy)
}
// 3. Send the ZKInputs to the proof server
@ -295,8 +296,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
if p.ctx.Err() != nil {
continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
tracerr.Unwrap(err) == errSkipBatchByPolicy {
continue
} else if err != nil {
p.setErrAtBatchNum(batchNum)
@ -389,25 +389,109 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
return nil
}
// check if we reach the ForgeDelay or not before batch forging
func (p *Pipeline) preForgeBatchCheck(slotCommitted bool, now time.Time) error {
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return errForgeBeforeDelay
// slotCommitted returns true if the current slot has already been committed
func (p *Pipeline) slotCommitted() bool {
// Synchronizer has synchronized a batch in the current slot (setting
// CurrentSlot.ForgerCommitment) or the pipeline has already
// internally-forged a batch in the current slot
return p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged
}
// forgePolicySkipPreSelection is called before doing a tx selection in a batch to
// determine by policy if we should forge the batch or not. Returns true and
// the reason when the forging of the batch must be skipped.
func (p *Pipeline) forgePolicySkipPreSelection(now time.Time) (bool, string) {
// Check if the slot is not yet fulfilled
slotCommitted := p.slotCommitted()
if p.cfg.ForgeOncePerSlotIfTxs {
if slotCommitted {
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed"
}
return false, ""
}
return nil
// Determine if we must commit the slot
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
return false, ""
}
// If we haven't reached the ForgeDelay, skip forging the batch
if now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return true, "we haven't reached the forge delay"
}
return false, ""
}
// forgePolicySkipPostSelection is called after doing a tx selection in a batch to
// determine by policy if we should forge the batch or not. Returns true and
// the reason when the forging of the batch must be skipped.
func (p *Pipeline) forgePolicySkipPostSelection(now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) (bool, string, error) {
// Check if the slot is not yet fulfilled
slotCommitted := p.slotCommitted()
pendingTxs := true
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in an open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
if err != nil {
return false, "", err
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
pendingTxs = false
}
} else {
pendingTxs = false
}
}
if p.cfg.ForgeOncePerSlotIfTxs {
if slotCommitted {
return true, "cfg.ForgeOncePerSlotIfTxs = true and slot already committed",
nil
}
if pendingTxs {
return false, "", nil
}
return true, "cfg.ForgeOncePerSlotIfTxs = true and no pending txs",
nil
}
// Determine if we must commit the slot
if !p.cfg.IgnoreSlotCommitment && !slotCommitted {
return false, "", nil
}
// Check if there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay
if now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
if !pendingTxs {
return true, "no txs to forge and we haven't reached the forge no txs delay",
nil
}
}
return false, "", nil
}
// forgeBatch forges the batchNum batch.
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
skipReason *string, err error) {
// remove transactions from the pool that have been there for too long
_, err = p.purger.InvalidateMaybe(p.l2DB, p.txSelector.LocalAccountsDB(),
p.stats.Sync.LastBlock.Num, int64(batchNum))
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
_, err = p.purger.PurgeMaybe(p.l2DB, p.stats.Sync.LastBlock.Num, int64(batchNum))
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
// Structure to accumulate data and metadata of the batch
now := time.Now()
@ -417,53 +501,48 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
var poolL2Txs []common.PoolL2Tx
var discardedL2Txs []common.PoolL2Tx
var l1UserTxsExtra, l1CoordTxs []common.L1Tx
var l1UserTxs, l1CoordTxs []common.L1Tx
var auths [][]byte
var coordIdxs []common.Idx
// Check if the slot is not yet fulfilled
slotCommitted := p.cfg.IgnoreSlotCommitment
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if err = p.preForgeBatchCheck(slotCommitted, now); err != nil {
return nil, tracerr.Wrap(err)
if skip, reason := p.forgePolicySkipPreSelection(now); skip {
return nil, &reason, nil
}
// 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, tracerr.Wrap(errLastL1BatchNotSynced)
return nil, nil, tracerr.Wrap(errLastL1BatchNotSynced)
}
// 2a: L1+L2 txs
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
_l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, l1UserTxs)
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
} else {
// 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
l1UserTxsExtra = nil
l1UserTxs = nil
}
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if err = p.postForgeBatchCheck(slotCommitted, now, l1UserTxsExtra, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
return nil, tracerr.Wrap(err)
if skip, reason, err := p.forgePolicySkipPostSelection(now,
l1UserTxs, l1CoordTxs, poolL2Txs, batchInfo); err != nil {
return nil, nil, tracerr.Wrap(err)
} else if skip {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, nil, tracerr.Wrap(err)
}
return nil, &reason, tracerr.Wrap(err)
}
if batchInfo.L1Batch {
@ -472,7 +551,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
}
// 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1UserTxs = l1UserTxs
batchInfo.L1CoordTxs = l1CoordTxs
batchInfo.L1CoordinatorTxsAuths = auths
batchInfo.CoordIdxs = coordIdxs
@ -480,10 +559,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs),
batchInfo.BatchNum); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
// Invalidate transactions that become invalid because of
@ -492,21 +571,21 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
// all the nonces smaller than the current one)
err = p.l2DB.InvalidateOldNonces(idxsNonceFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
// 4. Call BatchBuilder with TxSelector output
configBatch := &batchbuilder.ConfigBatch{
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxsExtra,
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxs,
l1CoordTxs, poolL2Txs)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
batchInfo.L2Txs = l2Txs
@ -518,42 +597,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil
}
// check if there is no txs to forge, no l1UserTxs in the open queue to freeze and we haven't reached the ForgeNoTxsDelay
func (p *Pipeline) postForgeBatchCheck(slotCommitted bool, now time.Time, l1UserTxsExtra, l1CoordTxs []common.L1Tx,
poolL2Txs []common.PoolL2Tx, batchInfo *BatchInfo) error {
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in a open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
if err != nil {
return err
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return err
}
return errForgeNoTxsBeforeDelay
}
}
return nil
return batchInfo, nil, nil
}
// waitServerProof gets the generated zkProof & sends it to the SmartContract
@ -598,7 +642,7 @@ func prepareForgeBatchArgs(batchInfo *BatchInfo) *eth.RollupForgeBatchArgs {
NewLastIdx: int64(zki.Metadata.NewLastIdxRaw),
NewStRoot: zki.Metadata.NewStateRootRaw.BigInt(),
NewExitRoot: zki.Metadata.NewExitRootRaw.BigInt(),
L1UserTxs: batchInfo.L1UserTxsExtra,
L1UserTxs: batchInfo.L1UserTxs,
L1CoordinatorTxs: batchInfo.L1CoordTxs,
L1CoordinatorTxsAuths: batchInfo.L1CoordinatorTxsAuths,
L2TxsData: batchInfo.L2Txs,

coordinator/pipeline_test.go (+2 -2)

@ -224,12 +224,12 @@ PoolTransfer(0) User2-User3: 300 (126)
batchNum++
batchInfo, err := pipeline.forgeBatch(batchNum)
batchInfo, _, err := pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 3, len(batchInfo.L2Txs))
batchNum++
batchInfo, err = pipeline.forgeBatch(batchNum)
batchInfo, _, err = pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 0, len(batchInfo.L2Txs))
}

coordinator/txmanager.go (+1 -1)

@ -147,7 +147,7 @@ func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.Tr
auth.Value = big.NewInt(0) // in wei
gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1UserTxs))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
auth.GasLimit = gasLimit

db/historydb/historydb.go (+3 -3)

@ -693,11 +693,11 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
hdb.dbRead, &txs,
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE '\x' END) AS effective_deposit_amount,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE 0 END) AS effective_amount,
tx.deposit_amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.deposit_amount_success THEN tx.deposit_amount ELSE 0 END) AS effective_deposit_amount,
tx.eth_block_num, tx.type, tx.batch_num
FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE ORDER BY item_id;`,
)

db/migrations/0001.sql (+18 -12)

@ -1,5 +1,11 @@
-- +migrate Up
-- NOTE: We use "DECIMAL(78,0)" to encode go *big.Int types. All the *big.Int
-- that we deal with represent a value in the SNARK field, which is an integer
-- of 256 bits. `log(2**256, 10) = 77.06`: that is, a 256 bit number can have
-- at most 78 digits, so we use this value to specify the precision in the
-- PostgreSQL DECIMAL guaranteeing that we will never lose precision.
-- History
CREATE TABLE block (
eth_block_num BIGINT PRIMARY KEY,
@ -22,10 +28,10 @@ CREATE TABLE batch (
forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
fees_collected BYTEA NOT NULL,
fee_idxs_coordinator BYTEA NOT NULL,
state_root BYTEA NOT NULL,
state_root DECIMAL(78,0) NOT NULL,
num_accounts BIGINT NOT NULL,
last_idx BIGINT NOT NULL,
exit_root BYTEA NOT NULL,
exit_root DECIMAL(78,0) NOT NULL,
forge_l1_txs_num BIGINT,
slot_num BIGINT NOT NULL,
total_fees_usd NUMERIC
@ -34,7 +40,7 @@ CREATE TABLE batch (
CREATE TABLE bid (
item_id SERIAL PRIMARY KEY,
slot_num BIGINT NOT NULL,
bid_value BYTEA NOT NULL,
bid_value DECIMAL(78,0) NOT NULL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
bidder_addr BYTEA NOT NULL -- fake foreign key for coordinator
);
@ -106,7 +112,7 @@ CREATE TABLE account_update (
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance BYTEA NOT NULL
balance DECIMAL(78,0) NOT NULL
);
CREATE TABLE exit_tree (
@ -114,7 +120,7 @@ CREATE TABLE exit_tree (
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE,
merkle_proof BYTEA NOT NULL,
balance BYTEA NOT NULL,
balance DECIMAL(78,0) NOT NULL,
instant_withdrawn BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
delayed_withdraw_request BIGINT REFERENCES block (eth_block_num) ON DELETE SET NULL,
owner BYTEA,
@ -164,7 +170,7 @@ CREATE TABLE tx (
to_idx BIGINT NOT NULL,
to_eth_addr BYTEA,
to_bjj BYTEA,
amount BYTEA NOT NULL,
amount DECIMAL(78,0) NOT NULL,
amount_success BOOLEAN NOT NULL DEFAULT true,
amount_f NUMERIC NOT NULL,
token_id INT NOT NULL REFERENCES token (token_id),
@ -174,7 +180,7 @@ CREATE TABLE tx (
-- L1
to_forge_l1_txs_num BIGINT,
user_origin BOOLEAN,
deposit_amount BYTEA,
deposit_amount DECIMAL(78,0),
deposit_amount_success BOOLEAN NOT NULL DEFAULT true,
deposit_amount_f NUMERIC,
deposit_amount_usd NUMERIC,
@ -544,7 +550,7 @@ FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs();
CREATE TABLE rollup_vars (
eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
fee_add_token BYTEA NOT NULL,
fee_add_token DECIMAL(78,0) NOT NULL,
forge_l1_timeout BIGINT NOT NULL,
withdrawal_delay BIGINT NOT NULL,
buckets BYTEA NOT NULL,
@ -556,7 +562,7 @@ CREATE TABLE bucket_update (
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
num_bucket BIGINT NOT NULL,
block_stamp BIGINT NOT NULL,
withdrawals BYTEA NOT NULL
withdrawals DECIMAL(78,0) NOT NULL
);
CREATE TABLE token_exchange (
@ -572,7 +578,7 @@ CREATE TABLE escape_hatch_withdrawal (
who_addr BYTEA NOT NULL,
to_addr BYTEA NOT NULL,
token_addr BYTEA NOT NULL,
amount BYTEA NOT NULL
amount DECIMAL(78,0) NOT NULL
);
CREATE TABLE auction_vars (
@ -610,7 +616,7 @@ CREATE TABLE tx_pool (
effective_to_eth_addr BYTEA,
effective_to_bjj BYTEA,
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
amount BYTEA NOT NULL,
amount DECIMAL(78,0) NOT NULL,
amount_f NUMERIC NOT NULL,
fee SMALLINT NOT NULL,
nonce BIGINT NOT NULL,
@ -624,7 +630,7 @@ CREATE TABLE tx_pool (
rq_to_eth_addr BYTEA,
rq_to_bjj BYTEA,
rq_token_id INT,
rq_amount BYTEA,
rq_amount DECIMAL(78,0),
rq_fee SMALLINT,
rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL,
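
The `DECIMAL(78,0)` note at the top of this migration can be checked quickly: the largest 256-bit value needs exactly 78 decimal digits, so the chosen precision never truncates a SNARK field element. A short Go check (not part of the commit):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 2^256 - 1: the largest value a 256-bit field element can take.
	max := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	fmt.Println(len(max.String())) // prints 78, hence DECIMAL(78,0)
}
```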

db/utils.go (+16 -4)

@ -13,6 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
//nolint:errcheck // driver for postgres DB
_ "github.com/lib/pq"
migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
"golang.org/x/sync/semaphore"
@ -165,7 +168,11 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
}
field := fieldPtr.(**big.Int)
*field = new(big.Int).SetBytes([]byte(*ptr))
var ok bool
*field, ok = new(big.Int).SetString(*ptr, 10)
if !ok {
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", *ptr))
}
return nil
}
@ -173,7 +180,7 @@ func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) {
field := fieldPtr.(*big.Int)
return field.Bytes(), nil
return field.String(), nil
}
// BigIntNullMeddler encodes or decodes the field value to or from JSON
@ -198,7 +205,12 @@ func (b BigIntNullMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
if ptr == nil {
return tracerr.Wrap(fmt.Errorf("BigIntMeddler.PostRead: nil pointer"))
}
*field = new(big.Int).SetBytes(ptr)
var ok bool
*field, ok = new(big.Int).SetString(string(ptr), 10)
if !ok {
return tracerr.Wrap(fmt.Errorf("big.Int.SetString failed on \"%v\"", string(ptr)))
}
return nil
}
@ -208,7 +220,7 @@ func (b BigIntNullMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}
if field == nil {
return nil, nil
}
return field.Bytes(), nil
return field.String(), nil
}
// SliceToSlicePtrs converts any []Foo to []*Foo

db/utils_test.go (+43 -0)

@ -1,9 +1,13 @@
package db
import (
"math/big"
"os"
"testing"
"github.com/russross/meddler"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type foo struct {
@ -33,3 +37,42 @@ func TestSlicePtrsToSlice(t *testing.T) {
assert.Equal(t, *a[i], b[i])
}
}
func TestBigInt(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
db, err := InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
defer func() {
_, err := db.Exec("DROP TABLE IF EXISTS test_big_int;")
require.NoError(t, err)
err = db.Close()
require.NoError(t, err)
}()
_, err = db.Exec("DROP TABLE IF EXISTS test_big_int;")
require.NoError(t, err)
_, err = db.Exec(`CREATE TABLE test_big_int (
item_id SERIAL PRIMARY KEY,
value1 DECIMAL(78, 0) NOT NULL,
value2 DECIMAL(78, 0),
value3 DECIMAL(78, 0)
);`)
require.NoError(t, err)
type Entry struct {
ItemID int `meddler:"item_id"`
Value1 *big.Int `meddler:"value1,bigint"`
Value2 *big.Int `meddler:"value2,bigintnull"`
Value3 *big.Int `meddler:"value3,bigintnull"`
}
entry := Entry{ItemID: 1, Value1: big.NewInt(1234567890), Value2: big.NewInt(9876543210), Value3: nil}
err = meddler.Insert(db, "test_big_int", &entry)
require.NoError(t, err)
var dbEntry Entry
err = meddler.QueryRow(db, &dbEntry, "SELECT * FROM test_big_int WHERE item_id = 1;")
require.NoError(t, err)
assert.Equal(t, entry, dbEntry)
}

eth/contracts/auction/HermezAuctionProtocol.go (+13 -7)

@ -309,9 +309,12 @@ func (_HermezAuctionProtocol *HermezAuctionProtocolCaller) Coordinators(opts *bi
Forger common.Address
CoordinatorURL string
})
if err != nil {
return *outstruct, err
}
outstruct.Forger = out[0].(common.Address)
outstruct.CoordinatorURL = out[1].(string)
outstruct.Forger = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
outstruct.CoordinatorURL = *abi.ConvertType(out[1], new(string)).(*string)
return *outstruct, err
@ -884,12 +887,15 @@ func (_HermezAuctionProtocol *HermezAuctionProtocolCaller) Slots(opts *bind.Call
BidAmount *big.Int
ClosedMinBid *big.Int
})
if err != nil {
return *outstruct, err
}
outstruct.Bidder = out[0].(common.Address)
outstruct.Fulfilled = out[1].(bool)
outstruct.ForgerCommitment = out[2].(bool)
outstruct.BidAmount = out[3].(*big.Int)
outstruct.ClosedMinBid = out[4].(*big.Int)
outstruct.Bidder = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool)
outstruct.ForgerCommitment = *abi.ConvertType(out[2], new(bool)).(*bool)
outstruct.BidAmount = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
outstruct.ClosedMinBid = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)
return *outstruct, err
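
The regenerated bindings above make two changes: they return early when the contract call itself fails (instead of indexing an empty `out` slice), and they go through `abi.ConvertType` rather than a direct type assertion, which tolerates the decoder returning a compatible but distinct dynamic type. A minimal sketch of the `ConvertType` pattern, assuming the standard `github.com/ethereum/go-ethereum/accounts/abi` package:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// out plays the role of the decoded return values of a contract call.
	out := []interface{}{big.NewInt(42)}

	// The generated code now writes this instead of out[0].(*big.Int):
	bidAmount := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	fmt.Println(bidAmount) // 42
}
```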

eth/contracts/hermez/Hermez.go (+424 -77)
File diff suppressed because it is too large


eth/contracts/withdrawdelayer/WithdrawalDelayer.go (+6 -24)
File diff suppressed because it is too large


node/node.go (+1 -0)

@ -367,6 +367,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
MustForgeAtSlotDeadline: cfg.Coordinator.MustForgeAtSlotDeadline,
IgnoreSlotCommitment: cfg.Coordinator.IgnoreSlotCommitment,
ForgeOncePerSlotIfTxs: cfg.Coordinator.ForgeOncePerSlotIfTxs,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
