Browse Source

Implement first iteration of node

The coordinator implementation has been refactored to allow all the goroutines
to be handled from the node.
feature/sql-semaphore1
Eduard S 3 years ago
parent
commit
475614cc3a
15 changed files with 758 additions and 159 deletions
  1. +0
    -1
      .gitignore
  2. +1
    -0
      cli/node/.gitignore
  3. +29
    -0
      cli/node/cfg.example.toml
  4. +2
    -0
      cli/node/coordcfg.example.toml
  5. +156
    -0
      cli/node/main.go
  6. +1
    -1
      common/accountcreationauths.go
  7. +101
    -0
      config/config.go
  8. +1
    -1
      coordinator/batch.go
  9. +96
    -127
      coordinator/coordinator.go
  10. +85
    -9
      coordinator/coordinator_test.go
  11. +11
    -2
      coordinator/proofpool.go
  12. +10
    -9
      eth/rollup.go
  13. +4
    -0
      go.mod
  14. +14
    -0
      go.sum
  15. +247
    -9
      node/node.go

+ 0
- 1
.gitignore

@ -1 +0,0 @@
gotest.sh

+ 1
- 0
cli/node/.gitignore

@ -0,0 +1 @@
cfg.example.secret.toml

+ 29
- 0
cli/node/cfg.example.toml

@ -0,0 +1,29 @@
[StateDB]
Path = "/tmp/iden3-test/hermez/statedb"
[PostgreSQL]
Port = 5432
Host = "localhost"
User = "hermez"
Password = "yourpasswordhere"
[L2DB]
Name = "l2"
SafetyPeriod = 10
MaxTxs = 512
TTL = "24h"
[HistoryDB]
Name = "history"
[Web3]
URL = "XXX"
[TxSelector]
Path = "/tmp/iden3-test/hermez/txselector"
[BatchBuilder]
Path = "/tmp/iden3-test/hermez/batchbuilder"
[Synchronizer]
SyncLoopInterval = "1s"

+ 2
- 0
cli/node/coordcfg.example.toml

@ -0,0 +1,2 @@
ForgerAddress = "0x6BB84Cc84D4A34467aD12a2039A312f7029e2071"
ForgeLoopInterval = "500ms"

+ 156
- 0
cli/node/main.go

@ -0,0 +1,156 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node"
"github.com/urfave/cli/v2"
)
const (
flagCfg = "cfg"
flagCoordCfg = "coordcfg"
flagMode = "mode"
modeSync = "sync"
modeCoord = "coord"
)
// cmdInit handles the `init` command: it parses the CLI flags and the
// configuration files. The actual initialization is still TODO.
func cmdInit(c *cli.Context) error {
	log.Info("Init")
	cfg, err := parseCli(c)
	if err != nil {
		return err
	}
	fmt.Println("TODO", cfg)
	// err is necessarily nil here; return nil explicitly for clarity.
	return nil
}
// cmdRun handles the `run` command: it starts the node in the configured
// mode, blocks until an interrupt signal (^C) is received, and then stops
// the node gracefully.
func cmdRun(c *cli.Context) error {
	cfg, err := parseCli(c)
	if err != nil {
		return fmt.Errorf("error parsing flags and config: %w", err)
	}
	// Named `n` to avoid shadowing the imported `node` package.
	n, err := node.NewNode(cfg.mode, cfg.node, cfg.coord)
	if err != nil {
		return fmt.Errorf("error starting node: %w", err)
	}
	n.Start()

	// Block until ^C is received.  signal.Notify only registers
	// os.Interrupt, so any value received here is the stop signal; the
	// previous relay goroutine and extra stop channel were redundant.
	ossig := make(chan os.Signal, 1)
	signal.Notify(ossig, os.Interrupt)
	<-ossig

	n.Stop()
	return nil
}
// Config is the configuration of the hermez node execution
type Config struct {
	mode  node.Mode           // working mode: synchronizer or coordinator
	node  *config.Node        // node configuration, always required
	coord *config.Coordinator // coordinator configuration, only set in coordinator mode
}
// parseCli builds the Config from the cli context, printing the app help
// before returning when the flags/configuration are invalid.
func parseCli(c *cli.Context) (*Config, error) {
	cfg, err := getConfig(c)
	if err == nil {
		return cfg, nil
	}
	// Show usage to the user before propagating the configuration error.
	if helpErr := cli.ShowAppHelp(c); helpErr != nil {
		panic(helpErr)
	}
	return nil, err
}
// getConfig reads the mode and configuration-file flags from the cli
// context and loads the node (and, in coordinator mode, the coordinator)
// configuration files.
func getConfig(c *cli.Context) (*Config, error) {
	var cfg Config
	switch mode := c.String(flagMode); mode {
	case modeSync:
		cfg.mode = node.ModeSynchronizer
	case modeCoord:
		cfg.mode = node.ModeCoordinator
	default:
		return nil, fmt.Errorf("invalid mode \"%v\"", mode)
	}
	// The coordinator configuration file is only required (and only
	// loaded) when running in coordinator mode.
	if cfg.mode == node.ModeCoordinator {
		coordPath := c.String(flagCoordCfg)
		if coordPath == "" {
			return nil, fmt.Errorf("required flag \"%v\" not set", flagCoordCfg)
		}
		loadedCoord, err := config.LoadCoordinator(coordPath)
		if err != nil {
			return nil, err
		}
		cfg.coord = loadedCoord
	}
	nodePath := c.String(flagCfg)
	if nodePath == "" {
		return nil, fmt.Errorf("required flag \"%v\" not set", flagCfg)
	}
	loadedNode, err := config.LoadNode(nodePath)
	if err != nil {
		return nil, err
	}
	cfg.node = loadedNode
	return &cfg, nil
}
// main wires up the command line application: the global mode and
// configuration flags plus the `init` and `run` commands, then executes
// it against os.Args.
func main() {
	app := cli.NewApp()
	app.Name = "hermez-node"
	app.Version = "0.1.0-alpha"
	app.Flags = []cli.Flag{
		&cli.StringFlag{
			Name:     flagMode,
			Usage:    fmt.Sprintf("Set node `MODE` (can be \"%v\" or \"%v\")", modeSync, modeCoord),
			Required: true,
		},
		&cli.StringFlag{
			Name:     flagCfg,
			Usage:    "Node configuration `FILE`",
			Required: true,
		},
		&cli.StringFlag{
			Name:  flagCoordCfg,
			Usage: "Coordinator configuration `FILE`",
		},
	}
	app.Commands = []*cli.Command{
		{
			Name:    "init",
			Aliases: []string{},
			Usage:   "Initialize the hermez-node",
			Action:  cmdInit,
		},
		{
			Name:    "run",
			Aliases: []string{},
			Usage:   "Run the hermez-node in the indicated mode",
			Action:  cmdRun,
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Printf("\nError: %v\n", err)
		os.Exit(1)
	}
}

+ 1
- 1
common/accountcreationauths.go

@ -12,5 +12,5 @@ type AccountCreationAuth struct {
Timestamp time.Time
EthAddr ethCommon.Address
BJJ babyjub.PublicKey
Signature babyjub.Signature
Signature []byte
}

+ 101
- 0
config/config.go

@ -0,0 +1,101 @@
package config
import (
"fmt"
"io/ioutil"
"time"
"github.com/BurntSushi/toml"
ethCommon "github.com/ethereum/go-ethereum/common"
"gopkg.in/go-playground/validator.v9"
)
// Duration is a wrapper type that parses time duration from text.
type Duration struct {
time.Duration
}
// UnmarshalText unmarshalls time duration from text.
func (d *Duration) UnmarshalText(data []byte) error {
duration, err := time.ParseDuration(string(data))
if err != nil {
return err
}
d.Duration = duration
return nil
}
// Coordinator is the coordinator specific configuration.
type Coordinator struct {
	// ForgerAddress is the ethereum address used by the coordinator to forge.
	ForgerAddress ethCommon.Address `validate:"required"`
	// ForgeLoopInterval is the interval between iterations of the forge loop.
	// NOTE(review): cli/node/coordcfg.example.toml sets "ForgerLoopInterval"
	// (extra "r"); that key would not populate this field and the
	// `required` validation would fail — confirm which spelling is intended.
	ForgeLoopInterval Duration `validate:"required"`
}
// Node is the hermez node configuration.
type Node struct {
	// StateDB holds the path of the account state database
	// (passed to statedb.NewStateDB).
	StateDB struct {
		Path string
	} `validate:"required"`
	// PostgreSQL holds the connection parameters shared by the
	// HistoryDB and L2DB SQL databases.
	PostgreSQL struct {
		Port     int    `validate:"required"`
		Host     string `validate:"required"`
		User     string `validate:"required"`
		Password string `validate:"required"`
	} `validate:"required"`
	// L2DB configures the L2 database (used by l2db.NewL2DB in
	// coordinator mode).
	L2DB struct {
		Name         string   `validate:"required"`
		SafetyPeriod uint16   `validate:"required"`
		MaxTxs       uint32   `validate:"required"`
		TTL          Duration `validate:"required"`
	} `validate:"required"`
	// HistoryDB holds the database name for the history database.
	HistoryDB struct {
		Name string `validate:"required"`
	} `validate:"required"`
	// Web3 holds the ethereum node endpoint dialed via ethclient.
	Web3 struct {
		URL string `validate:"required"`
	} `validate:"required"`
	// TxSelector holds the path used by the transaction selector
	// (coordinator mode only).
	TxSelector struct {
		Path string `validate:"required"`
	} `validate:"required"`
	// BatchBuilder holds the path used by the batch builder
	// (coordinator mode only).
	BatchBuilder struct {
		Path string `validate:"required"`
	} `validate:"required"`
	// Synchronizer configures the synchronization loop.
	Synchronizer struct {
		// SyncLoopInterval is the wait between sync iterations.
		SyncLoopInterval Duration `validate:"required"`
	} `validate:"required"`
}
// Load loads a generic config: it reads the TOML file at path, decodes it
// into cfg (which must be a pointer to a config struct) and validates the
// result against the `validate` struct tags.
func Load(path string, cfg interface{}) error {
	bs, err := ioutil.ReadFile(path) //nolint:gosec
	if err != nil {
		// Wrap for context, consistently with the validation error below.
		return fmt.Errorf("error reading configuration file: %w", err)
	}
	cfgToml := string(bs)
	if _, err := toml.Decode(cfgToml, cfg); err != nil {
		return fmt.Errorf("error decoding configuration file: %w", err)
	}
	validate := validator.New()
	if err := validate.Struct(cfg); err != nil {
		return fmt.Errorf("error validating configuration file: %w", err)
	}
	return nil
}
// LoadCoordinator loads the Coordinator configuration from path.
func LoadCoordinator(path string) (*Coordinator, error) {
	cfg := Coordinator{}
	err := Load(path, &cfg)
	if err != nil {
		return nil, fmt.Errorf("error loading coordinator configuration file: %w", err)
	}
	return &cfg, nil
}
// LoadNode loads the Node configuration from path.
func LoadNode(path string) (*Node, error) {
	cfg := Node{}
	err := Load(path, &cfg)
	if err != nil {
		return nil, fmt.Errorf("error loading node configuration file: %w", err)
	}
	return &cfg, nil
}

+ 1
- 1
coordinator/batch.go

@ -4,7 +4,7 @@ import (
"github.com/hermeznetwork/hermez-node/common"
)
// Proof TBD this type will be got from the proof server
// Proof TBD this type will be received from the proof server
type Proof struct {
}

+ 96
- 127
coordinator/coordinator.go

@ -1,7 +1,7 @@
package coordinator
import (
"time"
"fmt"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/batchbuilder"
@ -14,25 +14,23 @@ import (
"github.com/iden3/go-merkletree/db/memory"
)
// ErrStop is returned when the function is stopped asynchronously via the stop
// channel. It doesn't indicate an error.
var ErrStop = fmt.Errorf("Stopped")
// Config contains the Coordinator configuration
type Config struct {
ForgerAddress ethCommon.Address
LoopInterval time.Duration
}
// Coordinator implements the Coordinator type
type Coordinator struct {
// m sync.Mutex
stopch chan bool
stopforgerch chan bool
forging bool
isForgeSeq bool // WIP just for testing while implementing
config Config
batchNum common.BatchNum
batchQueue *BatchQueue
serverProofPool ServerProofPool
// synchronizer *synchronizer.Synchronizer
@ -61,123 +59,102 @@ func NewCoordinator(conf Config,
return &c
}
// Stop stops the Coordinator
func (c *Coordinator) Stop() {
log.Info("Stopping Coordinator")
c.stopch <- true
}
// Start starts the Coordinator service
func (c *Coordinator) Start() {
c.stopch = make(chan bool) // initialize channel
go func() {
log.Info("Starting Coordinator")
for {
select {
case <-c.stopch:
close(c.stopforgerch)
log.Info("Coordinator stopped")
return
case <-time.After(c.config.LoopInterval):
if !c.isForgeSequence() {
if c.forging {
log.Info("forging stopped")
c.forging = false
close(c.stopforgerch)
}
log.Debug("not in forge time")
continue
}
if !c.forging {
log.Info("Start Forging")
// c.batchNum = c.hdb.GetLastBatchNum() // uncomment when HistoryDB is ready
err := c.txsel.Reset(c.batchNum)
if err != nil {
log.Error("forging err: ", err)
}
err = c.batchBuilder.Reset(c.batchNum, true)
if err != nil {
log.Error("forging err: ", err)
}
c.batchQueue = NewBatchQueue()
c.forgerLoop()
c.forging = true
}
}
}
}()
}
// forgerLoop triggers goroutines for:
// - forgeSequence
// - proveSequence
// - forgeConfirmationSequence
func (c *Coordinator) forgerLoop() {
c.stopforgerch = make(chan bool) // initialize channel
go func() {
log.Info("forgeSequence started")
for {
select {
case <-c.stopforgerch:
log.Info("forgeSequence stopped")
return
case <-time.After(c.config.LoopInterval):
if err := c.forgeSequence(); err != nil {
log.Error("forgeSequence err: ", err)
}
}
// ForgeLoopFn is the function run in a loop that checks if it's time to forge
// and forges a batch if so and sends it to outBatchCh. Returns true if it's
// the coordinator's turn to forge.
func (c *Coordinator) ForgeLoopFn(outBatchCh chan *BatchInfo, stopCh chan bool) (bool, error) {
if !c.isForgeSequence() {
if c.forging {
log.Info("stop forging")
c.forging = false
}
}()
go func() {
log.Info("proveSequence started")
for {
select {
case <-c.stopforgerch:
log.Info("proveSequence stopped")
return
case <-time.After(c.config.LoopInterval):
if err := c.proveSequence(); err != nil && err != common.ErrBatchQueueEmpty {
log.Error("proveSequence err: ", err)
}
}
log.Debug("not in forge time")
return false, nil
}
log.Debug("forge time")
if !c.forging {
log.Info("start forging")
// c.batchNum = c.hdb.GetLastBatchNum() // uncomment when HistoryDB is ready
err := c.txsel.Reset(c.batchNum)
if err != nil {
log.Errorw("TxSelector.Reset", "error", err)
return true, err
}
}()
go func() {
log.Info("forgeConfirmationSequence started")
for {
select {
case <-c.stopforgerch:
log.Info("forgeConfirmationSequence stopped")
return
case <-time.After(c.config.LoopInterval):
if err := c.forgeConfirmationSequence(); err != nil {
log.Error("forgeConfirmationSequence err: ", err)
}
}
err = c.batchBuilder.Reset(c.batchNum, true)
if err != nil {
log.Errorw("BatchBuilder.Reset", "error", err)
return true, err
}
}()
}
// forgeSequence
func (c *Coordinator) forgeSequence() error {
// c.batchQueue = NewBatchQueue()
c.forging = true
}
// TODO once synchronizer has this method ready:
// If there's been a reorg, handle it
// handleReorg() function decides if the reorg must restart the pipeline or not
// if c.synchronizer.Reorg():
_ = c.handleReorg()
// 0. If there's an available server proof: Start pipeline for batchNum = batchNum + 1
serverProofInfo, err := c.serverProofPool.GetNextAvailable() // blocking call, returns when a server proof is available
// 0. If there's an available server proof: Start pipeline for batchNum = batchNum + 1.
// non-blocking call, returns nil if a server proof is
// not available, or non-nil otherwise.
serverProofInfo, err := c.serverProofPool.GetNextAvailable(stopCh)
if err != nil {
return err
return true, err
}
log.Debugw("start forge")
batchInfo, err := c.forge(serverProofInfo)
if err != nil {
log.Errorw("forge", "error", err)
return true, err
}
log.Debugw("end forge", "batchNum", batchInfo.batchNum)
outBatchCh <- batchInfo
return true, nil
}
// GetProofCallForgeLoopFn is the function run in a loop that gets a forged
// batch via inBatchCh, waits for the proof server to finish, calls the ForgeBatch
// function in the Rollup Smart Contract, and sends the batch to outBatchCh.
// Returns ErrStop when stopCh is signaled before a batch arrives.
// NOTE(review): the stop log message says "forgeLoopFn" — looks like a
// copy-paste from another loop; confirm intended message.
func (c *Coordinator) GetProofCallForgeLoopFn(inBatchCh, outBatchCh chan *BatchInfo, stopCh chan bool) error {
	select {
	case <-stopCh:
		log.Info("forgeLoopFn stopped")
		return ErrStop
	case batchInfo := <-inBatchCh:
		log.Debugw("start getProofCallForge", "batchNum", batchInfo.batchNum)
		if err := c.getProofCallForge(batchInfo); err != nil {
			return err
		}
		log.Debugw("end getProofCallForge", "batchNum", batchInfo.batchNum)
		// NOTE(review): unbuffered send; if the downstream consumer has
		// already stopped, this blocks forever — confirm shutdown ordering.
		outBatchCh <- batchInfo
	}
	return nil
}
// ForgeCallConfirmLoopFn is the function run in a loop that gets a batch that
// has been sent to the Rollup Smart Contract via inBatchCh and waits for the
// ethereum transaction confirmation.
// Returns ErrStop when stopCh is signaled before a batch arrives.
func (c *Coordinator) ForgeCallConfirmLoopFn(inBatchCh chan *BatchInfo, stopCh chan bool) error {
	select {
	case <-stopCh:
		log.Info("forgeConfirmLoopFn stopped")
		return ErrStop
	case batchInfo := <-inBatchCh:
		log.Debugw("start forgeCallConfirm", "batchNum", batchInfo.batchNum)
		if err := c.forgeCallConfirm(batchInfo); err != nil {
			return err
		}
		log.Debugw("end forgeCallConfirm", "batchNum", batchInfo.batchNum)
	}
	return nil
}
func (c *Coordinator) forge(serverProofInfo *ServerProofInfo) (*BatchInfo, error) {
// remove transactions from the pool that have been there for too long
err = c.purgeRemoveByTimeout()
err := c.purgeRemoveByTimeout()
if err != nil {
return err
return nil, err
}
c.batchNum = c.batchNum + 1
@ -193,13 +170,13 @@ func (c *Coordinator) forgeSequence() error {
var l1UserTxs []*common.L1Tx = nil // tmp, depends on HistoryDB
l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
if err != nil {
return err
return nil, err
}
} else {
// 2b: only L2 txs
poolL2Txs, err = c.txsel.GetL2TxSelection(c.batchNum) // TODO once feesInfo is added to method return, add the var
if err != nil {
return err
return nil, err
}
l1UserTxsExtra = nil
l1OperatorTxs = nil
@ -211,7 +188,7 @@ func (c *Coordinator) forgeSequence() error {
// all the nonces smaller than the current one)
err = c.purgeInvalidDueToL2TxsSelection(poolL2Txs)
if err != nil {
return err
return nil, err
}
// 3. Save metadata from TxSelector output for BatchNum
@ -224,31 +201,23 @@ func (c *Coordinator) forgeSequence() error {
l2Txs := common.PoolL2TxsToL2Txs(poolL2Txs)
zkInputs, err := c.batchBuilder.BuildBatch(configBatch, l1UserTxsExtra, l1OperatorTxs, l2Txs, nil) // TODO []common.TokenID --> feesInfo
if err != nil {
return err
return nil, err
}
// 5. Save metadata from BatchBuilder output for BatchNum
batchInfo.SetZKInputs(zkInputs)
log.Debugf("Batch builded, batchNum: %d ", c.batchNum)
// 6. Call an idle server proof with BatchBuilder output, save server proof info for batchNum
err = batchInfo.serverProof.CalculateProof(zkInputs)
if err != nil {
return err
return nil, err
}
c.batchQueue.Push(&batchInfo)
return nil
return &batchInfo, nil
}
// proveSequence gets the generated zkProof & sends it to the SmartContract
func (c *Coordinator) proveSequence() error {
batchInfo := c.batchQueue.Pop()
if batchInfo == nil {
// no batches in queue, return
log.Debug("not batch to prove yet")
return common.ErrBatchQueueEmpty
}
// getProofCallForge gets the generated zkProof & sends it to the SmartContract
func (c *Coordinator) getProofCallForge(batchInfo *BatchInfo) error {
serverProofInfo := batchInfo.serverProof
proof, err := serverProofInfo.GetProof() // blocking call, until not resolved don't continue. Returns when the proof server has calculated the proof
if err != nil {
@ -276,7 +245,7 @@ func (c *Coordinator) proveSequence() error {
return nil
}
func (c *Coordinator) forgeConfirmationSequence() error {
func (c *Coordinator) forgeCallConfirm(batchInfo *BatchInfo) error {
// TODO strategy of this sequence TBD
// confirm eth txs and mark them as accepted sequence
// ethTx := ethTxStore.GetFirstPending()

+ 85
- 9
coordinator/coordinator_test.go

@ -44,34 +44,110 @@ func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBu
return txsel, bb
}
// CoordNode is an example of a Node that handles the goroutines for the coordinator
type CoordNode struct {
	c *Coordinator
	// stop channels, one per pipeline goroutine started in Start
	stopForge             chan bool
	stopGetProofCallForge chan bool
	stopForgeCallConfirm  chan bool
}
// NewCoordNode creates a CoordNode wrapping the given Coordinator.
// The stop channels are initialized later, in Start.
func NewCoordNode(c *Coordinator) *CoordNode {
	return &CoordNode{
		c: c,
	}
}
// Start launches the three coordinator pipeline goroutines (forge,
// get-proof-and-call-forge, forge-call-confirm) connected through the
// batchCh0 and batchCh1 channels.  Each goroutine exits when its stop
// channel is signaled or when its loop function returns ErrStop.
func (cn *CoordNode) Start() {
	log.Debugw("Starting CoordNode...")
	cn.stopForge = make(chan bool)
	cn.stopGetProofCallForge = make(chan bool)
	cn.stopForgeCallConfirm = make(chan bool)
	batchCh0 := make(chan *BatchInfo) // forge -> getProofCallForge
	batchCh1 := make(chan *BatchInfo) // getProofCallForge -> forgeCallConfirm
	go func() {
		for {
			select {
			case <-cn.stopForge:
				return
			default:
				if forge, err := cn.c.ForgeLoopFn(batchCh0, cn.stopForge); err == ErrStop {
					return
				} else if err != nil {
					log.Errorw("CoordNode ForgeLoopFn", "error", err)
				} else if !forge {
					// Not the coordinator's turn to forge: back off before retrying.
					time.Sleep(500 * time.Millisecond)
				}
			}
		}
	}()
	go func() {
		for {
			select {
			case <-cn.stopGetProofCallForge:
				return
			default:
				if err := cn.c.GetProofCallForgeLoopFn(
					batchCh0, batchCh1, cn.stopGetProofCallForge); err == ErrStop {
					return
				} else if err != nil {
					log.Errorw("CoordNode GetProofCallForgeLoopFn", "error", err)
				}
			}
		}
	}()
	go func() {
		for {
			select {
			case <-cn.stopForgeCallConfirm:
				return
			default:
				if err := cn.c.ForgeCallConfirmLoopFn(
					batchCh1, cn.stopForgeCallConfirm); err == ErrStop {
					return
				} else if err != nil {
					log.Errorw("CoordNode ForgeCallConfirmLoopFn", "error", err)
				}
			}
		}
	}()
}
// Stop signals the three pipeline goroutines to stop.
// NOTE(review): these sends block until each goroutine receives; if a
// goroutine has already exited via ErrStop, the corresponding send would
// block forever — confirm intended shutdown semantics.
func (cn *CoordNode) Stop() {
	log.Debugw("Stopping CoordNode...")
	cn.stopForge <- true
	cn.stopGetProofCallForge <- true
	cn.stopForgeCallConfirm <- true
}
func TestCoordinator(t *testing.T) {
txsel, bb := newTestModules(t)
conf := Config{
LoopInterval: 100 * time.Millisecond,
}
conf := Config{}
hdb := &historydb.HistoryDB{}
c := NewCoordinator(conf, hdb, txsel, bb, &eth.Client{})
c.Start()
cn := NewCoordNode(c)
cn.Start()
time.Sleep(1 * time.Second)
// simulate forgeSequence time
log.Debug("simulate entering in forge time")
log.Info("simulate entering in forge time")
c.isForgeSeq = true
time.Sleep(1 * time.Second)
// simulate going out from forgeSequence
log.Debug("simulate going out from forge time")
log.Info("simulate going out from forge time")
c.isForgeSeq = false
time.Sleep(1 * time.Second)
// simulate entering forgeSequence time again
log.Debug("simulate entering in forge time again")
log.Info("simulate entering in forge time again")
c.isForgeSeq = true
time.Sleep(1 * time.Second)
// simulate stopping forgerLoop by channel
log.Debug("simulate stopping forgerLoop by closing coordinator stopch")
c.Stop()
log.Info("simulate stopping forgerLoop by closing coordinator stopch")
cn.Stop()
time.Sleep(1 * time.Second)
}

+ 11
- 2
coordinator/proofpool.go

@ -1,6 +1,9 @@
package coordinator
import "github.com/hermeznetwork/hermez-node/common"
import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/log"
)
// ServerProofInfo contains the data related to a ServerProof
type ServerProofInfo struct {
@ -25,6 +28,12 @@ type ServerProofPool struct {
}
// GetNextAvailable returns the available ServerProofInfo
func (p *ServerProofPool) GetNextAvailable() (*ServerProofInfo, error) {
func (p *ServerProofPool) GetNextAvailable(stopCh chan bool) (*ServerProofInfo, error) {
select {
case <-stopCh:
log.Info("ServerProofPool.GetNextAvailable stopped")
return nil, ErrStop
default:
}
return nil, nil
}

+ 10
- 9
eth/rollup.go

@ -149,15 +149,16 @@ func NewRollupEvents() RollupEvents {
// RollupForgeBatchArgs are the arguments to the ForgeBatch function in the Rollup Smart Contract
//nolint:structcheck,unused
type RollupForgeBatchArgs struct {
ProofA [2]*big.Int
ProofB [2][2]*big.Int
ProofC [2]*big.Int
NewLastIdx int64
NewStRoot *big.Int
NewExitRoot *big.Int
L1CoordinatorTxs []*common.L1Tx
L2Txs []*common.L2Tx
FeeIdxCoordinator []common.Idx
ProofA [2]*big.Int
ProofB [2][2]*big.Int
ProofC [2]*big.Int
NewLastIdx int64
NewStRoot *big.Int
NewExitRoot *big.Int
L1CoordinatorTxs []*common.L1Tx
L1CoordinatorTxsAuths [][]byte // Authorization for accountCreations for each L1CoordinatorTxs
L2Txs []*common.L2Tx
FeeIdxCoordinator []common.Idx
// Circuit selector
VerifierIdx int64
L1Batch bool

+ 4
- 0
go.mod

@ -3,6 +3,7 @@ module github.com/hermeznetwork/hermez-node
go 1.14
require (
github.com/BurntSushi/toml v0.3.1
github.com/dghubble/sling v1.3.0
github.com/ethereum/go-ethereum v1.9.17
github.com/go-sql-driver/mysql v1.5.0 // indirect
@ -17,5 +18,8 @@ require (
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/russross/meddler v1.0.0
github.com/stretchr/testify v1.6.1
github.com/urfave/cli v1.22.1
github.com/urfave/cli/v2 v2.2.0
go.uber.org/zap v1.13.0
gopkg.in/go-playground/validator.v9 v9.29.1
)

+ 14
- 0
go.sum

@ -14,6 +14,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
@ -102,8 +103,10 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -170,7 +173,9 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -358,6 +363,7 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@ -507,7 +513,9 @@ github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk=
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/meddler v1.0.0 h1:3HgwIot/NsCrLrmorjSO7JhzoshoSVfuqgFgZ0VTbro=
github.com/russross/meddler v1.0.0/go.mod h1:j75NzzcOL4CGy+pPKykxZoT/At5Qj4ZnRRs1PXxweZI=
@ -517,6 +525,7 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bcxNpBIysykNIP7I=
github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@ -570,7 +579,11 @@ github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA=
github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=
github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
@ -755,6 +768,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=
gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw=

+ 247
- 9
node/node.go

@ -1,13 +1,251 @@
package node
type mode string
import (
"time"
// ModeCoordinator defines the mode of the HermezNode as Coordinator, which
// means that the node is set to forge (which also will be synchronizing with
// the L1 blockchain state)
const ModeCoordinator mode = "coordinator"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/coordinator"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/txselector"
)
// ModeSynchronizer defines the mode of the HermezNode as Synchronizer, which
// means that the node is set to only synchronize with the L1 blockchain state
// and will not forge
const ModeSynchronizer mode = "synchronizer"
// Mode sets the working mode of the node (synchronizer or coordinator)
type Mode string
const (
// ModeCoordinator defines the mode of the HermezNode as Coordinator, which
// means that the node is set to forge (which also will be synchronizing with
// the L1 blockchain state)
ModeCoordinator Mode = "coordinator"
// ModeSynchronizer defines the mode of the HermezNode as Synchronizer, which
// means that the node is set to only synchronize with the L1 blockchain state
// and will not forge
ModeSynchronizer Mode = "synchronizer"
)
// Node is the Hermez Node
type Node struct {
	// Coordinator
	coord    *coordinator.Coordinator // nil unless mode == ModeCoordinator
	coordCfg *config.Coordinator
	// stop* channels signal the pipeline goroutines to exit;
	// stopped* channels confirm that each goroutine has terminated.
	stopForge                chan bool
	stopGetProofCallForge    chan bool
	stopForgeCallConfirm     chan bool
	stoppedForge             chan bool
	stoppedGetProofCallForge chan bool
	stoppedForgeCallConfirm  chan bool
	// Synchronizer
	sync        *synchronizer.Synchronizer
	stopSync    chan bool
	stoppedSync chan bool
	// General
	cfg  *config.Node
	mode Mode
}
// NewNode creates a Node: it builds the history and state databases, the
// ethereum client and the synchronizer, and — only in coordinator mode —
// the L2DB, transaction selector, batch builder and coordinator.
func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node, error) {
	historyDB, err := historydb.NewHistoryDB(
		cfg.PostgreSQL.Port,
		cfg.PostgreSQL.Host,
		cfg.PostgreSQL.User,
		cfg.PostgreSQL.Password,
		cfg.HistoryDB.Name,
	)
	if err != nil {
		return nil, err
	}
	stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, true, 32)
	if err != nil {
		return nil, err
	}
	ethClient, err := ethclient.Dial(cfg.Web3.URL)
	if err != nil {
		return nil, err
	}
	client := eth.NewClient(ethClient, nil, nil, nil)
	sync := synchronizer.NewSynchronizer(client, historyDB, stateDB)
	var coord *coordinator.Coordinator
	if mode == ModeCoordinator {
		l2DB, err := l2db.NewL2DB(
			cfg.PostgreSQL.Port,
			cfg.PostgreSQL.Host,
			cfg.PostgreSQL.User,
			cfg.PostgreSQL.Password,
			cfg.L2DB.Name,
			cfg.L2DB.SafetyPeriod,
			cfg.L2DB.MaxTxs,
			cfg.L2DB.TTL.Duration,
		)
		if err != nil {
			return nil, err
		}
		// TODO: Get (maxL1UserTxs, maxL1OperatorTxs, maxTxs) from the smart contract
		txSelector, err := txselector.NewTxSelector(cfg.TxSelector.Path, stateDB, l2DB, 10, 10, 10)
		if err != nil {
			return nil, err
		}
		// TODO: Get (configCircuits []ConfigCircuit, batchNum common.BatchNum, nLevels uint64) from smart contract
		nLevels := uint64(32) //nolint:gomnd
		batchBuilder, err := batchbuilder.NewBatchBuilder(cfg.BatchBuilder.Path, stateDB, nil, 0, nLevels)
		if err != nil {
			return nil, err
		}
		// (removed a duplicated `if err != nil` check that re-tested the
		// batchbuilder error a second time)
		coord = coordinator.NewCoordinator(
			coordinator.Config{
				ForgerAddress: coordCfg.ForgerAddress,
			},
			historyDB,
			txSelector,
			batchBuilder,
			client,
		)
	}
	return &Node{
		coord:    coord,
		coordCfg: coordCfg,
		sync:     sync,
		cfg:      cfg,
		mode:     mode,
	}, nil
}
// StartCoordinator starts the coordinator: it spawns the three pipeline
// goroutines (forge, get-proof-and-call-forge, forge-call-confirm)
// connected through the batchCh0 and batchCh1 channels.  Each goroutine
// exits when its stop channel is signaled or its loop function returns
// coordinator.ErrStop, and reports termination on its stopped channel.
func (n *Node) StartCoordinator() {
	log.Info("Starting Coordinator...")
	n.stopForge = make(chan bool)
	n.stopGetProofCallForge = make(chan bool)
	n.stopForgeCallConfirm = make(chan bool)
	n.stoppedForge = make(chan bool)
	n.stoppedGetProofCallForge = make(chan bool)
	n.stoppedForgeCallConfirm = make(chan bool)
	batchCh0 := make(chan *coordinator.BatchInfo) // forge -> getProofCallForge
	batchCh1 := make(chan *coordinator.BatchInfo) // getProofCallForge -> forgeCallConfirm
	go func() {
		// Report termination so StopCoordinator can wait for this goroutine.
		defer func() { n.stoppedForge <- true }()
		for {
			select {
			case <-n.stopForge:
				return
			default:
				if forge, err := n.coord.ForgeLoopFn(batchCh0, n.stopForge); err == coordinator.ErrStop {
					return
				} else if err != nil {
					log.Errorw("Coordinator.ForgeLoopFn", "error", err)
				} else if !forge {
					// Not the coordinator's turn to forge: back off before retrying.
					time.Sleep(n.coordCfg.ForgeLoopInterval.Duration)
				}
			}
		}
	}()
	go func() {
		defer func() { n.stoppedGetProofCallForge <- true }()
		for {
			select {
			case <-n.stopGetProofCallForge:
				return
			default:
				if err := n.coord.GetProofCallForgeLoopFn(
					batchCh0, batchCh1, n.stopGetProofCallForge); err == coordinator.ErrStop {
					return
				} else if err != nil {
					log.Errorw("Coordinator.GetProofCallForgeLoopFn", "error", err)
				}
			}
		}
	}()
	go func() {
		defer func() { n.stoppedForgeCallConfirm <- true }()
		for {
			select {
			case <-n.stopForgeCallConfirm:
				return
			default:
				if err := n.coord.ForgeCallConfirmLoopFn(
					batchCh1, n.stopForgeCallConfirm); err == coordinator.ErrStop {
					return
				} else if err != nil {
					log.Errorw("Coordinator.ForgeCallConfirmLoopFn", "error", err)
				}
			}
		}
	}()
}
// StopCoordinator stops the coordinator: it signals the three pipeline
// goroutines to stop and then waits for each one to confirm termination.
// NOTE(review): the stop sends block until received; if a goroutine has
// already exited via coordinator.ErrStop (its deferred stopped-send fires
// first) the matching stop send here would block forever — confirm the
// intended shutdown semantics.
func (n *Node) StopCoordinator() {
	log.Info("Stopping Coordinator...")
	n.stopForge <- true
	n.stopGetProofCallForge <- true
	n.stopForgeCallConfirm <- true
	<-n.stoppedForge
	<-n.stoppedGetProofCallForge
	<-n.stoppedForgeCallConfirm
}
// StartSynchronizer starts the synchronizer: it spawns a goroutine that
// calls sync.Sync every SyncLoopInterval until stopSync is signaled, and
// reports termination on stoppedSync.
func (n *Node) StartSynchronizer() {
	log.Info("Starting Synchronizer...")
	n.stopSync = make(chan bool)
	n.stoppedSync = make(chan bool)
	go func() {
		// Report termination so StopSynchronizer can wait for this goroutine.
		defer func() { n.stoppedSync <- true }()
		for {
			select {
			case <-n.stopSync:
				// Fixed log message: it previously said "Coordinator stopped".
				log.Info("Synchronizer stopped")
				return
			case <-time.After(n.cfg.Synchronizer.SyncLoopInterval.Duration):
				if err := n.sync.Sync(); err != nil {
					log.Errorw("Synchronizer.Sync", "error", err)
				}
			}
		}
	}()
}
// StopSynchronizer stops the synchronizer: it signals the sync loop
// goroutine to stop and waits until it confirms termination.
func (n *Node) StopSynchronizer() {
	log.Info("Stopping Synchronizer...")
	n.stopSync <- true
	<-n.stoppedSync
}
// Start the node: in coordinator mode this launches the coordinator
// pipeline as well; the synchronizer is started in every mode.
func (n *Node) Start() {
	log.Infow("Starting node...", "mode", n.mode)
	coordinating := n.mode == ModeCoordinator
	if coordinating {
		n.StartCoordinator()
	}
	n.StartSynchronizer()
}
// Stop the node: in coordinator mode this stops the coordinator pipeline
// as well; the synchronizer is stopped in every mode.
func (n *Node) Stop() {
	log.Infow("Stopping node...")
	coordinating := n.mode == ModeCoordinator
	if coordinating {
		n.StopCoordinator()
	}
	n.StopSynchronizer()
}

Loading…
Cancel
Save