Merge pull request #80 from hermeznetwork/feature/coordinator0
Feature/coordinator0
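This merge introduces a first working draft of the Coordinator service: a constructor that wires in the TxSelector, BatchBuilder, HistoryDB and eth client, Start/Stop control built on channels, and forge/prove/forgeConfirmation loops driven by a configurable LoopInterval. Alongside it, batch numbers move from raw uint64 to the typed common.BatchNum across StateDB, TxSelector, BatchBuilder and the coordinator, ExitInfo is promoted into the common package, eth Client.BlockByNumber now returns *common.Block, and the logger adds zap.AddCallerSkip(1) so entries report their real call site.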
@@ -27,7 +27,7 @@ type ConfigBatch struct {
 // NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
 // method
-func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, configCircuits []ConfigCircuit, batchNum uint64, nLevels uint64) (*BatchBuilder, error) {
+func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, configCircuits []ConfigCircuit, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
 	localStateDB, err := statedb.NewLocalStateDB(dbpath, synchronizerStateDB, true, int(nLevels))
 	if err != nil {
 		return nil, err
@@ -46,12 +46,16 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, config
 // `batchNum`. If `fromSynchronizer` is true, the BatchBuilder must take a
 // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
 // it can just roll back the internal copy.
-func (bb *BatchBuilder) Reset(batchNum uint64, fromSynchronizer bool) error {
+func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	return bb.localStateDB.Reset(batchNum, fromSynchronizer)
 }
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
 func (bb *BatchBuilder) BuildBatch(configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
 	zkInputs, _, err := bb.localStateDB.ProcessTxs(false, l1usertxs, l1coordinatortxs, l2txs)
+	if err != nil {
+		return nil, err
+	}
+	err = bb.localStateDB.MakeCheckpoint()
 	return zkInputs, err
 }
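Several hunks below repeat the same migration from uint64 to common.BatchNum. The definition itself is not part of this diff; a minimal sketch of what it plausibly looks like, assuming a plain named integer type:

package common

// BatchNum identifies a rollup batch. A named type instead of a bare
// uint64 lets the compiler reject accidental mixing with other counters,
// while still allowing the explicit conversions seen in this diff, such
// as common.BatchNum(9) and int(batchNum).
type BatchNum uint64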
@@ -1,7 +1,6 @@
 package batchbuilder
 
 import (
-	"fmt"
 	"io/ioutil"
 	"testing"
 
@@ -19,7 +18,6 @@ func TestBatchBuilder(t *testing.T) {
 
 	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
 	require.Nil(t, err)
-	bb, err := NewBatchBuilder(bbDir, synchDB, nil, 0, 32)
+	_, err = NewBatchBuilder(bbDir, synchDB, nil, 0, 32)
 	assert.Nil(t, err)
-	fmt.Println(bb)
 }
@@ -2,11 +2,13 @@ package common
 
 import (
 	"math/big"
 
+	"github.com/iden3/go-merkletree"
 )
 
 type ExitInfo struct {
 	AccountIdx  Idx
-	MerkleProof []byte
+	MerkleProof *merkletree.CircomVerifierProof
 	Balance     *big.Int
 	Nullifier   *big.Int
 }
@@ -10,7 +10,7 @@ type Proof struct {
 
 // BatchInfo contans the Batch information
 type BatchInfo struct {
-	batchNum    uint64
+	batchNum    common.BatchNum
 	serverProof *ServerProofInfo
 	zkInputs    *common.ZKInputs
 	proof       *Proof
@@ -22,7 +22,7 @@ type BatchInfo struct {
 
 // NewBatchInfo creates a new BatchInfo with the given batchNum &
 // ServerProofInfo
-func NewBatchInfo(batchNum uint64, serverProof *ServerProofInfo) BatchInfo {
+func NewBatchInfo(batchNum common.BatchNum, serverProof *ServerProofInfo) BatchInfo {
 	return BatchInfo{
 		batchNum:    batchNum,
 		serverProof: serverProof,
@@ -3,6 +3,7 @@ package coordinator
 import (
 	"testing"
 
+	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -19,8 +20,8 @@ func TestBatchQueue(t *testing.T) {
 		batchNum: 1,
 	})
 
-	assert.Equal(t, uint64(0), bq.Pop().batchNum)
-	assert.Equal(t, uint64(2), bq.Pop().batchNum)
-	assert.Equal(t, uint64(1), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(0), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(2), bq.Pop().batchNum)
+	assert.Equal(t, common.BatchNum(1), bq.Pop().batchNum)
 	assert.Nil(t, bq.Pop())
 }
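BatchQueue itself is not shown in this diff; a hypothetical minimal implementation, consistent with the FIFO behaviour the test exercises (Push in order, Pop returns the oldest, nil when empty):

// BatchQueue is a FIFO queue of batches pending proof / forge confirmation.
type BatchQueue struct {
	queue []*BatchInfo
}

func NewBatchQueue() *BatchQueue {
	return &BatchQueue{}
}

// Push appends a batch at the tail.
func (bq *BatchQueue) Push(b *BatchInfo) {
	bq.queue = append(bq.queue, b)
}

// Pop returns the oldest queued batch, or nil when the queue is empty.
func (bq *BatchQueue) Pop() *BatchInfo {
	if len(bq.queue) == 0 {
		return nil
	}
	b := bq.queue[0]
	bq.queue = bq.queue[1:]
	return b
}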
@@ -6,7 +6,9 @@ import (
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/batchbuilder"
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/eth"
+	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/txselector"
 	kvdb "github.com/iden3/go-merkletree/db"
 	"github.com/iden3/go-merkletree/db/memory"
@@ -15,17 +17,26 @@ import (
 // CoordinatorConfig contains the Coordinator configuration
 type CoordinatorConfig struct {
 	ForgerAddress ethCommon.Address
+	LoopInterval  time.Duration
 }
 
 // Coordinator implements the Coordinator type
 type Coordinator struct {
+	// m sync.Mutex
+	stopch       chan bool
+	stopforgerch chan bool
+
+	forging    bool
+	isForgeSeq bool // WIP just for testing while implementing
+
 	config CoordinatorConfig
 
-	batchNum        uint64
+	batchNum        common.BatchNum
 	batchQueue      *BatchQueue
 	serverProofPool ServerProofPool
 
 	// synchronizer *synchronizer.Synchronizer
+	hdb          *historydb.HistoryDB
 	txsel        *txselector.TxSelector
 	batchBuilder *batchbuilder.BatchBuilder
 
@@ -34,48 +45,121 @@ type Coordinator struct {
 }
 
 // NewCoordinator creates a new Coordinator
-func NewCoordinator() *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
-	var c *Coordinator
-	// c.ethClient = eth.NewClient() // TBD
-	c.ethTxStore = memory.NewMemoryStorage()
-	return c
+func NewCoordinator(conf CoordinatorConfig,
+	hdb *historydb.HistoryDB,
+	txsel *txselector.TxSelector,
+	bb *batchbuilder.BatchBuilder,
+	ethClient *eth.Client) *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
+	c := Coordinator{
+		config:       conf,
+		hdb:          hdb,
+		txsel:        txsel,
+		batchBuilder: bb,
+		ethClient:    ethClient,
+		ethTxStore:   memory.NewMemoryStorage(),
+	}
+	return &c
+}
+
+func (c *Coordinator) Stop() {
+	log.Info("Stopping Coordinator")
+	c.stopch <- true
 }
 
 // Start starts the Coordinator service
 func (c *Coordinator) Start() {
-	// TODO TBD note: the sequences & loops & errors & logging & goroutines
-	// & channels approach still needs to be defined, the current code is a
-	// wip draft
-
-	// TBD: goroutines strategy
-
-	// if in Forge Sequence:
-	if c.isForgeSequence() {
-		// c.batchNum = c.synchronizer.LastBatchNum()
-		_ = c.txsel.Reset(c.batchNum)
-		_ = c.batchBuilder.Reset(c.batchNum, true)
-		c.batchQueue = NewBatchQueue()
-		go func() {
-			for {
-				_ = c.forgeSequence()
-				time.Sleep(1 * time.Second)
-			}
-		}()
-		go func() {
-			for {
-				_ = c.proveSequence()
-				time.Sleep(1 * time.Second)
-			}
-		}()
-		go func() {
-			for {
-				_ = c.forgeConfirmationSequence()
-				time.Sleep(1 * time.Second)
-			}
-		}()
-	}
+	c.stopch = make(chan bool) // initialize channel
+	go func() {
+		log.Info("Starting Coordinator")
+		for {
+			select {
+			case <-c.stopch:
+				close(c.stopforgerch)
+				log.Info("Coordinator stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if !c.isForgeSequence() {
+					if c.forging {
+						log.Info("forging stopped")
+						c.forging = false
+						close(c.stopforgerch)
+					}
+					log.Debug("not in forge time")
+					continue
+				}
+				if !c.forging {
+					log.Info("Start Forging")
+					// c.batchNum = c.hdb.GetLastBatchNum() // uncomment when HistoryDB is ready
+					err := c.txsel.Reset(c.batchNum)
+					if err != nil {
+						log.Error("forging err: ", err)
+					}
+					err = c.batchBuilder.Reset(c.batchNum, true)
+					if err != nil {
+						log.Error("forging err: ", err)
+					}
+					c.batchQueue = NewBatchQueue()
+					c.forgerLoop()
+					c.forging = true
+				}
+			}
+		}
+	}()
 }
 
+// forgerLoop trigers goroutines for:
+// - forgeSequence
+// - proveSequence
+// - forgeConfirmationSequence
+func (c *Coordinator) forgerLoop() {
+	c.stopforgerch = make(chan bool) // initialize channel
+
+	go func() {
+		log.Info("forgeSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("forgeSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.forgeSequence(); err != nil {
+					log.Error("forgeSequence err: ", err)
+				}
+			}
+		}
+	}()
+	go func() {
+		log.Info("proveSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("proveSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.proveSequence(); err != nil && err != common.ErrBatchQueueEmpty {
+					log.Error("proveSequence err: ", err)
+				}
+			}
+		}
+	}()
+	go func() {
+		log.Info("forgeConfirmationSequence started")
+		for {
+			select {
+			case <-c.stopforgerch:
+				log.Info("forgeConfirmationSequence stopped")
+				return
+			case <-time.After(c.config.LoopInterval):
+				if err := c.forgeConfirmationSequence(); err != nil {
+					log.Error("forgeConfirmationSequence err: ", err)
+				}
+			}
+		}
+	}()
+}
+
+// forgeSequence
 func (c *Coordinator) forgeSequence() error {
 	// TODO once synchronizer has this method ready:
 	// If there's been a reorg, handle it
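The new Start/forgerLoop code leans on one Go property: receiving from a closed channel never blocks, so a single close(stopforgerch) is observed by all three sequence goroutines at once. A self-contained sketch of the pattern (names here are illustrative, not from the repo):

package main

import (
	"fmt"
	"time"
)

func worker(name string, stopch chan bool) {
	for {
		select {
		case <-stopch: // a closed channel yields immediately for every receiver
			fmt.Println(name, "stopped")
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println(name, "tick")
		}
	}
}

func main() {
	stopch := make(chan bool)
	go worker("forgeSequence", stopch)
	go worker("proveSequence", stopch)
	go worker("forgeConfirmationSequence", stopch)

	time.Sleep(350 * time.Millisecond)
	close(stopch) // broadcast: all three workers return
	time.Sleep(50 * time.Millisecond)
}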
@@ -104,8 +188,8 @@ func (c *Coordinator) forgeSequence() error {
 	// 1. Decide if we forge L2Tx or L1+L2Tx
 	if c.shouldL1L2Batch() {
 		// 2a: L1+L2 txs
-		// l1UserTxs, toForgeL1TxsNumber := c.synchronizer.GetNextL1UserTxs() // TODO once synchronizer is ready, uncomment
-		var l1UserTxs []*common.L1Tx = nil // tmp, depends on synchronizer
+		// l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment
+		var l1UserTxs []*common.L1Tx = nil // tmp, depends on HistoryDB
 		l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection(c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
 		if err != nil {
 			return err
@@ -144,6 +228,7 @@ func (c *Coordinator) forgeSequence() error {
 
 	// 5. Save metadata from BatchBuilder output for BatchNum
 	batchInfo.SetZKInputs(zkInputs)
+	log.Debugf("Batch builded, batchNum: %d ", c.batchNum)
 
 	// 6. Call an idle server proof with BatchBuilder output, save server proof info for batchNum
 	err = batchInfo.serverProof.CalculateProof(zkInputs)
@@ -160,6 +245,7 @@ func (c *Coordinator) proveSequence() error {
 	batchInfo := c.batchQueue.Pop()
 	if batchInfo == nil {
 		// no batches in queue, return
+		log.Debug("not batch to prove yet")
 		return common.ErrBatchQueueEmpty
 	}
 	serverProofInfo := batchInfo.serverProof
@@ -173,6 +259,8 @@ func (c *Coordinator) proveSequence() error {
 	if err != nil {
 		return err
 	}
+	log.Debugf("ethClient ForgeCall sent, batchNum: %d", c.batchNum)
+
 	// TODO once tx data type is defined, store ethTx (returned by ForgeCall)
 	// TBD if use ethTxStore as a disk k-v database, or use a Queue
 	// tx, err := c.ethTxStore.NewTx()
@@ -202,8 +290,7 @@ func (c *Coordinator) handleReorg() error {
 
 // isForgeSequence returns true if the node is the Forger in the current ethereum block
 func (c *Coordinator) isForgeSequence() bool {
-	return false
+	return c.isForgeSeq
 }
 
 func (c *Coordinator) purgeRemoveByTimeout() error {
coordinator/coordinator_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
+package coordinator
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/hermeznetwork/hermez-node/batchbuilder"
+	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/db/l2db"
+	"github.com/hermeznetwork/hermez-node/db/statedb"
+	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/txselector"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBuilder) { // FUTURE once Synchronizer is ready, should return it also
+	nLevels := 32
+
+	synchDB, err := ioutil.TempDir("", "tmpSynchDB")
+	require.Nil(t, err)
+	sdb, err := statedb.NewStateDB(synchDB, true, nLevels)
+	assert.Nil(t, err)
+
+	pass := os.Getenv("POSTGRES_PASS")
+	l2DB, err := l2db.NewL2DB(5432, "localhost", "hermez", pass, "l2", 10, 512, 24*time.Hour)
+	require.Nil(t, err)
+
+	txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
+	require.Nil(t, err)
+	txsel, err := txselector.NewTxSelector(txselDir, sdb, l2DB, 10, 10, 10)
+	assert.Nil(t, err)
+
+	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
+	require.Nil(t, err)
+	bb, err := batchbuilder.NewBatchBuilder(bbDir, sdb, nil, 0, uint64(nLevels))
+	assert.Nil(t, err)
+
+	// l1Txs, coordinatorL1Txs, poolL2Txs := test.GenerateTestTxsFromSet(t, test.SetTest0)
+
+	return txsel, bb
+}
+
+func TestCoordinator(t *testing.T) {
+	txsel, bb := newTestModules(t)
+
+	conf := CoordinatorConfig{
+		LoopInterval: 100 * time.Millisecond,
+	}
+	hdb := &historydb.HistoryDB{}
+	c := NewCoordinator(conf, hdb, txsel, bb, nil)
+	c.Start()
+	time.Sleep(1 * time.Second)
+
+	// simulate forgeSequence time
+	log.Debug("simulate entering in forge time")
+	c.isForgeSeq = true
+	time.Sleep(1 * time.Second)
+
+	// simulate going out from forgeSequence
+	log.Debug("simulate going out from forge time")
+	c.isForgeSeq = false
+	time.Sleep(1 * time.Second)
+
+	// simulate entering forgeSequence time again
+	log.Debug("simulate entering in forge time again")
+	c.isForgeSeq = true
+	time.Sleep(1 * time.Second)
+
+	// simulate stopping forgerLoop by channel
+	log.Debug("simulate stopping forgerLoop by closing coordinator stopch")
+	c.Stop()
+	time.Sleep(1 * time.Second)
+}
@@ -130,7 +130,7 @@ func (s *StateDB) MakeCheckpoint() error {
 }
 
 // DeleteCheckpoint removes if exist the checkpoint of the given batchNum
-func (s *StateDB) DeleteCheckpoint(batchNum uint64) error {
+func (s *StateDB) DeleteCheckpoint(batchNum common.BatchNum) error {
 	checkpointPath := s.path + PATHBATCHNUM + strconv.Itoa(int(batchNum))
 
 	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
@@ -144,7 +144,7 @@ func (s *StateDB) DeleteCheckpoint(batchNum uint64) error {
 // does not delete the checkpoints between old current and the new current,
 // those checkpoints will remain in the storage, and eventually will be
 // deleted when MakeCheckpoint overwrites them.
-func (s *StateDB) Reset(batchNum uint64) error {
+func (s *StateDB) Reset(batchNum common.BatchNum) error {
 	if batchNum == 0 {
 		s.idx = 0
 		return nil
@@ -331,7 +331,7 @@ func NewLocalStateDB(path string, synchronizerDB *StateDB, withMT bool, nLevels
 
 // Reset performs a reset in the LocaStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
-func (l *LocalStateDB) Reset(batchNum uint64, fromSynchronizer bool) error {
+func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	if batchNum == 0 {
 		l.idx = 0
 		return nil
@@ -193,13 +193,13 @@ func TestCheckpoints(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, common.BatchNum(4), cb)
 
-	err = sdb.DeleteCheckpoint(uint64(9))
+	err = sdb.DeleteCheckpoint(common.BatchNum(9))
 	assert.Nil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(10))
+	err = sdb.DeleteCheckpoint(common.BatchNum(10))
 	assert.Nil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(9)) // does not exist, should return err
+	err = sdb.DeleteCheckpoint(common.BatchNum(9)) // does not exist, should return err
 	assert.NotNil(t, err)
-	err = sdb.DeleteCheckpoint(uint64(11)) // does not exist, should return err
+	err = sdb.DeleteCheckpoint(common.BatchNum(11)) // does not exist, should return err
 	assert.NotNil(t, err)
 
 	// Create a LocalStateDB from the initial StateDB
@@ -13,20 +13,12 @@ import (
 // keyidx is used as key in the db to store the current Idx
 var keyidx = []byte("idx")
 
-// FUTURE This will be used from common once pending PR is merged
-type ExitInfo struct {
-	Idx       *common.Idx
-	Proof     *merkletree.CircomVerifierProof
-	Nullifier *big.Int
-	Balance   *big.Int
-}
-
 // ProcessTxs process the given L1Txs & L2Txs applying the needed updates to
 // the StateDB depending on the transaction Type. Returns the common.ZKInputs
 // to generate the SnarkProof later used by the BatchBuilder, and if
 // cmpExitTree is set to true, returns common.ExitTreeLeaf that is later used
 // by the Synchronizer to update the HistoryDB.
-func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx) (*common.ZKInputs, []*ExitInfo, error) {
+func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*common.L1Tx, l2txs []*common.L2Tx) (*common.ZKInputs, []*common.ExitInfo, error) {
 	var err error
 	var exitTree *merkletree.MerkleTree
 	exits := make(map[common.Idx]common.Account)
@@ -71,8 +63,8 @@ func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*co
 	}
 
 	// once all txs processed (exitTree root frozen), for each leaf
-	// generate ExitInfo data
-	var exitInfos []*ExitInfo
+	// generate common.ExitInfo data
+	var exitInfos []*common.ExitInfo
 	for exitIdx, exitAccount := range exits {
 		// 0. generate MerkleProof
 		p, err := exitTree.GenerateCircomVerifierProof(exitIdx.BigInt(), nil)
@@ -92,10 +84,10 @@ func (s *StateDB) ProcessTxs(cmpExitTree bool, l1usertxs, l1coordinatortxs []*co
 		if err != nil {
 			return nil, nil, err
 		}
-		// 2. generate ExitInfo
-		ei := &ExitInfo{
-			Idx:       &exitIdx,
-			Proof:     p,
+		// 2. generate common.ExitInfo
+		ei := &common.ExitInfo{
+			AccountIdx:  exitIdx,
+			MerkleProof: p,
 			Nullifier:   nullifier,
 			Balance:     exitAccount.Balance,
 		}
@@ -195,9 +195,18 @@ func (c *Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.He
 	return c.client.HeaderByNumber(ctx, number)
 }
 
-// BlockByNumber internally calls ethclient.Client BlockByNumber
-func (c *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
-	return c.client.BlockByNumber(ctx, number)
+// BlockByNumber internally calls ethclient.Client BlockByNumber and returns *common.Block
+func (c *Client) BlockByNumber(ctx context.Context, number *big.Int) (*common.Block, error) {
+	block, err := c.client.BlockByNumber(ctx, number)
+	if err != nil {
+		return nil, err
+	}
+	b := &common.Block{
+		EthBlockNum: block.Number().Uint64(),
+		Timestamp:   time.Unix(int64(block.Time()), 0),
+		Hash:        block.Hash(),
+	}
+	return b, nil
 }
 
 func (c *Client) ForgeCall(callData *common.CallDataForge) ([]byte, error) {
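The shape of common.Block is implied by the conversion above; a sketch of the fields this hunk populates (assumed, not shown in the diff):

package common

import (
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
)

// Block mirrors the subset of an Ethereum block header the node cares
// about (number, timestamp, hash), decoupled from go-ethereum's types.
type Block struct {
	EthBlockNum uint64
	Timestamp   time.Time
	Hash        ethCommon.Hash
}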
@@ -57,7 +57,8 @@ func Init(levelStr, errorsPath string) {
 	}
 	//nolint:errcheck
 	defer logger.Sync()
-	log = logger.Sugar()
+	withOptions := logger.WithOptions(zap.AddCallerSkip(1))
+	log = withOptions.Sugar()
 
 	if errorsPath != "" {
 		log.Infof("file where errors will be written: %s", errorsPath)
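Why AddCallerSkip(1): zap annotates each entry with its caller, and without the skip every message routed through the log package would report the wrapper file itself as its origin. A standalone sketch of the effect (the wrapper layout here is assumed, not the repo's exact code):

package main

import "go.uber.org/zap"

var log *zap.SugaredLogger

// Info forwards to zap; thanks to AddCallerSkip(1) the logged caller is
// whoever called Info, not this forwarding function.
func Info(args ...interface{}) { log.Info(args...) }

func main() {
	logger, _ := zap.NewDevelopment() // development config annotates entries with their caller
	defer logger.Sync()               //nolint:errcheck
	log = logger.WithOptions(zap.AddCallerSkip(1)).Sugar()

	Info("caller annotation points at this line, not at the Info wrapper")
}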
test/txs.go (50 lines changed)
@@ -4,6 +4,7 @@ import (
 	"crypto/ecdsa"
 	"math/big"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
@@ -11,6 +12,7 @@ import (
 	ethCrypto "github.com/ethereum/go-ethereum/crypto"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/iden3/go-iden3-crypto/babyjub"
+	"github.com/stretchr/testify/require"
 )
 
 type Account struct {
@@ -53,12 +55,12 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 	accounts := GenerateKeys(t, instructions.Accounts)
 	l1CreatedAccounts := make(map[string]*Account)
 
-	var batchL1txs []*common.L1Tx
-	var batchCoordinatorL1txs []*common.L1Tx
-	var batchL2txs []*common.PoolL2Tx
-	var l1txs [][]*common.L1Tx
-	var coordinatorL1txs [][]*common.L1Tx
-	var l2txs [][]*common.PoolL2Tx
+	var batchL1Txs []*common.L1Tx
+	var batchCoordinatorL1Txs []*common.L1Tx
+	var batchPoolL2Txs []*common.PoolL2Tx
+	var l1Txs [][]*common.L1Tx
+	var coordinatorL1Txs [][]*common.L1Tx
+	var poolL2Txs [][]*common.PoolL2Tx
 	idx := 1
 	for _, inst := range instructions.Instructions {
 		switch inst.Type {
@@ -71,7 +73,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				LoadAmount: big.NewInt(int64(inst.Amount)),
 				Type:       common.TxTypeCreateAccountDeposit,
 			}
-			batchL1txs = append(batchL1txs, &tx)
+			batchL1Txs = append(batchL1Txs, &tx)
 			if accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx == common.Idx(0) { // if account.Idx is not set yet, set it and increment idx
 				accounts[idxTokenIDToString(inst.From, inst.TokenID)].Idx = common.Idx(idx)
 
@@ -90,7 +92,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				}
 				accounts[idxTokenIDToString(inst.To, inst.TokenID)].Idx = common.Idx(idx)
 				l1CreatedAccounts[idxTokenIDToString(inst.To, inst.TokenID)] = accounts[idxTokenIDToString(inst.To, inst.TokenID)]
-				batchCoordinatorL1txs = append(batchCoordinatorL1txs, &tx)
+				batchCoordinatorL1Txs = append(batchCoordinatorL1Txs, &tx)
 				idx++
 			}
 
@@ -120,7 +122,7 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 			tx.Signature = sig
 
 			accounts[idxTokenIDToString(inst.From, inst.TokenID)].Nonce++
-			batchL2txs = append(batchL2txs, &tx)
+			batchPoolL2Txs = append(batchPoolL2Txs, &tx)
 
 		case common.TxTypeExit, common.TxTypeForceExit:
 			tx := common.L1Tx{
@@ -130,22 +132,30 @@ func GenerateTestTxs(t *testing.T, instructions Instructions) ([][]*common.L1Tx,
 				Amount: big.NewInt(int64(inst.Amount)),
 				Type:   common.TxTypeExit,
 			}
-			batchL1txs = append(batchL1txs, &tx)
+			batchL1Txs = append(batchL1Txs, &tx)
 		case TypeNewBatch:
-			l1txs = append(l1txs, batchL1txs)
-			coordinatorL1txs = append(coordinatorL1txs, batchCoordinatorL1txs)
-			l2txs = append(l2txs, batchL2txs)
-			batchL1txs = []*common.L1Tx{}
-			batchCoordinatorL1txs = []*common.L1Tx{}
-			batchL2txs = []*common.PoolL2Tx{}
+			l1Txs = append(l1Txs, batchL1Txs)
+			coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
+			poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
+			batchL1Txs = []*common.L1Tx{}
+			batchCoordinatorL1Txs = []*common.L1Tx{}
+			batchPoolL2Txs = []*common.PoolL2Tx{}
 		default:
 			continue
 		}
 
 	}
-	l1txs = append(l1txs, batchL1txs)
-	coordinatorL1txs = append(coordinatorL1txs, batchCoordinatorL1txs)
-	l2txs = append(l2txs, batchL2txs)
+	l1Txs = append(l1Txs, batchL1Txs)
+	coordinatorL1Txs = append(coordinatorL1Txs, batchCoordinatorL1Txs)
+	poolL2Txs = append(poolL2Txs, batchPoolL2Txs)
 
-	return l1txs, coordinatorL1txs, l2txs
+	return l1Txs, coordinatorL1Txs, poolL2Txs
+}
+
+func GenerateTestTxsFromSet(t *testing.T, set string) ([][]*common.L1Tx, [][]*common.L1Tx, [][]*common.PoolL2Tx) {
+	parser := NewParser(strings.NewReader(set))
+	instructions, err := parser.Parse()
+	require.Nil(t, err)
+
+	return GenerateTestTxs(t, instructions)
 }
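A hypothetical usage of the new helper, matching the call commented out in coordinator_test.go above (SetTest0 is assumed to be a predefined instruction set in the test package):

func TestGenerateTestTxsFromSet(t *testing.T) {
	// Parse a DSL instruction set and turn it into per-batch tx slices.
	l1Txs, coordinatorL1Txs, poolL2Txs := GenerateTestTxsFromSet(t, SetTest0)
	_, _, _ = l1Txs, coordinatorL1Txs, poolL2Txs
}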
@@ -52,7 +52,7 @@ func NewTxSelector(dbpath string, synchronizerStateDB *statedb.StateDB, l2 *l2db
 
 // Reset tells the TxSelector to get it's internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum uint64) error {
+func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
 	err := txsel.localAccountsDB.Reset(batchNum, true)
 	if err != nil {
 		return err
@@ -61,7 +61,7 @@ func (txsel *TxSelector) Reset(batchNum uint64) error {
 }
 
 // GetL2TxSelection returns a selection of the L2Txs for the next batch, from the L2DB pool
-func (txsel *TxSelector) GetL2TxSelection(batchNum uint64) ([]*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL2TxSelection(batchNum common.BatchNum) ([]*common.PoolL2Tx, error) {
 	// get pending l2-tx from tx-pool
 	l2TxsRaw, err := txsel.l2db.GetPendingTxs() // once l2db ready, maybe use parameter 'batchNum'
 	if err != nil {
@@ -90,7 +90,7 @@ func (txsel *TxSelector) GetL2TxSelection(batchNum uint64) ([]*common.PoolL2Tx,
 }
 
 // GetL1L2TxSelection returns the selection of L1 + L2 txs
-func (txsel *TxSelector) GetL1L2TxSelection(batchNum uint64, l1txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
+func (txsel *TxSelector) GetL1L2TxSelection(batchNum common.BatchNum, l1txs []*common.L1Tx) ([]*common.L1Tx, []*common.L1Tx, []*common.PoolL2Tx, error) {
 	// apply l1-user-tx to localAccountDB
 	// create new leaves
 	// update balances
@@ -153,6 +153,9 @@ func (txsel *TxSelector) checkIfAccountExistOrPending(idx common.Idx) bool {
 
 func (txsel *TxSelector) getL2Profitable(txs txs, max uint64) txs {
 	sort.Sort(txs)
+	if len(txs) < int(max) {
+		return txs
+	}
 	return txs[:max]
 }
 func (txsel *TxSelector) createL1OperatorTxForL2Tx(accounts []*common.Account) []*common.L1Tx {
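The guard added to getL2Profitable matters because slicing past a slice's length panics at runtime; a minimal illustration with stand-in values:

package main

import "fmt"

func main() {
	txs := []int{3, 2, 1} // stand-ins for the sorted txs
	max := 5
	if len(txs) < max {
		fmt.Println(txs) // without the guard, txs[:max] would panic: slice bounds out of range
		return
	}
	fmt.Println(txs[:max])
}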