Redo coordinator structure, connect API to node

- API:
	- Modify the constructor so that the hardcoded rollup constants no
	  longer need to be passed in (introduce a `Config` and use `configAPI`
	  internally); see the sketch below
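
A minimal sketch of what the new constructor shape could look like (every
name except `Config` and `configAPI` is an assumption, not the actual
hermez-node API):

```go
package api

// Illustrative only: the real constants live in the common package and
// were previously hardcoded / passed individually to the constructor.
type RollupConstants struct{ /* ... */ }
type AuctionConstants struct{ /* ... */ }

// Config groups the constants so callers no longer pass them one by one.
type Config struct {
	RollupConstants  RollupConstants
	AuctionConstants AuctionConstants
}

// configAPI is the internal representation that the API serves.
type configAPI struct {
	rollup  RollupConstants
	auction AuctionConstants
}

type API struct {
	cg configAPI
}

func NewAPI(cfg Config) *API {
	return &API{cg: configAPI{
		rollup:  cfg.RollupConstants,
		auction: cfg.AuctionConstants,
	}}
}
```
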
- Common:
	- Update rollup constants with proper *big.Int when required
	- Add BidCoordinator and Slot structs used by the HistoryDB and
	  Synchronizer.
	- Add helper methods to AuctionConstants
	- AuctionVariables: Add column `DefaultSlotSetBidSlotNum` (in the SQL
	  table: `default_slot_set_bid_slot_num`), which indicates the slotNum
	  at which the specified `DefaultSlotSetBid` starts applying (see the
	  sketch below)
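
A short sketch of the assumed semantics (the helper is hypothetical; only
the field and column names come from this commit): a pending
`DefaultSlotSetBid` update only takes effect from
`DefaultSlotSetBidSlotNum` onwards, and earlier slots keep the previous
value.

```go
package common

import "math/big"

// defaultSlotSetBidAt is a hypothetical helper showing the assumed
// meaning of DefaultSlotSetBidSlotNum.
func defaultSlotSetBidAt(slotNum, defaultSlotSetBidSlotNum int64,
	previousBid, updatedBid *big.Int) *big.Int {
	if slotNum >= defaultSlotSetBidSlotNum {
		return updatedBid
	}
	return previousBid
}
```
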
- Config:
	- Move coordinator exclusive configuration from the node config to the
	  coordinator config
- Coordinator:
	- Reorganize the code so that the goroutines are started and stopped
	  from the coordinator itself instead of from the node.
	- Remove all stop and stopped channels, and use context.Context and
	  sync.WaitGroup instead (see the lifecycle sketch below).
	- Remove BatchInfo setters and assign variables directly
	- In ServerProof and ServerProofPool, use a context instead of a stop
	  channel.
	- Use message passing to notify the coordinator about sync updates and
	  reorgs
	- Introduce the Pipeline, which can be started and stopped by the
	  Coordinator
	- Introduce the TxManager, which manages ethereum transactions (the
	  TxManager is also in charge of making the forge call to the rollup
	  smart contract).  The TxManager keeps track of each ethereum
	  transaction and:
		1. Waits for the transaction to be accepted
		2. Waits for the transaction to be confirmed for N blocks
	  (see the confirmation-wait sketch below)
	- In the forge logic, first prepare the batch and then wait for an
	  available server proof, so that all the work is already done once a
	  proof server becomes ready.
	- Remove the `isForgeSequence` method, which queried the smart
	  contract, and instead use the notifications sent by the Synchronizer
	  to figure out if it's forging time.
	- Update the test (a minimal test to manually check that the
	  coordinator starts)
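
A simplified sketch of the lifecycle and message-passing pattern described
above; `Start`, `Stop`, `SendMsg` and `MsgSyncStats` appear in the test
diff below, while the channel size and the `handleMsg` dispatch are
illustrative:

```go
package coordinator

import (
	"context"
	"sync"
)

type Coordinator struct {
	msgCh  chan interface{}
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

// handleMsg would dispatch on the message type: sync stats, reorgs, etc.
func (c *Coordinator) handleMsg(msg interface{}) {}

func (c *Coordinator) Start() {
	ctx, cancel := context.WithCancel(context.Background())
	c.cancel = cancel
	c.msgCh = make(chan interface{}, 16)
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case msg := <-c.msgCh:
				c.handleMsg(msg)
			}
		}
	}()
}

// SendMsg is how the node notifies the coordinator about sync updates
// and reorgs: message passing instead of the coordinator polling.
func (c *Coordinator) SendMsg(msg interface{}) {
	c.msgCh <- msg
}

func (c *Coordinator) Stop() {
	c.cancel()  // replaces the old stop/stopped channel pairs
	c.wg.Wait() // block until every goroutine has returned
}
```

Cancelling a context and then waiting on a WaitGroup gives a deterministic
shutdown, which the old one-shot stop channels could not guarantee.
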
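A sketch of the TxManager's two waits, using go-ethereum's
`bind.WaitMined` for the first step; the function name is hypothetical and
the resend/error-recovery logic a real TxManager needs is omitted:

```go
package coordinator

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

func waitForgeConfirmation(ctx context.Context, client *ethclient.Client,
	tx *types.Transaction, nConfirmations uint64) error {
	// 1. Wait for the transaction to be accepted (mined into a block).
	receipt, err := bind.WaitMined(ctx, client, tx)
	if err != nil {
		return err
	}
	// 2. Wait until that block is nConfirmations blocks deep.
	for {
		lastBlock, err := client.BlockNumber(ctx)
		if err != nil {
			return err
		}
		if lastBlock >= receipt.BlockNumber.Uint64()+nConfirmations {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}
```
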
- HistoryDB:
	- Add a method to get the number of batches forged in a slot (used to
	  detect when a slot has passed the bid winner's forging deadline)
	- Add a method to get the best bid and associated coordinator of a slot
	  (used to determine the forgerAddress that can forge the slot); both
	  queries are sketched below
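
The two queries could look roughly like this; the function, table and
column names are assumptions based on the descriptions above, and
`BidCoordinator` here is only a trimmed stand-in for the struct added in
common:

```go
package historydb

import "github.com/jmoiron/sqlx"

// BidCoordinator: trimmed stand-in for the struct added in common.
type BidCoordinator struct {
	SlotNum    int64  `db:"slot_num"`
	BidValue   string `db:"bid_value"`
	BidderAddr string `db:"bidder_addr"`
	ForgerAddr string `db:"forger_addr"`
	URL        string `db:"url"`
}

// batchesInSlot counts the batches forged inside a slot's block range;
// zero past the deadline block means the bid winner missed its deadline.
func batchesInSlot(db *sqlx.DB, firstBlock, lastBlock int64) (int, error) {
	var n int
	err := db.Get(&n,
		`SELECT COUNT(*) FROM batch
		 WHERE eth_block_num BETWEEN $1 AND $2;`,
		firstBlock, lastBlock)
	return n, err
}

// bestBidCoordinator returns the highest bid of a slot joined with the
// coordinator that placed it, so the caller learns the forger address.
func bestBidCoordinator(db *sqlx.DB, slotNum int64) (*BidCoordinator, error) {
	var b BidCoordinator
	err := db.Get(&b,
		`SELECT bid.slot_num, bid.bid_value, bid.bidder_addr,
		        coordinator.forger_addr, coordinator.url
		 FROM bid INNER JOIN coordinator
		     ON bid.bidder_addr = coordinator.bidder_addr
		 WHERE bid.slot_num = $1
		 ORDER BY bid.bid_value DESC LIMIT 1;`,
		slotNum)
	return &b, err
}
```
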
- General:
	- Rename some instances of `currentBlock` to `lastBlock` to be
	  clearer.
- Node:
	- Connect the API to the node and call the methods to update its cached
	  state when the sync advances blocks.
	- Call the methods to update the Coordinator state when the sync
	  advances blocks or finds reorgs (see the sync-loop sketch below).
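
A sketch of one sync-loop iteration under these changes; only `SendMsg`
and `MsgSyncStats` are taken from the test diff below, everything else
(`MsgReorg`, `SetNetworkStats`, the interfaces, the trimmed `Stats`) is
hypothetical:

```go
package node

import "context"

// Stats mirrors just the fields the test diff below exercises.
type Stats struct {
	Eth  struct{ LastBlock int64 }
	Sync struct{ LastBlock int64 }
}

type MsgSyncStats struct{ Stats Stats }
type MsgReorg struct{ LastValidBlock int64 }

type coordIface interface{ SendMsg(msg interface{}) }
type apiIface interface{ SetNetworkStats(stats Stats) }

// syncLoopIteration advances the synchronizer one step, refreshes the
// API's cached state, and notifies the Coordinator, which then decides
// on its own whether it is forging time.
func syncLoopIteration(ctx context.Context,
	syncStep func(ctx context.Context) (Stats, bool, error),
	api apiIface, coord coordIface) error {
	stats, reorged, err := syncStep(ctx)
	if err != nil {
		return err
	}
	if reorged {
		coord.SendMsg(MsgReorg{LastValidBlock: stats.Sync.LastBlock})
	}
	api.SetNetworkStats(stats)
	coord.SendMsg(MsgSyncStats{Stats: stats})
	return nil
}
```
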
- Synchronizer:
	- Add an Auction field to the Stats, which contains the current slot
	  along with info about the highest bidder and other related info
	  required to know who can forge in the current block (shape sketched
	  below).
	- Better organization of cached state:
		- On Sync, update the internal cached state
		- On Init or Reorg, load the state from HistoryDB into the
		  internal cached state.
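
The shape of the new Stats can be inferred from how the test diff below
uses it; a trimmed sketch (field types beyond those fixed by that usage
are assumptions):

```go
package synchronizer

import (
	"math/big"

	ethCommon "github.com/ethereum/go-ethereum/common"
)

// Slot: trimmed guess at the Slot struct added in common.
type Slot struct {
	SlotNum  int64
	Forger   ethCommon.Address // coordinator allowed to forge this slot
	BidValue *big.Int          // highest bid, if any
}

// Stats, trimmed to the fields the test exercises.
type Stats struct {
	Eth struct {
		LastBlock int64 // last block mined on ethereum
	}
	Sync struct {
		LastBlock int64 // last block processed by the synchronizer
		Auction   struct {
			CurrentSlot Slot // highest bidder / who can forge now
		}
	}
}
```
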
commit 3b99953007 (parent bf88eb60b8)
Author: Eduard S
Date: 2020-11-13 18:11:58 +01:00

31 changed files with 1195 additions and 716 deletions


@@ -14,6 +14,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/synchronizer"
 	"github.com/hermeznetwork/hermez-node/test"
 	"github.com/hermeznetwork/hermez-node/txselector"
 	"github.com/stretchr/testify/assert"
@@ -29,15 +30,6 @@ func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBuilder) {
 	synchSdb, err := statedb.NewStateDB(synchDBPath, statedb.TypeSynchronizer, nLevels)
 	assert.Nil(t, err)
-	// txselDBPath, err := ioutil.TempDir("", "tmpTxSelDB")
-	// require.Nil(t, err)
-	// bbDBPath, err := ioutil.TempDir("", "tmpBBDB")
-	// require.Nil(t, err)
-	// txselSdb, err := statedb.NewLocalStateDB(txselDBPath, synchSdb, statedb.TypeTxSelector, nLevels)
-	// assert.Nil(t, err)
-	// bbSdb, err := statedb.NewLocalStateDB(bbDBPath, synchSdb, statedb.TypeBatchBuilder, nLevels)
-	// assert.Nil(t, err)
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.Nil(t, err)
@@ -60,85 +52,6 @@ func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBuilder) {
 	return txsel, bb
 }
 
-// CoordNode is an example of a Node that handles the goroutines for the coordinator
-type CoordNode struct {
-	c                     *Coordinator
-	stopForge             chan bool
-	stopGetProofCallForge chan bool
-	stopForgeCallConfirm  chan bool
-}
-
-func NewCoordNode(c *Coordinator) *CoordNode {
-	return &CoordNode{
-		c: c,
-	}
-}
-
-func (cn *CoordNode) Start() {
-	log.Debugw("Starting CoordNode...")
-	cn.stopForge = make(chan bool)
-	cn.stopGetProofCallForge = make(chan bool)
-	cn.stopForgeCallConfirm = make(chan bool)
-	queueSize := 8
-	batchCh0 := make(chan *BatchInfo, queueSize)
-	batchCh1 := make(chan *BatchInfo, queueSize)
-	go func() {
-		for {
-			select {
-			case <-cn.stopForge:
-				return
-			default:
-				if forge, err := cn.c.ForgeLoopFn(batchCh0, cn.stopForge); err == ErrStop {
-					return
-				} else if err != nil {
-					log.Errorw("CoordNode ForgeLoopFn", "error", err)
-					time.Sleep(200 * time.Millisecond) // Avoid overflowing log with errors
-				} else if !forge {
-					time.Sleep(200 * time.Millisecond)
-				}
-			}
-		}
-	}()
-	go func() {
-		for {
-			select {
-			case <-cn.stopGetProofCallForge:
-				return
-			default:
-				if err := cn.c.GetProofCallForgeLoopFn(
-					batchCh0, batchCh1, cn.stopGetProofCallForge); err == ErrStop {
-					return
-				} else if err != nil {
-					log.Errorw("CoordNode GetProofCallForgeLoopFn", "error", err)
-				}
-			}
-		}
-	}()
-	go func() {
-		for {
-			select {
-			case <-cn.stopForgeCallConfirm:
-				return
-			default:
-				if err := cn.c.ForgeCallConfirmLoopFn(
-					batchCh1, cn.stopForgeCallConfirm); err == ErrStop {
-					return
-				} else if err != nil {
-					log.Errorw("CoordNode ForgeCallConfirmLoopFn", "error", err)
-				}
-			}
-		}
-	}()
-}
-
-func (cn *CoordNode) Stop() {
-	log.Debugw("Stopping CoordNode...")
-	cn.stopForge <- true
-	cn.stopGetProofCallForge <- true
-	cn.stopForgeCallConfirm <- true
-}
-
 type timer struct {
 	time int64
 }
@@ -149,9 +62,12 @@ func (t *timer) Time() int64 {
 	return currentTime
 }
 
-func waitForSlot(t *testing.T, c *test.Client, slot int64) {
+var forger ethCommon.Address
+var bidder ethCommon.Address
+
+func waitForSlot(t *testing.T, coord *Coordinator, c *test.Client, slot int64) {
 	for {
-		blockNum, err := c.EthCurrentBlock()
+		blockNum, err := c.EthLastBlock()
 		require.Nil(t, err)
 		nextBlockSlot, err := c.AuctionGetSlotNumber(blockNum + 1)
 		require.Nil(t, err)
@@ -159,20 +75,35 @@ func waitForSlot(t *testing.T, c *test.Client, slot int64) {
 			break
 		}
 		c.CtlMineBlock()
 		time.Sleep(100 * time.Millisecond)
+		var stats synchronizer.Stats
+		stats.Eth.LastBlock = c.CtlLastBlock()
+		stats.Sync.LastBlock = c.CtlLastBlock()
+		canForge, err := c.AuctionCanForge(forger, blockNum+1)
+		require.Nil(t, err)
+		if canForge {
+			// fmt.Println("DBG canForge")
+			stats.Sync.Auction.CurrentSlot.Forger = forger
+		}
+		coord.SendMsg(MsgSyncStats{
+			Stats: stats,
+		})
 	}
 }
 
 func TestCoordinator(t *testing.T) {
 	txsel, bb := newTestModules(t)
-	conf := Config{}
+	bidder = ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f")
+	forger = ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+	conf := Config{
+		ForgerAddress: forger,
+	}
 	hdb := &historydb.HistoryDB{}
 	serverProofs := []ServerProofInterface{&ServerProofMock{}, &ServerProofMock{}}
 	var timer timer
 	ethClientSetup := test.NewClientSetupExample()
-	bidder := ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f")
-	forger := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
 	ethClient := test.NewClient(true, &timer, &bidder, ethClientSetup)
 	// Bid for slot 2 and 4
@@ -183,28 +114,40 @@
 	_, err = ethClient.AuctionBidSimple(4, big.NewInt(9999))
 	require.Nil(t, err)
 
-	c := NewCoordinator(conf, hdb, txsel, bb, serverProofs, ethClient)
-	cn := NewCoordNode(c)
-	cn.Start()
+	scConsts := &synchronizer.SCConsts{
+		Rollup:   *ethClientSetup.RollupConstants,
+		Auction:  *ethClientSetup.AuctionConstants,
+		WDelayer: *ethClientSetup.WDelayerConstants,
+	}
+	initSCVars := &synchronizer.SCVariables{
+		Rollup:   *ethClientSetup.RollupVariables,
+		Auction:  *ethClientSetup.AuctionVariables,
+		WDelayer: *ethClientSetup.WDelayerVariables,
+	}
+	c := NewCoordinator(conf, hdb, txsel, bb, serverProofs, ethClient, scConsts, initSCVars)
+	c.Start()
 	time.Sleep(1 * time.Second)
 
+	// NOTE: With the current test, the coordinator will enter in forge
+	// time before the bidded slot because no one else is forging in the
+	// other slots before the slot deadline.
 	// simulate forgeSequence time
-	waitForSlot(t, ethClient, 2)
-	log.Info("simulate entering in forge time")
+	waitForSlot(t, c, ethClient, 2)
+	log.Info("~~~ simulate entering in forge time")
 	time.Sleep(1 * time.Second)
 
 	// simulate going out from forgeSequence
-	waitForSlot(t, ethClient, 3)
-	log.Info("simulate going out from forge time")
+	waitForSlot(t, c, ethClient, 3)
+	log.Info("~~~ simulate going out from forge time")
 	time.Sleep(1 * time.Second)
 
 	// simulate entering forgeSequence time again
-	waitForSlot(t, ethClient, 4)
-	log.Info("simulate entering in forge time again")
+	waitForSlot(t, c, ethClient, 4)
+	log.Info("~~~ simulate entering in forge time again")
 	time.Sleep(2 * time.Second)
 
 	// simulate stopping forgerLoop by channel
-	log.Info("simulate stopping forgerLoop by closing coordinator stopch")
-	cn.Stop()
+	log.Info("~~~ simulate stopping forgerLoop by closing coordinator stopch")
+	c.Stop()
 	time.Sleep(1 * time.Second)
 }