
Redo coordinator structure, connect API to node

- API:
	- Modify the constructor so that hardcoded rollup constants don't need
	  to be passed (introduce a `Config` and use `configAPI` internally)
- Common:
	- Update rollup constants with proper *big.Int when required
	- Add BidCoordinator and Slot structs used by the HistoryDB and
	  Synchronizer.
	- Add helper methods to AuctionConstants (a worked example follows the
	  `common/ethauction.go` diff below)
	- AuctionVariables: Add column `DefaultSlotSetBidSlotNum` (in the SQL
	  table: `default_slot_set_bid_slot_num`), which indicates the slotNum
	  at which the specified `DefaultSlotSetBid` starts applying.
- Config:
	- Move coordinator exclusive configuration from the node config to the
	  coordinator config
- Coordinator:
	- Reorganize the code so that the goroutines are started and stopped
	  from the coordinator itself instead of from the node.
	- Remove all stop and stopped channels, and use context.Context and
	  sync.WaitGroup instead.
	- Remove BatchInfo setters and assign the variables directly
	- In ServerProof and ServerProofPool use a context instead of a stop
	  channel.
	- Use message passing to notify the coordinator about sync updates and
	  reorgs (a usage sketch in Go follows this list)
	- Introduce the Pipeline, which can be started and stopped by the
	  Coordinator
	- Introduce the TxManager, which manages ethereum transactions (the
	  TxManager is also in charge of making the forge call to the rollup
	  smart contract). For each ethereum transaction it keeps, the
	  TxManager:
		1. Waits for the transaction to be accepted (mined)
		2. Waits for the transaction to be confirmed for N blocks
	- In the forge logic, first prepare the batch and only then wait for an
	  available server proof, so that all the work is ready as soon as a
	  proof server becomes available.
	- Remove the `isForgeSequence` method, which queried the smart
	  contract; instead, use the notifications sent by the Synchronizer to
	  figure out if it's forging time.
	- Update test (which is a minimal test to manually see if the
	  coordinator starts)
- HistoryDB:
	- Add method to get the number of batches in a slot (used to detect
	  when a slot's bid winner has passed the forging deadline)
	- Add method to get the best bid and associated coordinator of a slot
	  (used to determine the forgerAddress that can forge the slot); a
	  usage sketch of both methods follows the `db/historydb/historydb.go`
	  diff below
- General:
	- Rename some instances of `currentBlock` to `lastBlock` to be
	  clearer.
- Node:
	- Connect the API to the node and call the methods to update cached
	  state when the sync advances blocks.
	- Call methods to update Coordinator state when the sync advances blocks
	  and finds reorgs.
- Synchronizer:
	- Add an Auction field to the Stats, which contains the current slot
	  with info about the highest bidder and other related info required
	  to know who can forge in the current block.
	- Better organization of cached state:
		- On Sync, update the internal cached state
		- On Init or Reorg, load the state from HistoryDB into the
		  internal cached state.
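
The new wiring can be summarized with a minimal sketch (not code from this
commit; only `SendMsg` and the message types are the ones introduced here).
The node feeds the coordinator, which reacts on its own:

package nodesketch // illustrative only

import (
	"github.com/hermeznetwork/hermez-node/coordinator"
	"github.com/hermeznetwork/hermez-node/synchronizer"
)

// notifyCoordinator pushes synchronizer state to the coordinator as messages;
// the coordinator decides by itself whether to start or stop the forging
// pipeline, so no stop/stopped channels are needed.
func notifyCoordinator(coord *coordinator.Coordinator, stats synchronizer.Stats, reorged bool) {
	if reorged {
		coord.SendMsg(coordinator.MsgSyncReorg{})
	}
	coord.SendMsg(coordinator.MsgSyncStats{Stats: stats})
}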
feature/sql-semaphore1
Eduard S 3 years ago
commit 3b99953007
31 changed files with 1172 additions and 693 deletions
  1. api/api.go (+7 -3)
  2. api/api_test.go (+9 -3)
  3. api/config.go (+27 -0)
  4. api/config_test.go (+3 -16)
  5. api/parsers.go (+10 -5)
  6. cli/node/cfg.buidler.toml (+5 -13)
  7. cli/node/coordcfg.example.toml (+12 -0)
  8. common/batch.go (+25 -0)
  9. common/bid.go (+24 -0)
  10. common/block.go (+0 -25)
  11. common/ethauction.go (+25 -1)
  12. common/ethrollup.go (+5 -4)
  13. common/utils.go (+5 -0)
  14. config/config.go (+14 -6)
  15. coordinator/batch.go (+18 -43)
  16. coordinator/coordinator.go (+347 -192)
  17. coordinator/coordinator_test.go (+47 -104)
  18. coordinator/proofpool.go (+12 -17)
  19. db/historydb/historydb.go (+29 -10)
  20. db/historydb/historydb_test.go (+68 -5)
  21. db/migrations/0001.sql (+1 -0)
  22. eth/auction_test.go (+14 -14)
  23. eth/ethereum.go (+3 -3)
  24. eth/rollup_test.go (+25 -25)
  25. eth/wdelayer_test.go (+9 -9)
  26. node/node.go (+212 -107)
  27. synchronizer/synchronizer.go (+201 -76)
  28. synchronizer/synchronizer_test.go (+1 -1)
  29. test/debugapi/debugapi.go (+3 -1)
  30. test/ethclient.go (+5 -4)
  31. test/ethclient_test.go (+6 -6)

api/api.go (+7 -3)

@ -42,7 +42,7 @@ func NewAPI(
hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB,
config *configAPI,
config *Config,
) (*API, error) {
// Check input
// TODO: is stateDB only needed for explorer endpoints or for both?
@ -54,8 +54,12 @@ func NewAPI(
}
a := &API{
h: hdb,
cg: config,
h: hdb,
cg: &configAPI{
RollupConstants: *newRollupConstants(config.RollupConstants),
AuctionConstants: config.AuctionConstants,
WDelayerConstants: config.WDelayerConstants,
},
s: sdb,
l2: l2db,
}

api/api_test.go (+9 -3)

@ -219,7 +219,12 @@ func TestMain(m *testing.M) {
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants)
config = getConfigTest()
_config := getConfigTest()
config = configAPI{
RollupConstants: *newRollupConstants(_config.RollupConstants),
AuctionConstants: _config.AuctionConstants,
WDelayerConstants: _config.WDelayerConstants,
}
// API
apiGin := gin.Default()
@ -230,7 +235,7 @@ func TestMain(m *testing.M) {
hdb,
sdb,
l2DB,
&config,
&_config,
)
if err != nil {
panic(err)
@ -295,7 +300,8 @@ func TestMain(m *testing.M) {
}
for _, block := range blocksData {
// Insert block into HistoryDB
if err := api.h.AddBlockSCData(&block); err != nil { //nolint:gosec block is used as read only in the function
// nolint reason: block is used as read only in the function
if err := api.h.AddBlockSCData(&block); err != nil { //nolint:gosec
panic(err)
}
// Extract data

api/config.go (+27 -0)

@ -26,6 +26,33 @@ type rollupConstants struct {
ExchangeMultiplier int `json:"exchangeMultiplier"`
}
func newRollupConstants(publicConstants common.RollupConstants) *rollupConstants {
return &rollupConstants{
PublicConstants: publicConstants,
MaxFeeIdxCoordinator: common.RollupConstMaxFeeIdxCoordinator,
ReservedIdx: common.RollupConstReservedIDx,
ExitIdx: common.RollupConstExitIDx,
LimitLoadAmount: common.RollupConstLimitLoadAmount,
LimitL2TransferAmount: common.RollupConstLimitL2TransferAmount,
LimitTokens: common.RollupConstLimitTokens,
L1CoordinatorTotalBytes: common.RollupConstL1CoordinatorTotalBytes,
L1UserTotalBytes: common.RollupConstL1UserTotalBytes,
MaxL1UserTx: common.RollupConstMaxL1UserTx,
MaxL1Tx: common.RollupConstMaxL1Tx,
InputSHAConstantBytes: common.RollupConstInputSHAConstantBytes,
NumBuckets: common.RollupConstNumBuckets,
MaxWithdrawalDelay: common.RollupConstMaxWithdrawalDelay,
ExchangeMultiplier: common.RollupConstExchangeMultiplier,
}
}
// Config of the API
type Config struct {
RollupConstants common.RollupConstants
AuctionConstants common.AuctionConstants
WDelayerConstants common.WDelayerConstants
}
type configAPI struct {
RollupConstants rollupConstants `json:"hermez"`
AuctionConstants common.AuctionConstants `json:"auction"`

api/config_test.go (+3 -16)

@ -9,22 +9,9 @@ import (
"github.com/stretchr/testify/assert"
)
func getConfigTest() configAPI {
var config configAPI
func getConfigTest() Config {
var config Config
config.RollupConstants.ExchangeMultiplier = common.RollupConstExchangeMultiplier
config.RollupConstants.ExitIdx = common.RollupConstExitIDx
config.RollupConstants.ReservedIdx = common.RollupConstReservedIDx
config.RollupConstants.LimitLoadAmount, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10)
config.RollupConstants.LimitL2TransferAmount, _ = new(big.Int).SetString("6277101735386680763835789423207666416102355444464034512896", 10)
config.RollupConstants.LimitTokens = common.RollupConstLimitTokens
config.RollupConstants.L1CoordinatorTotalBytes = common.RollupConstL1CoordinatorTotalBytes
config.RollupConstants.L1UserTotalBytes = common.RollupConstL1UserTotalBytes
config.RollupConstants.MaxL1UserTx = common.RollupConstMaxL1UserTx
config.RollupConstants.MaxL1Tx = common.RollupConstMaxL1Tx
config.RollupConstants.InputSHAConstantBytes = common.RollupConstInputSHAConstantBytes
config.RollupConstants.NumBuckets = common.RollupConstNumBuckets
config.RollupConstants.MaxWithdrawalDelay = common.RollupConstMaxWithdrawalDelay
var rollupPublicConstants common.RollupConstants
rollupPublicConstants.AbsoluteMaxL1L2BatchTimeout = 240
rollupPublicConstants.HermezAuctionContract = ethCommon.HexToAddress("0x500D1d6A4c7D8Ae28240b47c8FCde034D827fD5e")
@ -50,7 +37,7 @@ func getConfigTest() configAPI {
wdelayerConstants.MaxEmergencyModeTime = uint64(1000000)
wdelayerConstants.MaxWithdrawalDelay = uint64(10000000)
config.RollupConstants.PublicConstants = rollupPublicConstants
config.RollupConstants = rollupPublicConstants
config.AuctionConstants = auctionConstants
config.WDelayerConstants = wdelayerConstants

api/parsers.go (+10 -5)

@ -51,17 +51,20 @@ func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err
return fromItem, order, limit, nil
}
func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009 res may be not overwriten
// nolint reason: res may be not overwriten
func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009
str := c.Query(name)
return stringToUint(str, name, dflt, min, max)
}
func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009 res may be not overwriten
// nolint reason: res may be not overwriten
func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009
str := c.Query(name)
return stringToInt64(str, name, dflt, min, max)
}
func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009 res may be not overwriten
// nolint reason: res may be not overwriten
func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009
str := c.Query(name)
if str == "" {
return dflt, nil
@ -296,12 +299,14 @@ func parseParamIdx(c paramer) (*common.Idx, error) {
return stringToIdx(idxStr, name)
}
func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009 res may be not overwriten
// nolint reason: res may be not overwriten
func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009
str := c.Param(name)
return stringToUint(str, name, dflt, min, max)
}
func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009 res may be not overwriten
// nolint reason: res may be not overwriten
func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009
str := c.Param(name)
return stringToInt64(str, name, dflt, min, max)
}

cli/node/cfg.buidler.toml (+5 -13)

@ -1,3 +1,7 @@
[API]
Address = "localhost:8086"
Explorer = true
[Debug]
APIAddress = "localhost:12345"
@ -11,11 +15,6 @@ User = "hermez"
Password = "yourpasswordhere"
Name = "hermez"
[L2DB]
SafetyPeriod = 10
MaxTxs = 512
TTL = "24h"
[Web3]
URL = "http://localhost:8545"
@ -35,16 +34,9 @@ WDelayer = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZ = "0x2b7dEe2CF60484325716A1c6A193519c8c3b19F3"
TokenHEZName = "Hermez Network Token"
[EthClient]
CallGasLimit = 300000
DeployGasLimit = 1000000
GasPriceDiv = 100
ReceiptTimeout = "60s"
IntervalReceiptLoop = "200ms"
[Synchronizer.InitialVariables.Auction]
DonationAddress = "0x0000000000000000000000000000000000000001"
BootCoordinator = "0x0000000000000000000000000000000000000001"
BootCoordinator = "0xb4124cEB3451635DAcedd11767f004d8a28c6eE7"
DefaultSlotSetBid = [
"10000000000000000000",
"10000000000000000000",

cli/node/coordcfg.example.toml (+12 -0)

@ -1,2 +1,14 @@
ForgerAddress = "0x6BB84Cc84D4A34467aD12a2039A312f7029e2071"
ForgerLoopInterval = "500ms"
[EthClient]
CallGasLimit = 300000
DeployGasLimit = 1000000
GasPriceDiv = 100
ReceiptTimeout = "60s"
IntervalReceiptLoop = "200ms"
[L2DB]
SafetyPeriod = 10
MaxTxs = 512
TTL = "24h"

common/batch.go (+25 -0)

@ -44,3 +44,28 @@ func BatchNumFromBytes(b []byte) (BatchNum, error) {
batchNum := binary.BigEndian.Uint64(b[:batchNumBytesLen])
return BatchNum(batchNum), nil
}
// BatchData contains the information of a Batch
type BatchData struct {
// L1UserTxs that were forged in the batch
L1Batch bool // TODO: Remove once Batch.ForgeL1TxsNum is a pointer
// L1UserTxs []common.L1Tx
L1CoordinatorTxs []L1Tx
L2Txs []L2Tx
CreatedAccounts []Account
ExitTree []ExitInfo
Batch Batch
}
// NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData {
return &BatchData{
L1Batch: false,
// L1UserTxs: make([]common.L1Tx, 0),
L1CoordinatorTxs: make([]L1Tx, 0),
L2Txs: make([]L2Tx, 0),
CreatedAccounts: make([]Account, 0),
ExitTree: make([]ExitInfo, 0),
Batch: Batch{},
}
}

common/bid.go (+24 -0)

@ -13,3 +13,27 @@ type Bid struct {
EthBlockNum int64 `meddler:"eth_block_num"`
Bidder ethCommon.Address `meddler:"bidder_addr"` // Coordinator reference
}
// BidCoordinator contains the coordinator info of a bid, along with the bid value
type BidCoordinator struct {
SlotNum int64 `meddler:"slot_num"`
DefaultSlotSetBid [6]*big.Int `meddler:"default_slot_set_bid,json"`
BidValue *big.Int `meddler:"bid_value,bigint"`
Bidder ethCommon.Address `meddler:"bidder_addr"` // address of the bidder
Forger ethCommon.Address `meddler:"forger_addr"` // address of the forger
URL string `meddler:"url"` // URL of the coordinators API
}
// Slot contains relevant information of a slot
type Slot struct {
SlotNum int64
DefaultSlotBid *big.Int
StartBlock int64
EndBlock int64
BatchesLen int
BidValue *big.Int
BootCoord bool
Bidder ethCommon.Address
Forger ethCommon.Address
URL string
}

common/block.go (+0 -25)

@ -88,28 +88,3 @@ type BlockData struct {
// TODO: enable when common.WithdrawalDelayerVars is Merged from Synchronizer PR
// WithdrawalDelayerVars *common.WithdrawalDelayerVars
}
// BatchData contains the information of a Batch
type BatchData struct {
// L1UserTxs that were forged in the batch
L1Batch bool // TODO: Remove once Batch.ForgeL1TxsNum is a pointer
// L1UserTxs []common.L1Tx
L1CoordinatorTxs []L1Tx
L2Txs []L2Tx
CreatedAccounts []Account
ExitTree []ExitInfo
Batch Batch
}
// NewBatchData creates an empty BatchData with the slices initialized.
func NewBatchData() *BatchData {
return &BatchData{
L1Batch: false,
// L1UserTxs: make([]common.L1Tx, 0),
L1CoordinatorTxs: make([]L1Tx, 0),
L2Txs: make([]L2Tx, 0),
CreatedAccounts: make([]Account, 0),
ExitTree: make([]ExitInfo, 0),
Batch: Batch{},
}
}

common/ethauction.go (+25 -1)

@ -23,6 +23,28 @@ type AuctionConstants struct {
GovernanceAddress ethCommon.Address `json:"governanceAddress"`
}
// SlotNum returns the slot number of a block number
func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}
return -1
}
// SlotBlocks returns the first and the last block numbers included in that slot
func (c *AuctionConstants) SlotBlocks(slotNum int64) (int64, int64) {
startBlock := c.GenesisBlockNum + slotNum*int64(c.BlocksPerSlot)
endBlock := startBlock + int64(c.BlocksPerSlot) - 1
return startBlock, endBlock
}
// RelativeBlock returns the relative block number within the slot where the
// block number belongs
func (c *AuctionConstants) RelativeBlock(blockNum int64) int64 {
slotNum := c.SlotNum(blockNum)
return blockNum - (c.GenesisBlockNum + (slotNum * int64(c.BlocksPerSlot)))
}
// AuctionVariables are the variables of the Auction Smart Contract
type AuctionVariables struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@ -32,6 +54,8 @@ type AuctionVariables struct {
BootCoordinator ethCommon.Address `json:"bootCoordinator" meddler:"boot_coordinator" validate:"required"`
// The minimum bid value in a series of 6 slots
DefaultSlotSetBid [6]*big.Int `json:"defaultSlotSetBid" meddler:"default_slot_set_bid,json" validate:"required"`
// SlotNum at which the new default_slot_set_bid applies
DefaultSlotSetBidSlotNum int64 `json:"-" meddler:"default_slot_set_bid_slot_num"`
// Distance (#slots) to the closest slot to which you can bid ( 2 Slots = 2 * 40 Blocks = 20 min )
ClosedAuctionSlots uint16 `json:"closedAuctionSlots" meddler:"closed_auction_slots" validate:"required"`
// Distance (#slots) to the farthest slot to which you can bid (30 days = 4320 slots )
@ -48,7 +72,7 @@ type AuctionVariables struct {
func (v *AuctionVariables) Copy() *AuctionVariables {
vCpy := *v
for i := range v.DefaultSlotSetBid {
vCpy.DefaultSlotSetBid[i] = new(big.Int).SetBytes(v.DefaultSlotSetBid[i].Bytes())
vCpy.DefaultSlotSetBid[i] = CopyBigInt(v.DefaultSlotSetBid[i])
}
return &vCpy
}
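
A worked example of the new helpers, assuming a hypothetical deployment with
GenesisBlockNum = 1000 and BlocksPerSlot = 40 (the remaining AuctionConstants
fields are omitted):

consts := common.AuctionConstants{BlocksPerSlot: 40, GenesisBlockNum: 1000}
fmt.Println(consts.SlotNum(1085))       // 2: (1085 - 1000) / 40
fmt.Println(consts.SlotBlocks(2))       // 1080 1119: first and last block of slot 2
fmt.Println(consts.RelativeBlock(1085)) // 5: 1085 - 1080, position of the block within slot 2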

common/ethrollup.go (+5 -4)

@ -61,10 +61,6 @@ const (
RollupConstReservedIDx = 255
// RollupConstExitIDx IDX 1 is reserved for exits
RollupConstExitIDx = 1
// RollupConstLimitLoadAmount Max load amount allowed (loadAmount: L1 --> L2)
RollupConstLimitLoadAmount = (1 << 128)
// RollupConstLimitL2TransferAmount Max amount allowed (amount L2 --> L2)
RollupConstLimitL2TransferAmount = (1 << 192)
// RollupConstLimitTokens Max number of tokens allowed to be registered inside the rollup
RollupConstLimitTokens = (1 << 32)
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
@ -91,6 +87,11 @@ const (
)
var (
// RollupConstLimitLoadAmount Max load amount allowed (loadAmount: L1 --> L2)
RollupConstLimitLoadAmount, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10)
// RollupConstLimitL2TransferAmount Max amount allowed (amount L2 --> L2)
RollupConstLimitL2TransferAmount, _ = new(big.Int).SetString("6277101735386680763835789423207666416102355444464034512896", 10)
// RollupConstEthAddressInternalOnly This ethereum address is used internally for rollup accounts that don't have ethereum address, only Babyjubjub
// This non-ethereum accounts can be created by the coordinator and allow users to have a rollup
// account without needing an ethereum address

common/utils.go (+5 -0)

@ -36,3 +36,8 @@ func BJJFromStringWithChecksum(s string) (*babyjub.PublicKey, error) {
copy(pkComp[:], pkBytes[:])
return pkComp.Decompress()
}
// CopyBigInt returns a copy of the big int
func CopyBigInt(a *big.Int) *big.Int {
return new(big.Int).SetBytes(a.Bytes())
}
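
Note that big.Int's Bytes/SetBytes carry only the magnitude, so CopyBigInt
assumes non-negative values (which holds for the bid amounts it is used on
here):

b := CopyBigInt(big.NewInt(42))  // independent copy: mutating b leaves the original untouched
n := CopyBigInt(big.NewInt(-5))  // n == 5: the sign is dropped by Bytes()/SetBytes()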

config/config.go (+14 -6)

@ -36,6 +36,7 @@ type ServerProof struct {
type Coordinator struct {
ForgerAddress ethCommon.Address `validate:"required"`
ForgeLoopInterval Duration `validate:"required"`
ConfirmBlocks int64 `validate:"required"`
L2DB struct {
SafetyPeriod common.BatchNum `validate:"required"`
MaxTxs uint32 `validate:"required"`
@ -48,6 +49,16 @@ type Coordinator struct {
Path string `validate:"required"`
} `validate:"required"`
ServerProofs []ServerProof `validate:"required"`
EthClient struct {
CallGasLimit uint64 `validate:"required"`
DeployGasLimit uint64 `validate:"required"`
GasPriceDiv uint64 `validate:"required"`
ReceiptTimeout Duration `validate:"required"`
IntervalReceiptLoop Duration `validate:"required"`
} `validate:"required"`
API struct {
Coordinator bool
} `validate:"required"`
}
// Node is the hermez node configuration.
@ -78,12 +89,9 @@ type Node struct {
TokenHEZ ethCommon.Address `validate:"required"`
TokenHEZName string `validate:"required"`
} `validate:"required"`
EthClient struct {
CallGasLimit uint64 `validate:"required"`
DeployGasLimit uint64 `validate:"required"`
GasPriceDiv uint64 `validate:"required"`
ReceiptTimeout Duration `validate:"required"`
IntervalReceiptLoop Duration `validate:"required"`
API struct {
Address string
Explorer bool
} `validate:"required"`
Debug struct {
APIAddress string

coordinator/batch.go (+18 -43)

@ -3,59 +3,34 @@ package coordinator
import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/eth"
)
// Proof TBD this type will be received from the proof server
type Proof struct {
}
// TxStatus is used to mark the status of an ethereum transaction
type TxStatus string
const (
// TxStatusPending marks the Tx as Pending
TxStatusPending TxStatus = "pending"
// TxStatusSent marks the Tx as Sent
TxStatusSent TxStatus = "sent"
)
// BatchInfo contans the Batch information
type BatchInfo struct {
batchNum common.BatchNum
serverProof ServerProofInterface
zkInputs *common.ZKInputs
proof *Proof
BatchNum common.BatchNum
ServerProof ServerProofInterface
ZKInputs *common.ZKInputs
Proof *Proof
L1UserTxsExtra []common.L1Tx
L1OperatorTxs []common.L1Tx
L2Txs []common.PoolL2Tx
ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo
ethTx *types.Transaction
}
// NewBatchInfo creates a new BatchInfo with the given batchNum &
// ServerProof
func NewBatchInfo(batchNum common.BatchNum, serverProof ServerProofInterface) BatchInfo {
return BatchInfo{
batchNum: batchNum,
serverProof: serverProof,
}
}
// SetTxsInfo sets the l1UserTxs, l1OperatorTxs and l2Txs to the BatchInfo data
// structure
func (bi *BatchInfo) SetTxsInfo(l1UserTxsExtra, l1OperatorTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// TBD parameter: feesInfo
bi.L1UserTxsExtra = l1UserTxsExtra
bi.L1OperatorTxs = l1OperatorTxs
bi.L2Txs = l2Txs
}
// SetZKInputs sets the ZKInputs to the BatchInfo data structure
func (bi *BatchInfo) SetZKInputs(zkInputs *common.ZKInputs) {
bi.zkInputs = zkInputs
}
// SetServerProof sets the ServerProof to the BatchInfo data structure
func (bi *BatchInfo) SetServerProof(serverProof ServerProofInterface) {
bi.serverProof = serverProof
}
// SetProof sets the Proof to the BatchInfo data structure
func (bi *BatchInfo) SetProof(proof *Proof) {
bi.proof = proof
}
// SetEthTx sets the ethTx to the BatchInfo data structure
func (bi *BatchInfo) SetEthTx(ethTx *types.Transaction) {
bi.ethTx = ethTx
TxStatus TxStatus
EthTx *types.Transaction
}
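
With the setters removed, the fields are exported and callers populate
BatchInfo directly; a minimal sketch (the right-hand-side variables are
placeholders):

batchInfo := BatchInfo{BatchNum: batchNum} // accumulates batch metadata as the pipeline advances
batchInfo.ZKInputs = zkInputs
batchInfo.ServerProof = serverProof
batchInfo.TxStatus = TxStatusPending
batchInfo.EthTx = ethTx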

coordinator/coordinator.go (+347 -192)

@ -3,6 +3,7 @@ package coordinator
import (
"context"
"fmt"
"sync"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
@ -12,199 +13,384 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/txselector"
)
var errTODO = fmt.Errorf("TODO")
// ErrStop is returned when the function is stopped asynchronously via the stop
// channel. It doesn't indicate an error.
var ErrStop = fmt.Errorf("Stopped")
// ErrDone is returned when the function is stopped asynchronously via a done
// (terminated) context. It doesn't indicate an error.
var ErrDone = fmt.Errorf("done")
// Config contains the Coordinator configuration
type Config struct {
ForgerAddress ethCommon.Address
ConfirmBlocks int64
}
// Coordinator implements the Coordinator type
type Coordinator struct {
forging bool
// rw *sync.RWMutex
// isForgeSeq bool // WIP just for testing while implementing
config Config
// State
forging bool
batchNum common.BatchNum
serverProofPool *ServerProofPool
consts synchronizer.SCConsts
vars synchronizer.SCVariables
cfg Config
// synchronizer *synchronizer.Synchronizer
hdb *historydb.HistoryDB
txsel *txselector.TxSelector
batchBuilder *batchbuilder.BatchBuilder
ethClient eth.ClientInterface
ethTxs []*types.Transaction
// ethTxStore kvdb.Storage
msgCh chan interface{}
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
pipelineCtx context.Context
pipelineWg sync.WaitGroup
pipelineCancel context.CancelFunc
txManager *TxManager
}
// NewCoordinator creates a new Coordinator
func NewCoordinator(conf Config,
func NewCoordinator(cfg Config,
hdb *historydb.HistoryDB,
txsel *txselector.TxSelector,
bb *batchbuilder.BatchBuilder,
serverProofs []ServerProofInterface,
ethClient eth.ClientInterface) *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
ethClient eth.ClientInterface,
scConsts *synchronizer.SCConsts,
initSCVars *synchronizer.SCVariables,
) *Coordinator { // once synchronizer is ready, synchronizer.Synchronizer will be passed as parameter here
serverProofPool := NewServerProofPool(len(serverProofs))
for _, serverProof := range serverProofs {
serverProofPool.Add(serverProof)
}
txManager := NewTxManager(ethClient, cfg.ConfirmBlocks)
ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{
config: conf,
forging: false,
batchNum: -1,
serverProofPool: serverProofPool,
hdb: hdb,
txsel: txsel,
batchBuilder: bb,
ethClient: ethClient,
ethTxs: make([]*types.Transaction, 0),
// ethTxStore: memory.NewMemoryStorage(),
// rw: &sync.RWMutex{},
consts: *scConsts,
vars: *initSCVars,
cfg: cfg,
hdb: hdb,
txsel: txsel,
batchBuilder: bb,
ethClient: ethClient,
msgCh: make(chan interface{}),
ctx: ctx,
// wg
cancel: cancel,
txManager: txManager,
}
return &c
}
// TODO(Edu): Change the current design of the coordinator structur:
// - Move Start and Stop functions (from node/node.go) here
// - Add concept of StartPipeline, StopPipeline, that spawns and stops the goroutines
// - Add a Manager that calls StartPipeline and StopPipeline, checks when it's time to forge, schedules new batches, etc.
// - Add a TxMonitor that monitors successful ForgeBatch ethereum transactions and waits for N blocks of confirmation, and reports back errors to the Manager.
// ForgeLoopFn is the function ran in a loop that checks if it's time to forge
// and forges a batch if so and sends it to outBatchCh. Returns true if it's
// the coordinator turn to forge.
func (c *Coordinator) ForgeLoopFn(outBatchCh chan *BatchInfo, stopCh chan bool) (forgetime bool, err error) {
// TODO: Move the logic to check if it's forge time or not outside the pipeline
isForgeSequence, err := c.isForgeSequence()
if err != nil {
return false, err
// MsgSyncStats indicates an update to the Synchronizer stats
type MsgSyncStats struct {
Stats synchronizer.Stats
}
// MsgSyncSCVars indicates an update to Smart Contract Vars
type MsgSyncSCVars struct {
Rollup *common.RollupVariables
Auction *common.AuctionVariables
WDelayer *common.WDelayerVariables
}
// MsgSyncReorg indicates a reorg
type MsgSyncReorg struct {
}
// SendMsg is a thread safe method to pass a message to the Coordinator
func (c *Coordinator) SendMsg(msg interface{}) {
c.msgCh <- msg
}
func (c *Coordinator) handleMsgSyncSCVars(msg *MsgSyncSCVars) {
if msg.Rollup != nil {
c.vars.Rollup = *msg.Rollup
}
if msg.Auction != nil {
c.vars.Auction = *msg.Auction
}
if msg.WDelayer != nil {
c.vars.WDelayer = *msg.WDelayer
}
if !isForgeSequence {
}
func (c *Coordinator) handleMsgSyncStats(stats *synchronizer.Stats) error {
if !stats.Synced() {
return nil
}
c.txManager.SetLastBlock(stats.Eth.LastBlock)
anyoneForge := false
if stats.Sync.Auction.CurrentSlot.BatchesLen == 0 &&
c.consts.Auction.RelativeBlock(stats.Eth.LastBlock) > int64(c.vars.Auction.SlotDeadline) {
log.Debug("Coordinator: anyone can forge in the current slot (slotDeadline passed)")
anyoneForge = true
}
if stats.Sync.Auction.CurrentSlot.Forger != c.cfg.ForgerAddress && !anyoneForge {
if c.forging {
log.Info("ForgeLoopFn: forging state end")
log.Info("Coordinator: forging state end")
c.forging = false
c.PipelineStop()
}
log.Debug("ForgeLoopFn: not in forge time")
return false, nil
// log.Debug("Coordinator: not in forge time") // DBG
return nil
}
log.Debug("ForgeLoopFn: forge time")
// log.Debug("Coordinator: forge time") // DBG
if !c.forging {
// Start pipeline from a batchNum state taken from synchronizer
log.Info("ForgeLoopFn: forging state begin")
// c.batchNum = c.hdb.GetLastBatchNum() // uncomment when HistoryDB is ready
log.Info("Coordinator: forging state begin")
c.batchNum = common.BatchNum(stats.Sync.LastBatch)
err := c.txsel.Reset(c.batchNum)
if err != nil {
log.Errorw("ForgeLoopFn: TxSelector.Reset", "error", err)
return true, err
log.Errorw("Coordinator: TxSelector.Reset", "error", err)
return err
}
err = c.batchBuilder.Reset(c.batchNum, true)
if err != nil {
log.Errorw("ForgeLoopFn: BatchBuilder.Reset", "error", err)
return true, err
log.Errorw("Coordinator: BatchBuilder.Reset", "error", err)
return err
}
// c.batchQueue = NewBatchQueue()
c.forging = true
c.PipelineStart()
}
// TODO once synchronizer has this method ready:
// If there's been a reorg, handle it
// handleReorg() function decides if the reorg must restart the pipeline or not
// if c.synchronizer.Reorg():
_ = c.handleReorg()
return nil
}
defer func() {
if err == ErrStop {
log.Info("ForgeLoopFn: forgeLoopFn stopped")
}
// Start the coordinator
func (c *Coordinator) Start() {
c.wg.Add(1)
go func() {
c.txManager.Run(c.ctx)
c.wg.Done()
}()
// 0. Wait for an available server proof
// blocking call
serverProof, err := c.serverProofPool.Get(stopCh)
if err != nil {
return true, err
}
defer func() {
if !forgetime || err != nil {
c.serverProofPool.Add(serverProof)
c.wg.Add(1)
go func() {
for {
select {
case <-c.ctx.Done():
log.Info("Coordinator done")
c.wg.Done()
return
case msg := <-c.msgCh:
switch msg := msg.(type) {
case MsgSyncStats:
stats := msg.Stats
if err := c.handleMsgSyncStats(&stats); err != nil {
log.Errorw("Coordinator.handleMsgSyncStats error", "err", err)
}
case MsgSyncReorg:
if err := c.handleReorg(); err != nil {
log.Errorw("Coordinator.handleReorg error", "err", err)
}
case MsgSyncSCVars:
c.handleMsgSyncSCVars(&msg)
default:
log.Fatalw("Coordinator Unexpected Coordinator msg of type %T: %+v", msg, msg)
}
}
}
}()
}
log.Debugw("ForgeLoopFn: using serverProof", "server", serverProof)
log.Debugw("ForgeLoopFn: forge start")
// forge for batchNum = batchNum + 1.
batchInfo, err := c.forge(serverProof)
if err != nil {
log.Errorw("forge", "error", err)
return true, err
// Stop the coordinator
func (c *Coordinator) Stop() {
log.Infow("Stopping coordinator...")
c.cancel()
c.wg.Wait()
if c.forging {
c.forging = false
c.PipelineStop()
}
log.Debugw("ForgeLoopFn: forge end", "batchNum", batchInfo.batchNum)
outBatchCh <- batchInfo
return true, nil
}
// GetProofCallForgeLoopFn is the function ran in a loop that gets a forged
// batch via inBatchCh, waits for the proof server to finish, calls the ForgeBatch
// function in the Rollup Smart Contract, and sends the batch to outBatchCh.
func (c *Coordinator) GetProofCallForgeLoopFn(inBatchCh, outBatchCh chan *BatchInfo, stopCh chan bool) (err error) {
defer func() {
if err == ErrStop {
log.Info("GetProofCallForgeLoopFn: forgeLoopFn stopped")
// PipelineStart starts the forging pipeline
func (c *Coordinator) PipelineStart() {
c.pipelineCtx, c.pipelineCancel = context.WithCancel(context.Background())
queueSize := 1
batchChSentServerProof := make(chan *BatchInfo, queueSize)
c.pipelineWg.Add(1)
go func() {
for {
select {
case <-c.pipelineCtx.Done():
log.Debug("Pipeline forgeSendServerProof loop done")
c.pipelineWg.Done()
return
default:
c.batchNum = c.batchNum + 1
batchInfo, err := c.forgeSendServerProof(c.pipelineCtx, c.batchNum)
if err == ErrDone {
continue
}
if err != nil {
log.Errorw("forgeSendServerProof", "err", err)
continue
}
batchChSentServerProof <- batchInfo
}
}
}()
select {
case <-stopCh:
return ErrStop
case batchInfo := <-inBatchCh:
log.Debugw("GetProofCallForgeLoopFn: getProofCallForge start", "batchNum", batchInfo.batchNum)
if err := c.getProofCallForge(batchInfo, stopCh); err != nil {
return err
c.pipelineWg.Add(1)
go func() {
for {
select {
case <-c.pipelineCtx.Done():
log.Debug("Pipeline waitServerProofSendEth loop done")
c.pipelineWg.Done()
return
case batchInfo := <-batchChSentServerProof:
err := c.waitServerProof(c.pipelineCtx, batchInfo)
if err == ErrDone {
continue
}
if err != nil {
log.Errorw("waitServerProof", "err", err)
continue
}
c.txManager.AddBatch(batchInfo)
}
}
log.Debugw("GetProofCallForgeLoopFn: getProofCallForge end", "batchNum", batchInfo.batchNum)
outBatchCh <- batchInfo
}()
}
// PipelineStop stops the forging pipeline
func (c *Coordinator) PipelineStop() {
log.Debug("Stopping pipeline...")
c.pipelineCancel()
c.pipelineWg.Wait()
}
// TxManager handles everything related to ethereum transactions: It makes the
// call to forge, waits for transaction confirmation, and keeps checking them
// until a number of confirmed blocks have passed.
type TxManager struct {
ethClient eth.ClientInterface
batchCh chan *BatchInfo
lastBlockCh chan int64
queue []*BatchInfo
confirmation int64
lastBlock int64
}
// NewTxManager creates a new TxManager
func NewTxManager(ethClient eth.ClientInterface, confirmation int64) *TxManager {
return &TxManager{
ethClient: ethClient,
// TODO: Find best queue size
batchCh: make(chan *BatchInfo, 16), //nolint:gomnd
// TODO: Find best queue size
lastBlockCh: make(chan int64, 16), //nolint:gomnd
confirmation: confirmation,
lastBlock: -1,
}
return nil
}
// ForgeCallConfirmLoopFn is the function ran in a loop that gets a batch that
// has been sent to the Rollup Smart Contract via inBatchCh and waits for the
// ethereum transaction confirmation.
func (c *Coordinator) ForgeCallConfirmLoopFn(inBatchCh chan *BatchInfo, stopCh chan bool) (err error) {
defer func() {
if err == ErrStop {
log.Info("ForgeCallConfirmLoopFn: forgeConfirmLoopFn stopped")
}
}()
select {
case <-stopCh:
return ErrStop
case batchInfo := <-inBatchCh:
log.Debugw("ForgeCallConfirmLoopFn: forgeCallConfirm start", "batchNum", batchInfo.batchNum)
if err := c.forgeCallConfirm(batchInfo, stopCh); err != nil {
return err
// AddBatch is a thread safe method to pass a new batch TxManager to be sent to
// the smart contract via the forge call
func (t *TxManager) AddBatch(batchInfo *BatchInfo) {
t.batchCh <- batchInfo
}
// SetLastBlock is a thread safe method to pass the lastBlock to the TxManager
func (t *TxManager) SetLastBlock(lastBlock int64) {
t.lastBlockCh <- lastBlock
}
const waitTime = 200 * time.Millisecond
const longWaitTime = 999 * time.Hour
// Run the TxManager
func (t *TxManager) Run(ctx context.Context) {
next := 0
d := time.Duration(longWaitTime)
for {
select {
case <-ctx.Done():
log.Info("TxManager done")
return
case lastBlock := <-t.lastBlockCh:
t.lastBlock = lastBlock
case batchInfo := <-t.batchCh:
ethTx, err := t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs)
if err != nil {
// TODO: Figure out different error cases and handle them properly
log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err)
continue
}
log.Debugf("ethClient ForgeCall sent, batchNum: %d", batchInfo.BatchNum)
batchInfo.EthTx = ethTx
t.queue = append(t.queue, batchInfo)
d = waitTime
case <-time.After(d):
if len(t.queue) == 0 {
continue
}
batchInfo := t.queue[next]
txID := batchInfo.EthTx.Hash()
receipt, err := t.ethClient.EthTransactionReceipt(ctx, txID)
if err != nil {
log.Errorw("TxManager ethClient.EthTransactionReceipt", "err", err)
// TODO: Figure out different error cases and handle them properly
// TODO: Notify the Coordinator to maybe reset the pipeline
continue
}
if receipt != nil {
if receipt.Status == types.ReceiptStatusFailed {
log.Errorw("TxManager receipt status is failed", "receipt", receipt)
} else if receipt.Status == types.ReceiptStatusSuccessful {
if t.lastBlock-receipt.BlockNumber.Int64() >= t.confirmation {
log.Debugw("TxManager tx for RollupForgeBatch confirmed", "batchNum", batchInfo.BatchNum)
t.queue = t.queue[1:]
if len(t.queue) == 0 {
d = longWaitTime
}
}
}
}
if len(t.queue) == 0 {
next = 0
} else {
next = (next + 1) % len(t.queue)
}
}
log.Debugw("ForgeCallConfirmLoopFn: forgeCallConfirm end", "batchNum", batchInfo.batchNum)
}
return nil
}
func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error) {
// forgeSendServerProof the next batch, wait for a proof server to be available and send the
// circuit inputs to the proof server.
func (c *Coordinator) forgeSendServerProof(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
// remove transactions from the pool that have been there for too long
err := c.purgeRemoveByTimeout()
if err != nil {
return nil, err
}
c.batchNum = c.batchNum + 1
batchInfo := NewBatchInfo(c.batchNum, serverProof) // to accumulate metadata of the batch
batchInfo := BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
var poolL2Txs []common.PoolL2Tx
// var feesInfo
@ -213,14 +399,14 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
if c.shouldL1L2Batch() {
// 2a: L1+L2 txs
// l1UserTxs, toForgeL1TxsNumber := c.hdb.GetNextL1UserTxs() // TODO once HistoryDB is ready, uncomment
var l1UserTxs []common.L1Tx = nil // tmp, depends on HistoryDB
l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection([]common.Idx{}, c.batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
var l1UserTxs []common.L1Tx = nil // tmp, depends on HistoryDB
l1UserTxsExtra, l1OperatorTxs, poolL2Txs, err = c.txsel.GetL1L2TxSelection([]common.Idx{}, batchNum, l1UserTxs) // TODO once feesInfo is added to method return, add the var
if err != nil {
return nil, err
}
} else {
// 2b: only L2 txs
_, poolL2Txs, err = c.txsel.GetL2TxSelection([]common.Idx{}, c.batchNum) // TODO once feesInfo is added to method return, add the var
_, poolL2Txs, err = c.txsel.GetL2TxSelection([]common.Idx{}, batchNum) // TODO once feesInfo is added to method return, add the var
if err != nil {
return nil, err
}
@ -238,11 +424,14 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
}
// 3. Save metadata from TxSelector output for BatchNum
batchInfo.SetTxsInfo(l1UserTxsExtra, l1OperatorTxs, poolL2Txs) // TODO feesInfo
// batchInfo.SetTxsInfo(l1UserTxsExtra, l1OperatorTxs, poolL2Txs) // TODO feesInfo
batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1OperatorTxs = l1OperatorTxs
batchInfo.L2Txs = poolL2Txs
// 4. Call BatchBuilder with TxSelector output
configBatch := &batchbuilder.ConfigBatch{
ForgerAddress: c.config.ForgerAddress,
ForgerAddress: c.cfg.ForgerAddress,
}
zkInputs, err := c.batchBuilder.BuildBatch([]common.Idx{}, configBatch, l1UserTxsExtra, l1OperatorTxs, poolL2Txs, nil) // TODO []common.TokenID --> feesInfo
if err != nil {
@ -250,10 +439,25 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
}
// 5. Save metadata from BatchBuilder output for BatchNum
batchInfo.SetZKInputs(zkInputs)
batchInfo.ZKInputs = zkInputs
// 6. Wait for an available server proof blocking call
serverProof, err := c.serverProofPool.Get(ctx)
if err != nil {
return nil, err
}
batchInfo.ServerProof = serverProof
defer func() {
// If there's an error further on, add the serverProof back to
// the pool
if err != nil {
c.serverProofPool.Add(serverProof)
}
}()
// 6. Call an idle server proof with BatchBuilder output, save server proof info for batchNum
err = batchInfo.serverProof.CalculateProof(zkInputs)
// 7. Call the selected idle server proof with BatchBuilder output,
// save server proof info for batchNum
err = batchInfo.ServerProof.CalculateProof(zkInputs)
if err != nil {
return nil, err
}
@ -261,24 +465,17 @@ func (c *Coordinator) forge(serverProof ServerProofInterface) (*BatchInfo, error
return &batchInfo, nil
}
// getProofCallForge gets the generated zkProof & sends it to the SmartContract
func (c *Coordinator) getProofCallForge(batchInfo *BatchInfo, stopCh chan bool) error {
serverProof := batchInfo.serverProof
proof, err := serverProof.GetProof(stopCh) // blocking call, until not resolved don't continue. Returns when the proof server has calculated the proof
c.serverProofPool.Add(serverProof)
batchInfo.serverProof = nil
if err != nil {
return err
}
batchInfo.SetProof(proof)
forgeBatchArgs := c.prepareForgeBatchArgs(batchInfo)
ethTx, err := c.ethClient.RollupForgeBatch(forgeBatchArgs)
// waitServerProof gets the generated zkProof & sends it to the SmartContract
func (c *Coordinator) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
proof, err := batchInfo.ServerProof.GetProof(ctx) // blocking call, until not resolved don't continue. Returns when the proof server has calculated the proof
if err != nil {
return err
}
// TODO: Move this to the next step (forgeCallConfirm)
log.Debugf("ethClient ForgeCall sent, batchNum: %d", c.batchNum)
batchInfo.SetEthTx(ethTx)
c.serverProofPool.Add(batchInfo.ServerProof)
batchInfo.ServerProof = nil
batchInfo.Proof = proof
batchInfo.ForgeBatchArgs = c.prepareForgeBatchArgs(batchInfo)
batchInfo.TxStatus = TxStatusPending
// TODO(FUTURE) once tx data type is defined, store ethTx (returned by ForgeCall)
// TBD if use ethTxStore as a disk k-v database, or use a Queue
@ -294,65 +491,23 @@ func (c *Coordinator) getProofCallForge(batchInfo *BatchInfo, stopCh chan bool)
return nil
}
func (c *Coordinator) forgeCallConfirm(batchInfo *BatchInfo, stopCh chan bool) error {
// TODO strategy of this sequence TBD
// confirm eth txs and mark them as accepted sequence
// IDEA: Keep an array in Coordinator with the list of sent ethTx.
// Here, loop over them and only delete them once the number of
// confirmed blocks is over a configured value. If the tx is rejected,
// return error.
// ethTx := ethTxStore.GetFirstPending()
// waitForAccepted(ethTx) // blocking call, returns once the ethTx is mined
// ethTxStore.MarkAccepted(ethTx)
txID := batchInfo.ethTx.Hash()
// TODO: Follow EthereumClient.waitReceipt logic
count := 0
// TODO: Define this waitTime in the config
waitTime := 100 * time.Millisecond //nolint:gomnd
select {
case <-time.After(waitTime):
receipt, err := c.ethClient.EthTransactionReceipt(context.TODO(), txID)
if err != nil {
return err
}
if receipt != nil {
if receipt.Status == types.ReceiptStatusFailed {
return fmt.Errorf("receipt status is failed")
} else if receipt.Status == types.ReceiptStatusSuccessful {
return nil
}
}
// TODO: Call go-ethereum:
// if err == nil && receipt == nil :
// `func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) {`
count++
if time.Duration(count)*waitTime > 60*time.Second {
log.Warnw("Waiting for ethTx receipt for more than 60 seconds", "tx", batchInfo.ethTx)
// TODO: Decide if we resend the Tx with higher gas price
}
case <-stopCh:
return ErrStop
}
return fmt.Errorf("timeout")
}
func (c *Coordinator) handleReorg() error {
return nil // TODO
}
// isForgeSequence returns true if the node is the Forger in the current ethereum block
func (c *Coordinator) isForgeSequence() (bool, error) {
// TODO: Consider checking if we can forge by quering the Synchronizer instead of using ethClient
blockNum, err := c.ethClient.EthCurrentBlock()
if err != nil {
return false, err
}
addr, err := c.ethClient.EthAddress()
if err != nil {
return false, err
}
return c.ethClient.AuctionCanForge(*addr, blockNum+1)
}
// func (c *Coordinator) isForgeSequence() (bool, error) {
// // TODO: Consider checking if we can forge by quering the Synchronizer instead of using ethClient
// blockNum, err := c.ethClient.EthLastBlock()
// if err != nil {
// return false, err
// }
// addr, err := c.ethClient.EthAddress()
// if err != nil {
// return false, err
// }
// return c.ethClient.AuctionCanForge(*addr, blockNum+1)
// }
func (c *Coordinator) purgeRemoveByTimeout() error {
return nil // TODO
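
The TxManager's finality rule above reduces to a single comparison against
the last synced block; restated as an illustrative helper (not part of this
commit):

// isConfirmed reports whether a forge tx mined at minedBlock has at least
// confirmBlocks blocks on top of it, given the last block seen by the sync.
func isConfirmed(lastBlock, minedBlock, confirmBlocks int64) bool {
	return lastBlock-minedBlock >= confirmBlocks
}

With ConfirmBlocks = 5, a tx mined at block 100 is considered final once the
sync reports block 105.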

coordinator/coordinator_test.go (+47 -104)

@ -14,6 +14,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/txselector"
"github.com/stretchr/testify/assert"
@ -29,15 +30,6 @@ func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBu
synchSdb, err := statedb.NewStateDB(synchDBPath, statedb.TypeSynchronizer, nLevels)
assert.Nil(t, err)
// txselDBPath, err := ioutil.TempDir("", "tmpTxSelDB")
// require.Nil(t, err)
// bbDBPath, err := ioutil.TempDir("", "tmpBBDB")
// require.Nil(t, err)
// txselSdb, err := statedb.NewLocalStateDB(txselDBPath, synchSdb, statedb.TypeTxSelector, nLevels)
// assert.Nil(t, err)
// bbSdb, err := statedb.NewLocalStateDB(bbDBPath, synchSdb, statedb.TypeBatchBuilder, nLevels)
// assert.Nil(t, err)
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.Nil(t, err)
@ -60,85 +52,6 @@ func newTestModules(t *testing.T) (*txselector.TxSelector, *batchbuilder.BatchBu
return txsel, bb
}
// CoordNode is an example of a Node that handles the goroutines for the coordinator
type CoordNode struct {
c *Coordinator
stopForge chan bool
stopGetProofCallForge chan bool
stopForgeCallConfirm chan bool
}
func NewCoordNode(c *Coordinator) *CoordNode {
return &CoordNode{
c: c,
}
}
func (cn *CoordNode) Start() {
log.Debugw("Starting CoordNode...")
cn.stopForge = make(chan bool)
cn.stopGetProofCallForge = make(chan bool)
cn.stopForgeCallConfirm = make(chan bool)
queueSize := 8
batchCh0 := make(chan *BatchInfo, queueSize)
batchCh1 := make(chan *BatchInfo, queueSize)
go func() {
for {
select {
case <-cn.stopForge:
return
default:
if forge, err := cn.c.ForgeLoopFn(batchCh0, cn.stopForge); err == ErrStop {
return
} else if err != nil {
log.Errorw("CoordNode ForgeLoopFn", "error", err)
time.Sleep(200 * time.Millisecond) // Avoid overflowing log with errors
} else if !forge {
time.Sleep(200 * time.Millisecond)
}
}
}
}()
go func() {
for {
select {
case <-cn.stopGetProofCallForge:
return
default:
if err := cn.c.GetProofCallForgeLoopFn(
batchCh0, batchCh1, cn.stopGetProofCallForge); err == ErrStop {
return
} else if err != nil {
log.Errorw("CoordNode GetProofCallForgeLoopFn", "error", err)
}
}
}
}()
go func() {
for {
select {
case <-cn.stopForgeCallConfirm:
return
default:
if err := cn.c.ForgeCallConfirmLoopFn(
batchCh1, cn.stopForgeCallConfirm); err == ErrStop {
return
} else if err != nil {
log.Errorw("CoordNode ForgeCallConfirmLoopFn", "error", err)
}
}
}
}()
}
func (cn *CoordNode) Stop() {
log.Debugw("Stopping CoordNode...")
cn.stopForge <- true
cn.stopGetProofCallForge <- true
cn.stopForgeCallConfirm <- true
}
type timer struct {
time int64
}
@ -149,9 +62,12 @@ func (t *timer) Time() int64 {
return currentTime
}
func waitForSlot(t *testing.T, c *test.Client, slot int64) {
var forger ethCommon.Address
var bidder ethCommon.Address
func waitForSlot(t *testing.T, coord *Coordinator, c *test.Client, slot int64) {
for {
blockNum, err := c.EthCurrentBlock()
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
nextBlockSlot, err := c.AuctionGetSlotNumber(blockNum + 1)
require.Nil(t, err)
@ -159,20 +75,35 @@ func waitForSlot(t *testing.T, c *test.Client, slot int64) {
break
}
c.CtlMineBlock()
time.Sleep(100 * time.Millisecond)
var stats synchronizer.Stats
stats.Eth.LastBlock = c.CtlLastBlock()
stats.Sync.LastBlock = c.CtlLastBlock()
canForge, err := c.AuctionCanForge(forger, blockNum+1)
require.Nil(t, err)
if canForge {
// fmt.Println("DBG canForge")
stats.Sync.Auction.CurrentSlot.Forger = forger
}
coord.SendMsg(MsgSyncStats{
Stats: stats,
})
}
}
func TestCoordinator(t *testing.T) {
txsel, bb := newTestModules(t)
bidder = ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f")
forger = ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
conf := Config{}
conf := Config{
ForgerAddress: forger,
}
hdb := &historydb.HistoryDB{}
serverProofs := []ServerProofInterface{&ServerProofMock{}, &ServerProofMock{}}
var timer timer
ethClientSetup := test.NewClientSetupExample()
bidder := ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f")
forger := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
ethClient := test.NewClient(true, &timer, &bidder, ethClientSetup)
// Bid for slot 2 and 4
@ -183,28 +114,40 @@ func TestCoordinator(t *testing.T) {
_, err = ethClient.AuctionBidSimple(4, big.NewInt(9999))
require.Nil(t, err)
c := NewCoordinator(conf, hdb, txsel, bb, serverProofs, ethClient)
cn := NewCoordNode(c)
cn.Start()
scConsts := &synchronizer.SCConsts{
Rollup: *ethClientSetup.RollupConstants,
Auction: *ethClientSetup.AuctionConstants,
WDelayer: *ethClientSetup.WDelayerConstants,
}
initSCVars := &synchronizer.SCVariables{
Rollup: *ethClientSetup.RollupVariables,
Auction: *ethClientSetup.AuctionVariables,
WDelayer: *ethClientSetup.WDelayerVariables,
}
c := NewCoordinator(conf, hdb, txsel, bb, serverProofs, ethClient, scConsts, initSCVars)
c.Start()
time.Sleep(1 * time.Second)
// NOTE: With the current test, the coordinator will enter in forge
// time before the bidded slot because no one else is forging in the
// other slots before the slot deadline.
// simulate forgeSequence time
waitForSlot(t, ethClient, 2)
log.Info("simulate entering in forge time")
waitForSlot(t, c, ethClient, 2)
log.Info("~~~ simulate entering in forge time")
time.Sleep(1 * time.Second)
// simulate going out from forgeSequence
waitForSlot(t, ethClient, 3)
log.Info("simulate going out from forge time")
waitForSlot(t, c, ethClient, 3)
log.Info("~~~ simulate going out from forge time")
time.Sleep(1 * time.Second)
// simulate entering forgeSequence time again
waitForSlot(t, ethClient, 4)
log.Info("simulate entering in forge time again")
waitForSlot(t, c, ethClient, 4)
log.Info("~~~ simulate entering in forge time again")
time.Sleep(2 * time.Second)
// simulate stopping forgerLoop by channel
log.Info("simulate stopping forgerLoop by closing coordinator stopch")
cn.Stop()
log.Info("~~~ simulate stopping forgerLoop by closing coordinator stopch")
c.Stop()
time.Sleep(1 * time.Second)
}

coordinator/proofpool.go (+12 -17)

@ -1,6 +1,7 @@
package coordinator
import (
"context"
"time"
"github.com/hermeznetwork/hermez-node/common"
@ -10,7 +11,7 @@ import (
// ServerProofInterface is the interface to a ServerProof that calculates zk proofs
type ServerProofInterface interface {
CalculateProof(zkInputs *common.ZKInputs) error
GetProof(stopCh chan bool) (*Proof, error)
GetProof(ctx context.Context) (*Proof, error)
}
// ServerProof contains the data related to a ServerProof
@ -33,7 +34,7 @@ func (p *ServerProof) CalculateProof(zkInputs *common.ZKInputs) error {
}
// GetProof retreives the Proof from the ServerProof
func (p *ServerProof) GetProof(stopCh chan bool) (*Proof, error) {
func (p *ServerProof) GetProof(ctx context.Context) (*Proof, error) {
log.Error("TODO")
return nil, errTODO
}
@ -49,13 +50,13 @@ func (p *ServerProofMock) CalculateProof(zkInputs *common.ZKInputs) error {
}
// GetProof retreives the Proof from the ServerProof
func (p *ServerProofMock) GetProof(stopCh chan bool) (*Proof, error) {
func (p *ServerProofMock) GetProof(ctx context.Context) (*Proof, error) {
// Simulate a delay
select {
case <-time.After(200 * time.Millisecond): //nolint:gomnd
return &Proof{}, nil
case <-stopCh:
return nil, ErrStop
case <-ctx.Done():
return nil, ErrDone
}
}
@ -77,18 +78,12 @@ func (p *ServerProofPool) Add(serverProof ServerProofInterface) {
}
// Get returns the next available ServerProof
func (p *ServerProofPool) Get(stopCh chan bool) (ServerProofInterface, error) {
func (p *ServerProofPool) Get(ctx context.Context) (ServerProofInterface, error) {
select {
case <-stopCh:
log.Info("ServerProofPool.Get stopped")
return nil, ErrStop
default:
select {
case <-stopCh:
log.Info("ServerProofPool.Get stopped")
return nil, ErrStop
case serverProof := <-p.pool:
return serverProof, nil
}
case <-ctx.Done():
log.Info("ServerProofPool.Get done")
return nil, ErrDone
case serverProof := <-p.pool:
return serverProof, nil
}
}

db/historydb/historydb.go (+29 -10)

@ -290,6 +290,13 @@ func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, erro
return db.SlicePtrsToSlice(batches).([]common.Batch), err
}
// GetBatchesLen retrieve number of batches from the DB, given a slotNum
func (hdb *HistoryDB) GetBatchesLen(slotNum int64) (int, error) {
row := hdb.db.QueryRow("SELECT COUNT(*) FROM batch WHERE slot_num = $1;", slotNum)
var batchesLen int
return batchesLen, row.Scan(&batchesLen)
}
// GetLastBatchNum returns the BatchNum of the latest forged batch
func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
row := hdb.db.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
@ -318,16 +325,6 @@ func (hdb *HistoryDB) Reorg(lastValidBlock int64) error {
return err
}
// SyncPoD stores all the data that can be changed / added on a block in the PoD SC
func (hdb *HistoryDB) SyncPoD(
blockNum uint64,
bids []common.Bid,
coordinators []common.Coordinator,
vars *common.AuctionVariables,
) error {
return nil
}
// AddBids insert Bids into the DB
func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.db, bids) }
func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
@ -361,6 +358,27 @@ func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
return *bid, err
}
// GetBestBidCoordinator returns the forger address of the highest bidder in a slot by slotNum
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{}
err := meddler.QueryRow(
hdb.db, bidCoord,
`SELECT (
SELECT default_slot_set_bid_slot_num
FROM auction_vars
WHERE default_slot_set_bid_slot_num <= $1
ORDER BY eth_block_num DESC LIMIT 1
),
bid.slot_num, bid.bid_value, bid.bidder_addr,
coordinator.forger_addr, coordinator.url
FROM bid
INNER JOIN coordinator ON bid.bidder_addr = coordinator.bidder_addr
WHERE bid.slot_num = $1 ORDER BY bid.item_id DESC LIMIT 1;`,
slotNum)
return bidCoord, err
}
// GetBestBidsAPI returns the best bid in specific slot by slotNum
func (hdb *HistoryDB) GetBestBidsAPI(
minSlotNum, maxSlotNum *int64,
@ -1244,6 +1262,7 @@ func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables,
rollup.EthBlockNum = 0
auction.EthBlockNum = 0
wDelayer.EthBlockNum = 0
auction.DefaultSlotSetBidSlotNum = 0
if err := hdb.setRollupVars(txn, rollup); err != nil {
return err
}
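
A sketch of how the two new queries combine to decide who may forge a slot
(assumed wiring, not code from this commit; the no-bids case, where
GetBestBidCoordinator returns sql.ErrNoRows and the boot coordinator forges,
is left out):

func canForgeSlot(hdb *historydb.HistoryDB, forger ethCommon.Address, slotNum int64, pastSlotDeadline bool) (bool, error) {
	batchesLen, err := hdb.GetBatchesLen(slotNum)
	if err != nil {
		return false, err
	}
	if batchesLen == 0 && pastSlotDeadline {
		// Deadline passed without a forged batch: anyone can forge.
		return true, nil
	}
	bidCoord, err := hdb.GetBestBidCoordinator(slotNum)
	if err != nil {
		return false, err
	}
	return bidCoord.Forger == forger, nil
}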

db/historydb/historydb_test.go (+68 -5)

@ -633,11 +633,7 @@ func TestGetL1UserTxs(t *testing.T) {
assert.Equal(t, 0, len(l1UserTxs))
}
func TestSetInitialSCVars(t *testing.T) {
test.WipeDB(historyDB.DB())
_, _, _, err := historyDB.GetSCVars()
assert.Equal(t, sql.ErrNoRows, err)
func exampleInitSCVars() (*common.RollupVariables, *common.AuctionVariables, *common.WDelayerVariables) {
//nolint:govet
rollup := &common.RollupVariables{
0,
@ -655,6 +651,7 @@ func TestSetInitialSCVars(t *testing.T) {
big.NewInt(1), big.NewInt(2), big.NewInt(3),
big.NewInt(4), big.NewInt(5), big.NewInt(6),
},
0,
2,
4320,
[3]uint16{10, 11, 12},
@ -671,6 +668,14 @@ func TestSetInitialSCVars(t *testing.T) {
14,
false,
}
return rollup, auction, wDelayer
}
func TestSetInitialSCVars(t *testing.T) {
test.WipeDB(historyDB.DB())
_, _, _, err := historyDB.GetSCVars()
assert.Equal(t, sql.ErrNoRows, err)
rollup, auction, wDelayer := exampleInitSCVars()
err = historyDB.SetInitialSCVars(rollup, auction, wDelayer)
require.Nil(t, err)
dbRollup, dbAuction, dbWDelayer, err := historyDB.GetSCVars()
@ -793,6 +798,64 @@ func TestUpdateExitTree(t *testing.T) {
require.Equal(t, &block.Block.EthBlockNum, dbExitsByIdx[257].DelayedWithdrawn)
}
func TestGetBestBidCoordinator(t *testing.T) {
test.WipeDB(historyDB.DB())
rollup, auction, wDelayer := exampleInitSCVars()
err := historyDB.SetInitialSCVars(rollup, auction, wDelayer)
require.Nil(t, err)
tc := til.NewContext(common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(`
Type: Blockchain
> block // blockNum=2
`)
require.Nil(t, err)
err = historyDB.AddBlockSCData(&blocks[0])
require.Nil(t, err)
coords := []common.Coordinator{
{
Bidder: ethCommon.BigToAddress(big.NewInt(1)),
Forger: ethCommon.BigToAddress(big.NewInt(2)),
EthBlockNum: 2,
URL: "foo",
},
{
Bidder: ethCommon.BigToAddress(big.NewInt(3)),
Forger: ethCommon.BigToAddress(big.NewInt(4)),
EthBlockNum: 2,
URL: "bar",
},
}
err = historyDB.addCoordinators(historyDB.db, coords)
require.Nil(t, err)
err = historyDB.addBids(historyDB.db, []common.Bid{
{
SlotNum: 10,
BidValue: big.NewInt(10),
EthBlockNum: 2,
Bidder: coords[0].Bidder,
},
{
SlotNum: 10,
BidValue: big.NewInt(20),
EthBlockNum: 2,
Bidder: coords[1].Bidder,
},
})
require.Nil(t, err)
forger10, err := historyDB.GetBestBidCoordinator(10)
require.Nil(t, err)
require.Equal(t, coords[1].Forger, forger10.Forger)
require.Equal(t, coords[1].Bidder, forger10.Bidder)
require.Equal(t, coords[1].URL, forger10.URL)
_, err = historyDB.GetBestBidCoordinator(11)
require.Equal(t, sql.ErrNoRows, err)
}
// setTestBlocks WARNING: this will delete the blocks and recreate them
func setTestBlocks(from, to int64) []common.Block {
test.WipeDB(historyDB.DB())

db/migrations/0001.sql (+1 -0)

@ -536,6 +536,7 @@ CREATE TABLE auction_vars (
donation_address BYTEA NOT NULL,
boot_coordinator BYTEA NOT NULL,
default_slot_set_bid BYTEA NOT NULL,
default_slot_set_bid_slot_num BIGINT NOT NULL, -- slot_num after which the new default_slot_set_bid applies
closed_auction_slots INT NOT NULL,
open_auction_slots INT NOT NULL,
allocation_ratio VARCHAR(200),

+14 -14  eth/auction_test.go

@ -71,7 +71,7 @@ func TestAuctionSetSlotDeadline(t *testing.T) {
slotDeadline, err := auctionClientTest.AuctionGetSlotDeadline()
require.Nil(t, err)
assert.Equal(t, newSlotDeadline, slotDeadline)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline)
}
@ -90,7 +90,7 @@ func TestAuctionSetOpenAuctionSlots(t *testing.T) {
openAuctionSlots, err := auctionClientTest.AuctionGetOpenAuctionSlots()
require.Nil(t, err)
assert.Equal(t, newOpenAuctionSlots, openAuctionSlots)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots)
}
@ -109,7 +109,7 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
closedAuctionSlots, err := auctionClientTest.AuctionGetClosedAuctionSlots()
require.Nil(t, err)
assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
_, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
@ -130,7 +130,7 @@ func TestAuctionSetOutbidding(t *testing.T) {
outbidding, err := auctionClientTest.AuctionGetOutbidding()
require.Nil(t, err)
assert.Equal(t, newOutbidding, outbidding)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding)
_, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst)
@ -151,7 +151,7 @@ func TestAuctionSetAllocationRatio(t *testing.T) {
allocationRatio, err := auctionClientTest.AuctionGetAllocationRatio()
require.Nil(t, err)
assert.Equal(t, newAllocationRatio, allocationRatio)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio)
_, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst)
@ -178,7 +178,7 @@ func TestAuctionSetDonationAddress(t *testing.T) {
donationAddress, err := auctionClientTest.AuctionGetDonationAddress()
require.Nil(t, err)
assert.Equal(t, &newDonationAddress, donationAddress)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress)
_, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst)
@ -193,7 +193,7 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
bootCoordinator, err := auctionClientTest.AuctionGetBootCoordinator()
require.Nil(t, err)
assert.Equal(t, &newBootCoordinator, bootCoordinator)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
_, err = auctionClientTest.AuctionSetBootCoordinator(bootCoordinatorAddressConst)
@ -227,7 +227,7 @@ func TestAuctionChangeDefaultSlotSetBid(t *testing.T) {
minBid, err := auctionClientTest.AuctionGetDefaultSlotSetBid(set)
require.Nil(t, err)
assert.Equal(t, minBid, newInitialMinBid)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet)
assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid)
@ -251,7 +251,7 @@ func TestAuctionRegisterCoordinator(t *testing.T) {
_, err := auctionClientTest.AuctionSetCoordinator(forgerAddress, URL)
require.Nil(t, err)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress)
assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress)
@ -268,7 +268,7 @@ func TestAuctionBid(t *testing.T) {
bidderAddress := governanceAddressConst
_, err = auctionClientTest.AuctionBid(amount, currentSlot+4, bidAmount, deadline)
require.Nil(t, err)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
@ -306,7 +306,7 @@ func TestAuctionMultiBid(t *testing.T) {
bidderAddress := governanceAddressConst
_, err = auctionClientTest.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, maxBid, minBid, deadline)
require.Nil(t, err)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot)
@ -334,7 +334,7 @@ func TestAuctionClaimHEZ(t *testing.T) {
_, err := auctionClientTest.AuctionClaimHEZ()
require.Nil(t, err)
currentBlockNum, _ := auctionClientTest.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTest.client.EthLastBlock()
auctionEvents, _, _ := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount)
assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner)
@ -345,10 +345,10 @@ func TestAuctionForge(t *testing.T) {
require.Nil(t, err)
slotConst := 4
blockNum := int64(int(blocksPerSlot)*slotConst + int(genesisBlock))
currentBlockNum, _ := auctionClientTestHermez.client.EthCurrentBlock()
currentBlockNum, _ := auctionClientTestHermez.client.EthLastBlock()
blocksToAdd := blockNum - currentBlockNum
addBlocks(blocksToAdd, ethClientDialURL)
currentBlockNum, _ = auctionClientTestHermez.client.EthCurrentBlock()
currentBlockNum, _ = auctionClientTestHermez.client.EthLastBlock()
assert.Equal(t, currentBlockNum, blockNum)
_, err = auctionClientTestHermez.AuctionForge(governanceAddressConst)
require.Nil(t, err)

+3 -3  eth/ethereum.go

@ -26,7 +26,7 @@ type ERC20Consts struct {
// EthereumInterface is the interface to Ethereum
type EthereumInterface interface {
EthCurrentBlock() (int64, error)
EthLastBlock() (int64, error)
// EthHeaderByNumber(context.Context, *big.Int) (*types.Header, error)
EthBlockByNumber(context.Context, int64) (*common.Block, error)
EthAddress() (*ethCommon.Address, error)
@ -241,8 +241,8 @@ func (c *EthereumClient) waitReceipt(ctx context.Context, tx *types.Transaction,
return receipt, err
}
// EthCurrentBlock returns the current block number in the blockchain
func (c *EthereumClient) EthCurrentBlock() (int64, error) {
// EthLastBlock returns the last block number in the blockchain
func (c *EthereumClient) EthLastBlock() (int64, error) {
ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second)
defer cancel()
header, err := c.client.HeaderByNumber(ctx, nil)
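The rename is semantic only: the method reports the last mined block number rather than a block being built. A minimal sketch of a hypothetical helper written against the interface:
// logLastBlock is a hypothetical helper; it works with any
// EthereumInterface implementation.
func logLastBlock(c EthereumInterface) {
	if lastBlock, err := c.EthLastBlock(); err == nil {
		log.Infow("last mined block", "block", lastBlock)
	}
}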

+25 -25  eth/rollup_test.go

@ -77,7 +77,7 @@ func TestRollupAddToken(t *testing.T) {
require.Nil(t, err)
_, err = rollupClient.RollupAddToken(tokenHEZAddressConst, feeAddToken, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress)
@ -106,7 +106,7 @@ func TestRollupForgeBatch(t *testing.T) {
// Add Blocks
blockNum := int64(int(blocksPerSlot)*int(currentSlot+4) + int(genesisBlock))
currentBlockNum, _ := auctionClient.client.EthCurrentBlock()
currentBlockNum, _ := auctionClient.client.EthLastBlock()
blocksToAdd := blockNum - currentBlockNum
addBlocks(blocksToAdd, ethClientDialURL)
@ -155,7 +155,7 @@ func TestRollupForgeBatch(t *testing.T) {
_, err = rollupClient.RollupForgeBatch(argsForge)
require.Nil(t, err)
currentBlockNum, _ = rollupClient.client.EthCurrentBlock()
currentBlockNum, _ = rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum)
@ -181,7 +181,7 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {
_, err := rollupClient.RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
@ -192,7 +192,7 @@ func TestRollupUpdateFeeAddToken(t *testing.T) {
_, err := rollupClient.RollupUpdateFeeAddToken(newFeeAddToken)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken)
@ -217,7 +217,7 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -245,7 +245,7 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -273,7 +273,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -302,7 +302,7 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -330,7 +330,7 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -358,7 +358,7 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -387,7 +387,7 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -415,7 +415,7 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -443,7 +443,7 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -472,7 +472,7 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -500,7 +500,7 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -528,7 +528,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -557,7 +557,7 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -585,7 +585,7 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -613,7 +613,7 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -642,7 +642,7 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -670,7 +670,7 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -700,7 +700,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.LoadAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@ -748,7 +748,7 @@ func TestRollupForgeBatch2(t *testing.T) {
_, err = rollupClient.RollupForgeBatch(argsForge)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum)
@ -787,7 +787,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
_, err = rollupClientAux.RollupWithdrawMerkleProof(pk, tokenID, numExitRoot, fromIdx, amount, siblings, instantWithdraw)
require.Nil(t, err)
currentBlockNum, _ := rollupClient.client.EthCurrentBlock()
currentBlockNum, _ := rollupClient.client.EthLastBlock()
rollupEvents, _, _ := rollupClient.RollupEventsByBlock(currentBlockNum)
assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx)

+9 -9  eth/wdelayer_test.go

@ -41,7 +41,7 @@ func TestWDelayerSetHermezGovernanceDAOAddress(t *testing.T) {
auxAddress, err := wdelayerClientTest.WDelayerGetHermezGovernanceDAOAddress()
require.Nil(t, err)
assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceDAOAddress[0].NewHermezGovernanceDAOAddress)
wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst)
@ -64,7 +64,7 @@ func TestWDelayerSetHermezKeeperAddress(t *testing.T) {
auxAddress, err := wdelayerClientTest.WDelayerGetHermezKeeperAddress()
require.Nil(t, err)
assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezKeeperAddress[0].NewHermezKeeperAddress)
wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst)
@ -87,7 +87,7 @@ func TestWDelayerSetWhiteHackGroupAddress(t *testing.T) {
auxAddress, err := wdelayerClientTest.WDelayerGetWhiteHackGroupAddress()
require.Nil(t, err)
assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewWhiteHackGroupAddress[0].NewWhiteHackGroupAddress)
wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst)
@ -116,7 +116,7 @@ func TestWDelayerChangeWithdrawalDelay(t *testing.T) {
withdrawalDelay, err := wdelayerClientTest.WDelayerGetWithdrawalDelay()
require.Nil(t, err)
assert.Equal(t, newWithdrawalDelay, withdrawalDelay)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, newWithdrawalDelay.Uint64(), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay)
}
@ -128,7 +128,7 @@ func TestWDelayerDeposit(t *testing.T) {
require.Nil(t, err)
_, err = wdelayerClientHermez.WDelayerDeposit(auxAddressConst, tokenHEZAddressConst, amount)
require.Nil(t, err)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@ -152,7 +152,7 @@ func TestWDelayerWithdrawal(t *testing.T) {
addBlock(ethClientDialURL)
_, err = wdelayerClientTest.WDelayerWithdrawal(auxAddressConst, tokenHEZAddressConst)
require.Nil(t, err)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner)
@ -166,7 +166,7 @@ func TestWDelayerSecondDeposit(t *testing.T) {
require.Nil(t, err)
_, err = wdelayerClientHermez.WDelayerDeposit(auxAddressConst, tokenHEZAddressConst, amount)
require.Nil(t, err)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@ -181,7 +181,7 @@ func TestWDelayerEnableEmergencyMode(t *testing.T) {
emergencyMode, err := wdelayerClientTest.WDelayerIsEmergencyMode()
require.Nil(t, err)
assert.Equal(t, true, emergencyMode)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
auxEvent := new(WDelayerEventEmergencyModeEnabled)
assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0])
@ -208,7 +208,7 @@ func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
addTime(seconds, ethClientDialURL)
_, err = wdelayerClientWhite.WDelayerEscapeHatchWithdrawal(governanceAddressConst, tokenHEZAddressConst, amount)
require.Nil(t, err)
currentBlockNum, _ := wdelayerClientTest.client.EthCurrentBlock()
currentBlockNum, _ := wdelayerClientTest.client.EthLastBlock()
wdelayerEvents, _, _ := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token)
assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To)

+212 -107  node/node.go

@ -2,10 +2,14 @@ package node
import (
"context"
"net/http"
"sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api"
"github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
@ -39,16 +43,11 @@ const (
// Node is the Hermez Node
type Node struct {
nodeAPI *NodeAPI
debugAPI *debugapi.DebugAPI
// Coordinator
coord *coordinator.Coordinator
coordCfg *config.Coordinator
stopForge chan bool
stopGetProofCallForge chan bool
stopForgeCallConfirm chan bool
stoppedForge chan bool
stoppedGetProofCallForge chan bool
stoppedForgeCallConfirm chan bool
coord *coordinator.Coordinator
coordCfg *config.Coordinator
// Synchronizer
sync *synchronizer.Synchronizer
@ -87,14 +86,18 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
if err != nil {
return nil, err
}
var ethCfg eth.EthereumConfig
if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{
CallGasLimit: coordCfg.EthClient.CallGasLimit,
DeployGasLimit: coordCfg.EthClient.DeployGasLimit,
GasPriceDiv: coordCfg.EthClient.GasPriceDiv,
ReceiptTimeout: coordCfg.EthClient.ReceiptTimeout.Duration,
IntervalReceiptLoop: coordCfg.EthClient.IntervalReceiptLoop.Duration,
}
}
client, err := eth.NewClient(ethClient, nil, nil, &eth.ClientConfig{
Ethereum: eth.EthereumConfig{
CallGasLimit: cfg.EthClient.CallGasLimit,
DeployGasLimit: cfg.EthClient.DeployGasLimit,
GasPriceDiv: cfg.EthClient.GasPriceDiv,
ReceiptTimeout: cfg.EthClient.ReceiptTimeout.Duration,
IntervalReceiptLoop: cfg.EthClient.IntervalReceiptLoop.Duration,
},
Ethereum: ethCfg,
Rollup: eth.RollupConfig{
Address: cfg.SmartContracts.Rollup,
},
@ -121,10 +124,23 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
if err != nil {
return nil, err
}
varsRollup, varsAuction, varsWDelayer := sync.SCVars()
initSCVars := synchronizer.SCVariables{
Rollup: *varsRollup,
Auction: *varsAuction,
WDelayer: *varsWDelayer,
}
scConsts := synchronizer.SCConsts{
Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(),
}
var coord *coordinator.Coordinator
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB := l2db.NewL2DB(
l2DB = l2db.NewL2DB(
db,
coordCfg.L2DB.SafetyPeriod,
coordCfg.L2DB.MaxTxs,
@ -148,23 +164,56 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
for i, serverProofCfg := range coordCfg.ServerProofs {
serverProofs[i] = coordinator.NewServerProof(serverProofCfg.URL)
}
coord = coordinator.NewCoordinator(
coordinator.Config{
ForgerAddress: coordCfg.ForgerAddress,
ConfirmBlocks: coordCfg.ConfirmBlocks,
},
historyDB,
txSelector,
batchBuilder,
serverProofs,
client,
&scConsts,
&initSCVars,
)
}
var nodeAPI *NodeAPI
if cfg.API.Address != "" {
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = coordCfg.API.Coordinator
}
var err error
nodeAPI, err = NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
stateDB,
l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
},
)
if err != nil {
return nil, err
}
nodeAPI.api.SetRollupVariables(initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(initSCVars.WDelayer)
}
var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" {
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, stateDB, sync)
}
ctx, cancel := context.WithCancel(context.Background())
return &Node{
nodeAPI: nodeAPI,
debugAPI: debugAPI,
coord: coord,
coordCfg: coordCfg,
@ -177,85 +226,137 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
}, nil
}
// StartCoordinator starts the coordinator
func (n *Node) StartCoordinator() {
log.Info("Starting Coordinator...")
// TODO: Replace stopXXX by context
// TODO: Replace stoppedXXX by waitgroup
n.stopForge = make(chan bool)
n.stopGetProofCallForge = make(chan bool)
n.stopForgeCallConfirm = make(chan bool)
// NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint
api *api.API
engine *gin.Engine
addr string
}
n.stoppedForge = make(chan bool, 1)
n.stoppedGetProofCallForge = make(chan bool, 1)
n.stoppedForgeCallConfirm = make(chan bool, 1)
func handleNoRoute(c *gin.Context) {
c.JSON(http.StatusNotFound, gin.H{
"error": "404 page not found",
})
}
queueSize := 1
batchCh0 := make(chan *coordinator.BatchInfo, queueSize)
batchCh1 := make(chan *coordinator.BatchInfo, queueSize)
// NewNodeAPI creates a new NodeAPI (which internally calls api.NewAPI)
func NewNodeAPI(
addr string,
coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine,
hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB,
config *api.Config,
) (*NodeAPI, error) {
engine := gin.Default()
engine.NoRoute(handleNoRoute)
engine.Use(cors.Default())
_api, err := api.NewAPI(
coordinatorEndpoints, explorerEndpoints,
engine,
hdb,
sdb,
l2db,
config,
)
if err != nil {
return nil, err
}
return &NodeAPI{
addr: addr,
api: _api,
engine: engine,
}, nil
}
// Run starts the http server of the NodeAPI. To stop it, pass a context with
// cancellation.
func (a *NodeAPI) Run(ctx context.Context) error {
server := &http.Server{
Addr: a.addr,
Handler: a.engine,
// TODO: Figure out best parameters for production
ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd
}
go func() {
defer func() { n.stoppedForge <- true }()
for {
select {
case <-n.stopForge:
return
default:
if forge, err := n.coord.ForgeLoopFn(batchCh0, n.stopForge); err == coordinator.ErrStop {
return
} else if err != nil {
log.Errorw("Coordinator.ForgeLoopFn", "error", err)
} else if !forge {
time.Sleep(n.coordCfg.ForgeLoopInterval.Duration)
}
}
log.Infof("NodeAPI is ready at %v", a.addr)
if err := server.ListenAndServe(); err != nil &&
err != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err)
}
}()
go func() {
defer func() { n.stoppedGetProofCallForge <- true }()
for {
select {
case <-n.stopGetProofCallForge:
return
default:
if err := n.coord.GetProofCallForgeLoopFn(
batchCh0, batchCh1, n.stopGetProofCallForge); err == coordinator.ErrStop {
return
} else if err != nil {
log.Errorw("Coordinator.GetProofCallForgeLoopFn", "error", err)
}
}
<-ctx.Done()
log.Info("Stopping NodeAPI...")
ctxTimeout, cancel := context.WithTimeout(context.Background(), 10*time.Second) //nolint:gomnd
defer cancel()
if err := server.Shutdown(ctxTimeout); err != nil {
return err
}
log.Info("NodeAPI done")
return nil
}
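Run serves in a goroutine and blocks on the context, so the caller owns the lifecycle. A minimal sketch of driving it, mirroring StartNodeAPI below (error handling is hypothetical):
ctx, cancel := context.WithCancel(context.Background())
go func() {
	if err := nodeAPI.Run(ctx); err != nil {
		log.Errorw("NodeAPI.Run", "err", err)
	}
}()
// ... later, a graceful stop: Shutdown gets up to 10s to drain connections
cancel()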
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around.
func (n *Node) syncLoopFn(lastBlock *common.Block) (*common.Block, time.Duration) {
if blockData, discarded, err := n.sync.Sync2(n.ctx, lastBlock); err != nil {
// case: error
log.Errorw("Synchronizer.Sync", "error", err)
return nil, n.cfg.Synchronizer.SyncLoopInterval.Duration
} else if discarded != nil {
// case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
if n.mode == ModeCoordinator {
n.coord.SendMsg(coordinator.MsgSyncReorg{})
}
}()
go func() {
defer func() { n.stoppedForgeCallConfirm <- true }()
for {
select {
case <-n.stopForgeCallConfirm:
return
default:
if err := n.coord.ForgeCallConfirmLoopFn(
batchCh1, n.stopForgeCallConfirm); err == coordinator.ErrStop {
return
} else if err != nil {
log.Errorw("Coordinator.ForgeCallConfirmLoopFn", "error", err)
}
if n.nodeAPI != nil {
rollup, auction, wDelayer := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*rollup)
n.nodeAPI.api.SetAuctionVariables(*auction)
n.nodeAPI.api.SetWDelayerVariables(*wDelayer)
// TODO: n.nodeAPI.api.UpdateNetworkInfo()
}
return nil, time.Duration(0)
} else if blockData != nil {
// case: new block
stats := n.sync.Stats()
if n.mode == ModeCoordinator {
if stats.Synced() && (blockData.Rollup.Vars != nil ||
blockData.Auction.Vars != nil ||
blockData.WDelayer.Vars != nil) {
n.coord.SendMsg(coordinator.MsgSyncSCVars{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,
})
}
n.coord.SendMsg(coordinator.MsgSyncStats{
Stats: *stats,
})
}
}()
}
if n.nodeAPI != nil {
if blockData.Rollup.Vars != nil {
n.nodeAPI.api.SetRollupVariables(*blockData.Rollup.Vars)
}
if blockData.Auction.Vars != nil {
n.nodeAPI.api.SetAuctionVariables(*blockData.Auction.Vars)
}
if blockData.WDelayer.Vars != nil {
n.nodeAPI.api.SetWDelayerVariables(*blockData.WDelayer.Vars)
}
// StopCoordinator stops the coordinator
func (n *Node) StopCoordinator() {
log.Info("Stopping Coordinator...")
n.stopForge <- true
n.stopGetProofCallForge <- true
n.stopForgeCallConfirm <- true
<-n.stoppedForge
<-n.stoppedGetProofCallForge
<-n.stoppedForgeCallConfirm
// TODO: n.nodeAPI.api.UpdateNetworkInfo()
}
return &blockData.Block, time.Duration(0)
} else {
// case: no block
return lastBlock, n.cfg.Synchronizer.SyncLoopInterval.Duration
}
}
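The return values encode the loop's back-off policy: (nil, interval) on error, (nil, 0) after a reorg so the next iteration restarts immediately from the resynced state, (newBlock, 0) on progress, and (lastBlock, interval) when there is nothing new to sync.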
// StartSynchronizer starts the synchronizer
@ -263,32 +364,16 @@ func (n *Node) StartSynchronizer() {
log.Info("Starting Synchronizer...")
n.wg.Add(1)
go func() {
defer func() {
log.Info("Synchronizer routine stopped")
n.wg.Done()
}()
var lastBlock *common.Block
d := time.Duration(0)
waitDuration := time.Duration(0)
for {
select {
case <-n.ctx.Done():
log.Info("Synchronizer done")
n.wg.Done()
return
case <-time.After(d):
if blockData, discarded, err := n.sync.Sync2(n.ctx, lastBlock); err != nil {
log.Errorw("Synchronizer.Sync", "error", err)
lastBlock = nil
d = n.cfg.Synchronizer.SyncLoopInterval.Duration
} else if discarded != nil {
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
lastBlock = nil
d = time.Duration(0)
} else if blockData != nil {
lastBlock = &blockData.Block
d = time.Duration(0)
} else {
d = n.cfg.Synchronizer.SyncLoopInterval.Duration
}
case <-time.After(waitDuration):
lastBlock, waitDuration = n.syncLoopFn(lastBlock)
}
}
}()
@ -310,14 +395,33 @@ func (n *Node) StartDebugAPI() {
}()
}
// StartNodeAPI starts the NodeAPI
func (n *Node) StartNodeAPI() {
log.Info("Starting NodeAPI...")
n.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
n.wg.Done()
}()
if err := n.nodeAPI.Run(n.ctx); err != nil {
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Start the node
func (n *Node) Start() {
log.Infow("Starting node...", "mode", n.mode)
if n.debugAPI != nil {
n.StartDebugAPI()
}
if n.nodeAPI != nil {
n.StartNodeAPI()
}
if n.mode == ModeCoordinator {
n.StartCoordinator()
log.Info("Starting Coordinator...")
n.coord.Start()
}
n.StartSynchronizer()
}
@ -327,7 +431,8 @@ func (n *Node) Stop() {
log.Infow("Stopping node...")
n.cancel()
if n.mode == ModeCoordinator {
n.StopCoordinator()
log.Info("Stopping Coordinator...")
n.coord.Stop()
}
n.wg.Wait()
}
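With the forge goroutines now owned by the Coordinator, the node lifecycle collapses to a few calls. A minimal sketch, assuming cfg and coordCfg are already loaded (hypothetical caller):
n, err := NewNode(ModeCoordinator, cfg, coordCfg)
if err != nil {
	log.Fatalw("NewNode", "err", err)
}
n.Start()
// on shutdown (e.g. SIGINT): cancel the context, stop the
// coordinator, and wait for all goroutines via the WaitGroup.
n.Stop()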

+201 -76  synchronizer/synchronizer.go

@ -54,14 +54,27 @@ type Stats struct {
Updated time.Time
LastBlock int64
LastBatch int64
Auction struct {
CurrentSlot common.Slot
}
}
}
// Synced returns true if the Synchronizer is up to date with the last ethereum block
func (s *Stats) Synced() bool {
return s.Eth.LastBlock == s.Sync.LastBlock
}
// TODO(Edu): Consider removing all the mutexes from StatsHolder, make
// Synchronizer.Stats not thread-safe, don't pass the synchronizer to the
// debugAPI, and have a copy of the Stats in the DebugAPI that the node passes
// when the Sync updates.
// StatsHolder stores stats and allows reading and writing them
// concurrently
type StatsHolder struct {
stats Stats
rw sync.RWMutex
Stats
rw sync.RWMutex
}
// NewStatsHolder creates a new StatsHolder
@ -69,18 +82,25 @@ func NewStatsHolder(firstBlock int64, refreshPeriod time.Duration) *StatsHolder
stats := Stats{}
stats.Eth.RefreshPeriod = refreshPeriod
stats.Eth.FirstBlock = firstBlock
return &StatsHolder{stats: stats}
return &StatsHolder{Stats: stats}
}
// UpdateCurrentSlot updates the auction stats
func (s *StatsHolder) UpdateCurrentSlot(slot common.Slot) {
s.rw.Lock()
s.Sync.Auction.CurrentSlot = slot
s.rw.Unlock()
}
// UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock int64, lastBatch *common.BatchNum) {
now := time.Now()
s.rw.Lock()
s.stats.Sync.LastBlock = lastBlock
s.Sync.LastBlock = lastBlock
if lastBatch != nil {
s.stats.Sync.LastBatch = int64(*lastBatch)
s.Sync.LastBatch = int64(*lastBatch)
}
s.stats.Sync.Updated = now
s.Sync.Updated = now
s.rw.Unlock()
}
@ -88,13 +108,13 @@ func (s *StatsHolder) UpdateSync(lastBlock int64, lastBatch *common.BatchNum) {
func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
now := time.Now()
s.rw.RLock()
elapsed := now.Sub(s.stats.Eth.Updated)
elapsed := now.Sub(s.Eth.Updated)
s.rw.RUnlock()
if elapsed < s.stats.Eth.RefreshPeriod {
if elapsed < s.Eth.RefreshPeriod {
return nil
}
lastBlock, err := ethClient.EthCurrentBlock()
lastBlock, err := ethClient.EthLastBlock()
if err != nil {
return err
}
@ -103,9 +123,9 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
return err
}
s.rw.Lock()
s.stats.Eth.Updated = now
s.stats.Eth.LastBlock = lastBlock
s.stats.Eth.LastBatch = lastBatch
s.Eth.Updated = now
s.Eth.LastBlock = lastBlock
s.Eth.LastBatch = lastBatch
s.rw.Unlock()
return nil
}
@ -113,19 +133,27 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
// CopyStats returns a copy of the inner Stats
func (s *StatsHolder) CopyStats() *Stats {
s.rw.RLock()
sCopy := s.stats
sCopy := s.Stats
if s.Sync.Auction.CurrentSlot.BidValue != nil {
sCopy.Sync.Auction.CurrentSlot.BidValue =
common.CopyBigInt(s.Sync.Auction.CurrentSlot.BidValue)
}
s.rw.RUnlock()
return &sCopy
}
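The explicit big.Int copy matters because assigning the Stats struct only duplicates the pointer. A minimal illustration with hypothetical values:
s := NewStatsHolder(0, time.Second)
s.UpdateCurrentSlot(common.Slot{BidValue: big.NewInt(5)})
stats := s.CopyStats()
stats.Sync.Auction.CurrentSlot.BidValue.SetInt64(6) // mutates only the copy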
func (s *StatsHolder) blocksPerc() float64 {
return float64(s.stats.Sync.LastBlock-s.stats.Eth.FirstBlock) * 100.0 /
float64(s.stats.Eth.LastBlock-s.stats.Eth.FirstBlock)
syncLastBlock := s.Sync.LastBlock
if s.Sync.LastBlock == 0 {
syncLastBlock = s.Eth.FirstBlock - 1
}
return float64(syncLastBlock-(s.Eth.FirstBlock-1)) * 100.0 /
float64(s.Eth.LastBlock-(s.Eth.FirstBlock-1))
}
func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
return float64(batchNum) * 100.0 /
float64(s.stats.Eth.LastBatch)
float64(s.Eth.LastBatch)
}
// ConfigStartBlockNum sets the first block used to start tracking the smart
@ -143,6 +171,13 @@ type SCVariables struct {
WDelayer common.WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup common.RollupConstants
Auction common.AuctionConstants
WDelayer common.WDelayerConstants
}
// Config is the Synchronizer configuration
type Config struct {
StartBlockNum ConfigStartBlockNum
@ -152,16 +187,17 @@ type Config struct {
// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient eth.ClientInterface
auctionConstants common.AuctionConstants
rollupConstants common.RollupConstants
wDelayerConstants common.WDelayerConstants
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
cfg Config
startBlockNum int64
vars SCVariables
stats *StatsHolder
ethClient eth.ClientInterface
// auctionConstants common.AuctionConstants
// rollupConstants common.RollupConstants
// wDelayerConstants common.WDelayerConstants
consts SCConsts
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
cfg Config
startBlockNum int64
vars SCVariables
stats *StatsHolder
// firstSavedBlock *common.Block
// mux sync.Mutex
}
@ -196,56 +232,113 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
}
stats := NewStatsHolder(startBlockNum, cfg.StatsRefreshPeriod)
s := &Synchronizer{
ethClient: ethClient,
auctionConstants: *auctionConstants,
rollupConstants: *rollupConstants,
wDelayerConstants: *wDelayerConstants,
historyDB: historyDB,
stateDB: stateDB,
cfg: cfg,
startBlockNum: startBlockNum,
stats: stats,
ethClient: ethClient,
consts: SCConsts{
Rollup: *rollupConstants,
Auction: *auctionConstants,
WDelayer: *wDelayerConstants,
},
historyDB: historyDB,
stateDB: stateDB,
cfg: cfg,
startBlockNum: startBlockNum,
stats: stats,
}
return s, s.init()
}
// Stats returns a copy of the Synchronizer Stats
// Stats returns a copy of the Synchronizer Stats. It is safe to call Stats()
// during a Sync call
func (s *Synchronizer) Stats() *Stats {
return s.stats.CopyStats()
}
// AuctionConstants returns the AuctionConstants read from the smart contract
func (s *Synchronizer) AuctionConstants() *common.AuctionConstants {
return &s.auctionConstants
return &s.consts.Auction
}
// RollupConstants returns the RollupConstants read from the smart contract
func (s *Synchronizer) RollupConstants() *common.RollupConstants {
return &s.rollupConstants
return &s.consts.Rollup
}
// WDelayerConstants returns the WDelayerConstants read from the smart contract
func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
return &s.wDelayerConstants
return &s.consts.WDelayer
}
func (s *Synchronizer) init() error {
rollup, auction, wDelayer, err := s.historyDB.GetSCVars()
// If SCVars are not in the HistoryDB, this is probably the first run
// of the Synchronizer: store the initial vars taken from config
if err == sql.ErrNoRows {
rollup = &s.cfg.InitialVariables.Rollup
auction = &s.cfg.InitialVariables.Auction
wDelayer = &s.cfg.InitialVariables.WDelayer
log.Info("Setting initial SCVars in HistoryDB")
if err = s.historyDB.SetInitialSCVars(rollup, auction, wDelayer); err != nil {
// SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() (*common.RollupVariables, *common.AuctionVariables, *common.WDelayerVariables) {
return s.vars.Rollup.Copy(), s.vars.Auction.Copy(), s.vars.WDelayer.Copy()
}
func (s *Synchronizer) updateCurrentSlotIfSync(batchesLen int) error {
slot := common.Slot{
SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
BatchesLen: int(s.stats.Sync.Auction.CurrentSlot.BatchesLen),
}
// We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock + 1
slotNum := s.consts.Auction.SlotNum(blockNum)
if batchesLen == -1 {
dbBatchesLen, err := s.historyDB.GetBatchesLen(slotNum)
// fmt.Printf("DBG -1 from: %v, to: %v, len: %v\n", from, to, dbBatchesLen)
if err != nil {
log.Errorw("historyDB.GetBatchesLen", "err", err)
return err
}
slot.BatchesLen = dbBatchesLen
} else if slotNum > slot.SlotNum {
// fmt.Printf("DBG batchesLen Reset len: %v (%v %v)\n", batchesLen, slotNum, slot.SlotNum)
slot.BatchesLen = batchesLen
} else {
// fmt.Printf("DBG batchesLen add len: %v: %v\n", batchesLen, slot.BatchesLen+batchesLen)
slot.BatchesLen += batchesLen
}
slot.SlotNum = slotNum
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator
if s.stats.Synced() {
bidCoord, err := s.historyDB.GetBestBidCoordinator(slot.SlotNum)
if err != nil && err != sql.ErrNoRows {
return err
}
if err == sql.ErrNoRows {
slot.BootCoord = true
slot.Forger = s.vars.Auction.BootCoordinator
slot.URL = "???"
} else if err == nil {
slot.BidValue = bidCoord.BidValue
defaultSlotBid := bidCoord.DefaultSlotSetBid[slot.SlotNum%6]
if slot.BidValue.Cmp(defaultSlotBid) >= 0 {
slot.Bidder = bidCoord.Bidder
slot.Forger = bidCoord.Forger
slot.URL = bidCoord.URL
} else {
slot.BootCoord = true
slot.Forger = s.vars.Auction.BootCoordinator
slot.URL = "???"
}
}
// TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
if err != nil {
return err
}
if !canForge {
return fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)
}
// END SANITY CHECK
}
s.vars.Rollup = *rollup
s.vars.Auction = *auction
s.vars.WDelayer = *wDelayer
s.stats.UpdateCurrentSlot(slot)
return nil
}
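The SlotNum and SlotBlocks helpers used above reduce to simple arithmetic over the auction constants; an assumed sketch, consistent with the computation in rollupSync below:
slotNum := (blockNum - consts.GenesisBlockNum) / int64(consts.BlocksPerSlot)
startBlock := consts.GenesisBlockNum + slotNum*int64(consts.BlocksPerSlot)
endBlock := startBlock + int64(consts.BlocksPerSlot) - 1 // assuming an inclusive range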
func (s *Synchronizer) init() error {
// Update stats parameters so that they have valid values before the
// first Sync call
if err := s.stats.UpdateEth(s.ethClient); err != nil {
@ -265,26 +358,20 @@ func (s *Synchronizer) init() error {
} else {
lastBlockNum = lastSavedBlock.EthBlockNum
}
lastBatchNum, err := s.historyDB.GetLastBatchNum()
if err != nil && err != sql.ErrNoRows {
if err := s.resetState(lastBlockNum); err != nil {
return err
}
if err == sql.ErrNoRows {
lastBatchNum = 0
}
s.stats.UpdateSync(lastBlockNum, &lastBatchNum)
log.Infow("Sync init block",
"syncLastBlock", s.stats.stats.Sync.LastBlock,
"syncLastBlock", s.stats.Sync.LastBlock,
"syncBlocksPerc", s.stats.blocksPerc(),
"ethFirstBlock", s.stats.stats.Eth.FirstBlock,
"ethLastBlock", s.stats.stats.Eth.LastBlock,
"ethFirstBlock", s.stats.Eth.FirstBlock,
"ethLastBlock", s.stats.Eth.LastBlock,
)
log.Infow("Sync init batch",
"syncLastBatch", s.stats.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.stats.Sync.LastBatch),
"ethLastBatch", s.stats.stats.Eth.LastBatch,
"syncLastBatch", s.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
"ethLastBatch", s.stats.Eth.LastBatch,
)
return nil
}
@ -329,7 +416,7 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
log.Debugw("Syncing...",
"block", nextBlockNum,
"ethLastBlock", s.stats.stats.Eth.LastBlock,
"ethLastBlock", s.stats.Eth.LastBlock,
)
// Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
@ -404,16 +491,19 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
s.stats.UpdateSync(ethBlock.EthBlockNum,
&rollupData.Batches[batchesLen-1].Batch.BatchNum)
}
if err := s.updateCurrentSlotIfSync(len(rollupData.Batches)); err != nil {
return nil, nil, err
}
log.Debugw("Synced block",
"syncLastBlock", s.stats.stats.Sync.LastBlock,
"syncLastBlock", s.stats.Sync.LastBlock,
"syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlock", s.stats.stats.Eth.LastBlock,
"ethLastBlock", s.stats.Eth.LastBlock,
)
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
"ethLastBatch", s.stats.stats.Eth.LastBatch,
"ethLastBatch", s.stats.Eth.LastBatch,
)
}
@ -431,11 +521,13 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
for blockNum >= s.startBlockNum {
ethBlock, err := s.ethClient.EthBlockByNumber(context.Background(), blockNum)
if err != nil {
log.Errorw("ethClient.EthBlockByNumber", "err", err)
return 0, err
}
block, err := s.historyDB.GetBlock(blockNum)
if err != nil {
log.Errorw("historyDB.GetBlock", "err", err)
return 0, err
}
if block.Hash == ethBlock.Hash {
@ -453,19 +545,51 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
return 0, err
}
if err := s.resetState(blockNum); err != nil {
return 0, err
}
return blockNum, nil
}
func (s *Synchronizer) resetState(blockNum int64) error {
rollup, auction, wDelayer, err := s.historyDB.GetSCVars()
// If SCVars are not in the HistoryDB, this is probably the first run
// of the Synchronizer: store the initial vars taken from config
if err == sql.ErrNoRows {
rollup = &s.cfg.InitialVariables.Rollup
auction = &s.cfg.InitialVariables.Auction
wDelayer = &s.cfg.InitialVariables.WDelayer
log.Info("Setting initial SCVars in HistoryDB")
if err = s.historyDB.SetInitialSCVars(rollup, auction, wDelayer); err != nil {
log.Errorw("historyDB.SetInitialSCVars", "err", err)
return err
}
}
s.vars.Rollup = *rollup
s.vars.Auction = *auction
s.vars.WDelayer = *wDelayer
batchNum, err := s.historyDB.GetLastBatchNum()
if err != nil && err != sql.ErrNoRows {
return 0, err
log.Errorw("historyDB.GetLastBatchNum", "err", err)
return err
}
if err == sql.ErrNoRows {
batchNum = 0
}
err = s.stateDB.Reset(batchNum)
if err != nil {
return 0, err
log.Errorw("stateDB.Reset", "err", err)
return err
}
return blockNum, nil
s.stats.UpdateSync(blockNum, &batchNum)
if err := s.updateCurrentSlotIfSync(-1); err != nil {
return err
}
return nil
}
// TODO: Figure out who will use the Status output, and only return what's strictly needed
@ -500,7 +624,7 @@ func (s *Synchronizer) Status() (*common.SyncStatus, error) {
status.CurrentBatch = lastSavedBatch
// Get latest blockNum in blockchain
latestBlockNum, err := s.ethClient.EthCurrentBlock()
latestBlockNum, err := s.ethClient.EthLastBlock()
if err != nil {
return nil, err
}
@ -664,9 +788,9 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
batchData.CreatedAccounts = processTxsOut.CreatedAccounts
slotNum := int64(0)
if ethBlock.EthBlockNum >= s.auctionConstants.GenesisBlockNum {
slotNum = (ethBlock.EthBlockNum - s.auctionConstants.GenesisBlockNum) /
int64(s.auctionConstants.BlocksPerSlot)
if ethBlock.EthBlockNum >= s.consts.Auction.GenesisBlockNum {
slotNum = (ethBlock.EthBlockNum - s.consts.Auction.GenesisBlockNum) /
int64(s.consts.Auction.BlocksPerSlot)
}
// Get Batch information
@ -831,6 +955,7 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
"auctionEvents.NewDefaultSlotSetBid: %v", evt.SlotSet)
}
s.vars.Auction.DefaultSlotSetBid[evt.SlotSet] = evt.NewInitialMinBid
s.vars.Auction.DefaultSlotSetBidSlotNum = s.consts.Auction.SlotNum(blockNum) + int64(s.vars.Auction.ClosedAuctionSlots) + 1
varsUpdate = true
}
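With this formula a default-bid change never touches slots already closed for bidding: if the event lands in slot 100 and ClosedAuctionSlots is 2, DefaultSlotSetBidSlotNum becomes 103, so slots 100 through 102 keep the previous default.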

+1 -1  synchronizer/synchronizer_test.go

@ -535,7 +535,7 @@ func TestSync(t *testing.T) {
for i := 0; i < 4; i++ {
client.CtlRollback()
}
blockNum := client.CtlCurrentBlock()
blockNum := client.CtlLastBlock()
require.Equal(t, int64(1), blockNum)
// Generate extra required data

+3 -1  test/debugapi/debugapi.go

@ -126,7 +126,9 @@ func (a *DebugAPI) Run(ctx context.Context) error {
<-ctx.Done()
log.Info("Stopping DebugAPI...")
if err := debugAPIServer.Shutdown(context.Background()); err != nil {
ctxTimeout, cancel := context.WithTimeout(context.Background(), 10*time.Second) //nolint:gomnd
defer cancel()
if err := debugAPIServer.Shutdown(ctxTimeout); err != nil {
return err
}
log.Info("DebugAPI done")

+5 -4  test/ethclient.go

@ -166,6 +166,7 @@ func (a *AuctionBlock) canForge(forger ethCommon.Address, blockNum int64) (bool,
}
slotToForge := a.getSlotNumber(blockNum)
// fmt.Printf("DBG canForge slot: %v\n", slotToForge)
// Get the relativeBlock to check if the slotDeadline has been exceeded
relativeBlock := blockNum - (a.Constants.GenesisBlockNum + (slotToForge * int64(a.Constants.BlocksPerSlot)))
@ -551,15 +552,15 @@ func (c *Client) CtlRollback() {
// Ethereum
//
// CtlCurrentBlock returns the current blockNum without checks
func (c *Client) CtlCurrentBlock() int64 {
// CtlLastBlock returns the last blockNum without checks
func (c *Client) CtlLastBlock() int64 {
c.rw.RLock()
defer c.rw.RUnlock()
return c.blockNum
}
// EthCurrentBlock returns the current blockNum
func (c *Client) EthCurrentBlock() (int64, error) {
// EthLastBlock returns the last blockNum
func (c *Client) EthLastBlock() (int64, error) {
c.rw.RLock()
defer c.rw.RUnlock()

+6 -6  test/ethclient_test.go

@ -40,7 +40,7 @@ func TestClientEth(t *testing.T) {
var timer timer
clientSetup := NewClientSetupExample()
c := NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
blockNum, err := c.EthCurrentBlock()
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
assert.Equal(t, int64(1), blockNum)
@ -126,7 +126,7 @@ func TestClientAuction(t *testing.T) {
c.CtlMineBlock()
blockNum, err := c.EthCurrentBlock()
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
auctionEvents, _, err := c.AuctionEventsByBlock(blockNum)
@ -168,7 +168,7 @@ func TestClientRollup(t *testing.T) {
}
c.CtlMineBlock()
blockNum, err := c.EthCurrentBlock()
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, _, err := c.RollupEventsByBlock(blockNum)
require.Nil(t, err)
@ -189,7 +189,7 @@ func TestClientRollup(t *testing.T) {
})
c.CtlMineBlock()
blockNumA, err := c.EthCurrentBlock()
blockNumA, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, hashA, err := c.RollupEventsByBlock(blockNumA)
require.Nil(t, err)
@ -202,7 +202,7 @@ func TestClientRollup(t *testing.T) {
c.CtlRollback()
c.CtlMineBlock()
blockNumB, err := c.EthCurrentBlock()
blockNumB, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, hashB, err := c.RollupEventsByBlock(blockNumA)
require.Nil(t, err)
@ -229,7 +229,7 @@ func TestClientRollup(t *testing.T) {
// Retrieve ForgeBatchArguments starting from the events
blockNum, err = c.EthCurrentBlock()
blockNum, err = c.EthLastBlock()
require.Nil(t, err)
rollupEvents, _, err = c.RollupEventsByBlock(blockNum)
require.Nil(t, err)
