Compare commits

..

14 Commits

Author          SHA1        Message                               Date
Eduard S        b330889570  WIP                                   2021-03-09 14:32:23 +01:00
Eduard S        a5ef822c64  WIP3                                  2021-03-08 18:17:15 +01:00
Eduard S        5501f30062  WIP2                                  2021-03-04 14:05:35 +01:00
Eduard S        d4f6926311  WIP                                   2021-03-03 14:37:41 +01:00
Eduard S        bfba1ba2d2  Clean                                 2021-03-03 12:50:21 +01:00
arnaubennassar  eed635539f  pull                                  2021-03-02 18:49:34 +01:00
arnaubennassar  87610f6188  wip                                   2021-03-02 18:46:56 +01:00
arnaubennassar  4b596072d2  Add table to decouple API from node   2021-03-02 15:22:02 +01:00
Eduard S        95c4019cb2  WIP                                   2021-03-01 10:51:30 +01:00
Eduard S        c4d5e8a7ab  WIP                                   2021-03-01 10:51:30 +01:00
Eduard S        c1375d9c5f  Serve API only via cli                2021-03-01 10:51:30 +01:00
Eduard S        26e2bbc262  WIP                                   2021-02-26 16:17:06 +01:00
Eduard S        bb4c464200  WIP                                   2021-02-26 13:09:24 +01:00
Eduard S        982899efed  Serve API only via cli                2021-02-26 13:09:24 +01:00
108 changed files with 2411 additions and 3374 deletions

View File

@@ -44,7 +44,7 @@ func (a *API) getAccounts(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type accountResponse struct {
Accounts []historydb.AccountAPI `json:"accounts"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -47,7 +47,7 @@ func (a *API) getAccountCreationAuth(c *gin.Context) {
retSQLErr(err, c)
return
}
// Build successful response
// Build succesfull response
c.JSON(http.StatusOK, auth)
}

View File

@@ -2,40 +2,19 @@ package api
import (
"errors"
"sync"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr"
)
// TODO: Add correct values to constants
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Status define status of the network
type Status struct {
sync.RWMutex
NodeConfig NodeConfig `json:"nodeConfig"`
Network Network `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// API serves HTTP requests to allow external interaction with the Hermez node
type API struct {
h *historydb.HistoryDB
cg *configAPI
l2 *l2db.L2DB
status Status
chainID uint16
hermezAddress ethCommon.Address
}
@@ -46,8 +25,6 @@ func NewAPI(
server *gin.Engine,
hdb *historydb.HistoryDB,
l2db *l2db.L2DB,
config *Config,
nodeConfig *NodeConfig,
) (*API, error) {
// Check input
// TODO: is stateDB only needed for explorer endpoints or for both?
@@ -57,20 +34,20 @@ func NewAPI(
if explorerEndpoints && hdb == nil {
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
}
consts, err := hdb.GetConstants()
if err != nil {
return nil, err
}
a := &API{
h: hdb,
cg: &configAPI{
RollupConstants: *newRollupConstants(config.RollupConstants),
AuctionConstants: config.AuctionConstants,
WDelayerConstants: config.WDelayerConstants,
RollupConstants: *newRollupConstants(consts.Rollup),
AuctionConstants: consts.Auction,
WDelayerConstants: consts.WDelayer,
},
l2: l2db,
status: Status{
NodeConfig: *nodeConfig,
},
chainID: config.ChainID,
hermezAddress: config.HermezAddress,
l2: l2db,
chainID: consts.ChainID,
hermezAddress: consts.HermezAddress,
}
// Add coordinator endpoints
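After this change the constructor no longer receives a *Config or *NodeConfig; it pulls the chain ID, Hermez address and smart-contract constants from the HistoryDB via GetConstants. A minimal sketch of the resulting call, mirroring the updated call in api_test.go further down; the handle names (hdb, l2DB) and the exact names of the two boolean parameters are assumptions, and the HistoryDB is assumed to have been seeded with SetConstants beforehand:

apiGin := gin.Default()
a, err := NewAPI(
	true,   // serve coordinator endpoints
	true,   // serve explorer endpoints (requires a non-nil HistoryDB)
	apiGin, // gin engine the handlers are registered on
	hdb,    // *historydb.HistoryDB, also the source of constants via hdb.GetConstants()
	l2DB,   // *l2db.L2DB
)
if err != nil {
	panic(err)
}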

View File

@@ -180,12 +180,13 @@ type testCommon struct {
auctionVars common.AuctionVariables
rollupVars common.RollupVariables
wdelayerVars common.WDelayerVariables
nextForgers []NextForger
nextForgers []historydb.NextForgerAPI
}
var tc testCommon
var config configAPI
var api *API
var stateAPIUpdater *StateAPIUpdater
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
// emulating the task of the synchronizer in order to have data to be returned
@@ -201,21 +202,11 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
apiConnCon := db.NewAPIConnectionController(1, time.Second)
apiConnCon := db.NewAPICnnectionController(1, time.Second)
hdb := historydb.NewHistoryDB(database, database, apiConnCon)
if err != nil {
panic(err)
}
// StateDB
dir, err := ioutil.TempDir("", "tmpdb")
if err != nil {
panic(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
}()
// L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
@@ -230,18 +221,38 @@ func TestMain(m *testing.M) {
// API
apiGin := gin.Default()
// Reset DB
test.WipeDB(hdb.DB())
constants := &historydb.Constants{
SCConsts: common.SCConsts{
Rollup: _config.RollupConstants,
Auction: _config.AuctionConstants,
WDelayer: _config.WDelayerConstants,
},
ChainID: chainID,
HermezAddress: _config.HermezAddress,
}
if err := hdb.SetConstants(constants); err != nil {
panic(err)
}
nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10,
MinFeeUSD: 0,
}
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err)
}
api, err = NewAPI(
true,
true,
apiGin,
hdb,
l2DB,
&_config,
&NodeConfig{
ForgeDelay: 180,
},
)
if err != nil {
log.Error(err)
panic(err)
}
// Start server
@@ -257,10 +268,7 @@ func TestMain(m *testing.M) {
}
}()
// Reset DB
test.WipeDB(api.h.DB())
// Generate blockchain data with til
// Genratre blockchain data with til
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
@@ -460,19 +468,19 @@ func TestMain(m *testing.M) {
if err = api.h.AddBids(bids); err != nil {
panic(err)
}
bootForger := NextForger{
bootForger := historydb.NextForgerAPI{
Coordinator: historydb.CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
},
}
// Set next forgers: set all as boot coordinator then replace the non boot coordinators
nextForgers := []NextForger{}
nextForgers := []historydb.NextForgerAPI{}
var initBlock int64 = 140
var deltaBlocks int64 = 40
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
fromBlock := initBlock + deltaBlocks*int64(i-1)
bootForger.Period = Period{
bootForger.Period = historydb.Period{
SlotNum: int64(i),
FromBlock: fromBlock,
ToBlock: fromBlock + deltaBlocks - 1,
@@ -512,7 +520,13 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000),
}
// Generate test data, as expected to be received/sent from/to the API
stateAPIUpdater = NewStateAPIUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
// Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords)
testBids := genTestBids(commonBlocks, testCoords, bids)
testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
@@ -589,17 +603,14 @@ func TestMain(m *testing.M) {
if err := database.Close(); err != nil {
panic(err)
}
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
os.Exit(result)
}
func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond)
apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
require.NoError(t, err)
// L2DB
@@ -627,17 +638,12 @@ func TestTimeout(t *testing.T) {
require.NoError(t, err)
}
}()
_config := getConfigTest(0)
_, err = NewAPI(
true,
true,
apiGinTO,
hdbTO,
l2DBTO,
&_config,
&NodeConfig{
ForgeDelay: 180,
},
)
require.NoError(t, err)

View File

@@ -52,7 +52,7 @@ func (a *API) getBatches(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type batchesResponse struct {
Batches []historydb.BatchAPI `json:"batches"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -34,7 +34,7 @@ func (a *API) getBids(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type bidsResponse struct {
Bids []historydb.BidAPI `json:"bids"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -32,7 +32,7 @@ func (a *API) getCoordinators(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type coordinatorsResponse struct {
Coordinators []historydb.CoordinatorAPI `json:"coordinators"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -43,7 +43,7 @@ func (a *API) getExits(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type exitsResponse struct {
Exits []historydb.ExitAPI `json:"exits"`
PendingItems uint64 `json:"pendingItems"`
@@ -72,6 +72,6 @@ func (a *API) getExit(c *gin.Context) {
retSQLErr(err, c)
return
}
// Build successful response
// Build succesfull response
c.JSON(http.StatusOK, exit)
}

View File

@@ -14,7 +14,7 @@ import (
)
const (
// maxLimit is the max permitted items to be returned in paginated responses
// maxLimit is the max permited items to be returned in paginated responses
maxLimit uint = 2049
// dfltOrder indicates how paginated endpoints are ordered if not specified
@@ -40,8 +40,8 @@ const (
)
var (
// ErrNilBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
ErrNilBidderAddr = errors.New("biderAddr can not be nil")
// ErrNillBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
ErrNillBidderAddr = errors.New("biderAddr can not be nil")
)
func retSQLErr(err error, c *gin.Context) {

View File

@@ -50,19 +50,19 @@ func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err
return fromItem, order, limit, nil
}
// nolint reason: res may be not overwritten
// nolint reason: res may be not overwriten
func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009
str := c.Query(name)
return stringToUint(str, name, dflt, min, max)
}
// nolint reason: res may be not overwritten
// nolint reason: res may be not overwriten
func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009
str := c.Query(name)
return stringToInt64(str, name, dflt, min, max)
}
// nolint reason: res may be not overwritten
// nolint reason: res may be not overwriten
func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009
str := c.Query(name)
if str == "" {
@@ -295,13 +295,13 @@ func parseParamIdx(c paramer) (*common.Idx, error) {
return stringToIdx(idxStr, name)
}
// nolint reason: res may be not overwritten
// nolint reason: res may be not overwriten
func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009
str := c.Param(name)
return stringToUint(str, name, dflt, min, max)
}
// nolint reason: res may be not overwritten
// nolint reason: res may be not overwriten
func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009
str := c.Param(name)
return stringToInt64(str, name, dflt, min, max)

View File

@@ -11,7 +11,7 @@ import (
"github.com/hermeznetwork/tracerr"
)
// SlotAPI is a representation of a slot information
// SlotAPI is a repesentation of a slot information
type SlotAPI struct {
ItemID uint64 `json:"itemId"`
SlotNum int64 `json:"slotNum"`
@@ -316,7 +316,7 @@ func (a *API) getSlots(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type slotsResponse struct {
Slots []SlotAPI `json:"slots"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -99,7 +99,9 @@ func TestGetSlot(t *testing.T) {
nil, &fetchedSlot,
),
)
emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
assertSlot(t, emptySlot, fetchedSlot)
// Invalid slotNum
@@ -127,8 +129,10 @@ func TestGetSlots(t *testing.T) {
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
assert.NoError(t, err)
allSlots := tc.slots
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
allSlots = append(allSlots, emptySlot)
}
assertSlots(t, allSlots, fetchedSlots)

View File

@@ -2,319 +2,160 @@ package api
import (
"database/sql"
"fmt"
"math"
"math/big"
"net/http"
"time"
"sync"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr"
)
// Network define status of the network
type Network struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *historydb.BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
}
// NodeConfig is the configuration of the node that is exposed via API
type NodeConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// NextForger is a representation of the information of a coordinator and the period will forge
type NextForger struct {
Coordinator historydb.CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// Period is a representation of a period
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
func (a *API) getState(c *gin.Context) {
// TODO: There are no events for the buckets information, so now this information will be 0
a.status.RLock()
status := a.status //nolint
a.status.RUnlock()
c.JSON(http.StatusOK, status) //nolint
}
// SC Vars
// SetRollupVariables set Status.Rollup variables
func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
a.status.Lock()
var rollupVAPI historydb.RollupVariablesAPI
rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
for i, bucket := range rollupVariables.Buckets {
var apiBucket historydb.BucketParamsAPI
apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
rollupVAPI.Buckets[i] = apiBucket
stateAPI, err := a.h.GetStateAPI()
if err != nil {
retBadReq(err, c)
return
}
rollupVAPI.SafeMode = rollupVariables.SafeMode
a.status.Rollup = rollupVAPI
a.status.Unlock()
c.JSON(http.StatusOK, stateAPI)
}
// SetWDelayerVariables set Status.WithdrawalDelayer variables
func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
a.status.Lock()
a.status.WithdrawalDelayer = wDelayerVariables
a.status.Unlock()
// StateAPIUpdater is an utility object to facilitate updating the StateAPI
type StateAPIUpdater struct {
hdb *historydb.HistoryDB
state historydb.StateAPI
config historydb.NodeConfig
vars common.SCVariablesPtr
consts historydb.Constants
rw sync.RWMutex
}
// SetAuctionVariables set Status.Auction variables
func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
a.status.Lock()
var auctionAPI historydb.AuctionVariablesAPI
auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
auctionAPI.DonationAddress = auctionVariables.DonationAddress
auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
auctionAPI.Outbidding = auctionVariables.Outbidding
auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
// NewStateAPIUpdater creates a new StateAPIUpdater
func NewStateAPIUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *StateAPIUpdater {
u := StateAPIUpdater{
hdb: hdb,
config: *config,
consts: *consts,
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionAPI.AllocationRatio[i] = ratio
}
a.status.Auction = auctionAPI
a.status.Unlock()
u.SetSCVars(vars.AsPtr())
return &u
}
// Network
// Store the State in the HistoryDB
func (u *StateAPIUpdater) Store() error {
u.rw.RLock()
defer u.rw.RUnlock()
return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
}
// SetSCVars sets the smart contract vars (ony updates those that are not nil)
func (u *StateAPIUpdater) SetSCVars(vars *common.SCVariablesPtr) {
u.rw.Lock()
defer u.rw.Unlock()
if vars.Rollup != nil {
u.vars.Rollup = vars.Rollup
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
u.state.Rollup = *rollupVars
}
if vars.Auction != nil {
u.vars.Auction = vars.Auction
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
u.state.Auction = *auctionVars
}
if vars.WDelayer != nil {
u.vars.WDelayer = vars.WDelayer
u.state.WithdrawalDelayer = *u.vars.WDelayer
}
}
// UpdateRecommendedFee update Status.RecommendedFee information
func (u *StateAPIUpdater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
}
// UpdateMetrics update Status.Metrics information
func (u *StateAPIUpdater) UpdateMetrics() error {
u.rw.RLock()
lastBatch := u.state.Network.LastBatch
u.rw.RUnlock()
if lastBatch == nil {
return nil
}
lastBatchNum := lastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.Metrics = *metrics
u.rw.Unlock()
return nil
}
// UpdateNetworkInfoBlock update Status.Network block related information
func (a *API) UpdateNetworkInfoBlock(
lastEthBlock, lastSyncBlock common.Block,
) {
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
func (u *StateAPIUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
u.rw.Lock()
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.rw.Unlock()
}
// UpdateNetworkInfo update Status.Network information
func (a *API) UpdateNetworkInfo(
func (u *StateAPIUpdater) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64,
) error {
lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
// Get last batch in API format
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil
} else if err != nil {
return tracerr.Wrap(err)
}
lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
u.rw.RLock()
auctionVars := u.vars.Auction
u.rw.RUnlock()
// Get next forgers
lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil
} else if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
a.status.Network.LastBatch = lastBatch
a.status.Network.CurrentSlot = currentSlot
a.status.Network.NextForgers = nextForgers
// Update buckets withdrawals
bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
if tracerr.Unwrap(err) == sql.ErrNoRows {
bucketsUpdate = nil
bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
if err == sql.ErrNoRows {
bucketUpdates = nil
} else if err != nil {
return tracerr.Wrap(err)
}
for i, bucketParams := range a.status.Rollup.Buckets {
for _, bucketUpdate := range bucketsUpdate {
u.rw.Lock()
// Update NodeInfo struct
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals
a.status.Rollup.Buckets[i] = bucketParams
u.state.Rollup.Buckets[i] = bucketParams
break
}
}
}
a.status.Unlock()
return nil
}
// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
var slots [6]*big.Int
for i, slot := range defaultSlotSetBid {
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
if !ok {
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
}
slots[i] = bigInt
}
return slots, nil
}
// getNextForgers returns next forgers
func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForger{}
// Get min bid info
var minBidInfo []historydb.MinBidInfo
if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
if err != nil {
return nil, tracerr.Wrap(err)
}
minBidInfo = []historydb.MinBidInfo{{
DefaultSlotSetBid: bigIntSlots,
DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
nextForger := NextForger{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = historydb.CoordinatorAPI{
Forger: a.status.Auction.BootCoordinator,
URL: a.status.Auction.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// Metrics
// UpdateMetrics update Status.Metrics information
func (a *API) UpdateMetrics() error {
a.status.RLock()
if a.status.Network.LastBatch == nil {
a.status.RUnlock()
return nil
}
batchNum := a.status.Network.LastBatch.BatchNum
a.status.RUnlock()
metrics, err := a.h.GetMetricsAPI(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Metrics = *metrics
a.status.Unlock()
return nil
}
// Recommended fee
// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
if err != nil {
return tracerr.Wrap(err)
}
var minFeeUSD float64
if a.l2 != nil {
minFeeUSD = a.l2.MinFeeUSD()
}
a.status.Lock()
a.status.RecommendedFee.ExistingAccount =
math.Max(feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.Unlock()
u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers
u.rw.Unlock()
return nil
}
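This file replaces the in-memory Status held by API with a StateAPIUpdater that assembles the /state payload and persists it through HistoryDB.SetStateInternalAPI, while getState simply reads it back with GetStateAPI. A rough sketch of one sync iteration, assuming the updater was built with NewStateAPIUpdater(hdb, nodeConfig, scVars, constants) as in the updated tests; the surrounding function and argument names are illustrative only:

func refreshStateAPI(u *StateAPIUpdater, lastEthBlock, lastSyncBlock common.Block,
	lastBatchNum common.BatchNum, currentSlot int64, vars *common.SCVariablesPtr) error {
	// Apply only the smart-contract vars that actually changed (nil fields are skipped).
	u.SetSCVars(vars)
	if err := u.UpdateNetworkInfo(lastEthBlock, lastSyncBlock, lastBatchNum, currentSlot); err != nil {
		return err
	}
	if err := u.UpdateMetrics(); err != nil {
		return err
	}
	if err := u.UpdateRecommendedFee(); err != nil {
		return err
	}
	// Persist the assembled state so the API (possibly running in a separate process) can serve it.
	return u.Store()
}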

View File

@@ -13,7 +13,7 @@ import (
type testStatus struct {
Network testNetwork `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Metrics historydb.MetricsAPI `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@@ -21,18 +21,19 @@ type testStatus struct {
}
type testNetwork struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
}
func TestSetRollupVariables(t *testing.T) {
rollupVars := &common.RollupVariables{}
assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true)
api.SetRollupVariables(tc.rollupVars)
assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
}
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -51,17 +52,19 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
}
func TestSetWDelayerVariables(t *testing.T) {
wdelayerVars := &common.WDelayerVariables{}
assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
api.SetWDelayerVariables(tc.wdelayerVars)
assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
}
func TestSetAuctionVariables(t *testing.T) {
auctionVars := &common.AuctionVariables{}
assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
api.SetAuctionVariables(tc.auctionVars)
assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
}
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -85,11 +88,6 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
}
func TestUpdateNetworkInfo(t *testing.T) {
status := &Network{}
assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1)
@@ -118,14 +116,18 @@ func TestUpdateNetworkInfo(t *testing.T) {
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
require.NoError(t, err)
err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
// stateAPIUpdater := NewStateAPIUpdater(hdb)
err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
}
func TestUpdateMetrics(t *testing.T) {
@@ -133,51 +135,60 @@ func TestUpdateMetrics(t *testing.T) {
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
err = api.UpdateMetrics()
assert.NoError(t, err)
assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
err = stateAPIUpdater.UpdateMetrics()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
}
func TestUpdateRecommendedFee(t *testing.T) {
err := api.UpdateRecommendedFee()
assert.NoError(t, err)
err := stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
var minFeeUSD float64
if api.l2 != nil {
minFeeUSD = api.l2.MinFeeUSD()
}
assert.Greater(t, api.status.RecommendedFee.ExistingAccount, minFeeUSD)
assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
// ni.StateAPI.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountAndRegister,
// ni.StateAPI.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
api.SetRollupVariables(tc.rollupVars)
api.SetWDelayerVariables(tc.wdelayerVars)
api.SetAuctionVariables(tc.auctionVars)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err)
err = api.UpdateMetrics()
assert.NoError(t, err)
err = api.UpdateRecommendedFee()
assert.NoError(t, err)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
Rollup: &tc.rollupVars,
Auction: &tc.auctionVars,
WDelayer: &tc.wdelayerVars,
})
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err)
err = stateAPIUpdater.UpdateMetrics()
require.NoError(t, err)
err = stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
endpoint := apiURL + "state"
var status testStatus
assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
// SC vars
// UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -204,13 +215,13 @@ func TestGetState(t *testing.T) {
// Recommended fee
// TODO: perform real asserts (not just greater than 0)
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
assert.Equal(t, status.RecommendedFee.CreatesAccount,
status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
// assert.Equal(t, status.RecommendedFee.CreatesAccount,
// status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
// assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
// status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func assertNextForgers(t *testing.T, expected, actual []NextForger) {
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
assert.Equal(t, len(expected), len(actual))
for i := range expected {
// ignore timestamps and other metadata

View File

@@ -53,7 +53,7 @@ func (a *API) getTokens(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type tokensResponse struct {
Tokens []historydb.TokenWithUSD `json:"tokens"`
PendingItems uint64 `json:"pendingItems"`

View File

@@ -42,7 +42,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
return
}
// Build successful response
// Build succesfull response
type txsResponse struct {
Txs []historydb.TxAPI `json:"transactions"`
PendingItems uint64 `json:"pendingItems"`
@@ -66,6 +66,6 @@ func (a *API) getHistoryTx(c *gin.Context) {
retSQLErr(err, c)
return
}
// Build successful response
// Build succesfull response
c.JSON(http.StatusOK, tx)
}

View File

@@ -455,7 +455,7 @@ func TestGetHistoryTx(t *testing.T) {
// 400, due invalid TxID
err := doBadReq("GET", endpoint+"0x001", nil, 400)
assert.NoError(t, err)
// 404, due nonexistent TxID in DB
// 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x00eb5e95e1ce5e9f6c4ed402d415e8d0bdd7664769cfd2064d28da04a2c76be432", nil, 404)
assert.NoError(t, err)
}

View File

@@ -51,7 +51,7 @@ func (a *API) getPoolTx(c *gin.Context) {
retSQLErr(err, c)
return
}
// Build successful response
// Build succesfull response
c.JSON(http.StatusOK, tx)
}

View File

@@ -2,15 +2,10 @@ package api
import (
"bytes"
"crypto/ecdsa"
"encoding/binary"
"encoding/hex"
"encoding/json"
"math/big"
"testing"
"time"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub"
@@ -240,7 +235,7 @@ func TestPoolTxs(t *testing.T) {
// 400, due invalid TxID
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
require.NoError(t, err)
// 404, due nonexistent TxID in DB
// 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
require.NoError(t, err)
}
@@ -262,73 +257,3 @@ func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {
}
assert.Equal(t, expected, actual)
}
// TestAllTosNull test that the API doesn't accept txs with all the TOs set to null (to eth, to bjj, to idx)
func TestAllTosNull(t *testing.T) {
// Generate account:
// Ethereum private key
var key ecdsa.PrivateKey
key.D = big.NewInt(int64(4444)) // only for testing
key.PublicKey.X, key.PublicKey.Y = ethCrypto.S256().ScalarBaseMult(key.D.Bytes())
key.Curve = ethCrypto.S256()
addr := ethCrypto.PubkeyToAddress(key.PublicKey)
// BJJ private key
var sk babyjub.PrivateKey
var iBytes [8]byte
binary.LittleEndian.PutUint64(iBytes[:], 4444)
copy(sk[:], iBytes[:]) // only for testing
account := common.Account{
Idx: 4444,
TokenID: 0,
BatchNum: 1,
BJJ: sk.Public().Compress(),
EthAddr: addr,
Nonce: 0,
Balance: big.NewInt(1000000),
}
// Add account to history DB (required to verify signature)
err := api.h.AddAccounts([]common.Account{account})
assert.NoError(t, err)
// Genrate tx with all tos set to nil (to eth, to bjj, to idx)
tx := common.PoolL2Tx{
FromIdx: account.Idx,
TokenID: account.TokenID,
Amount: big.NewInt(1000),
Fee: 200,
Nonce: 0,
}
// Set idx and type manually, and check that the function doesn't allow it
_, err = common.NewPoolL2Tx(&tx)
assert.Error(t, err)
tx.Type = common.TxTypeTransfer
var txID common.TxID
txIDRaw, err := hex.DecodeString("02e66e24f7f25272906647c8fd1d7fe8acf3cf3e9b38ffc9f94bbb5090dc275073")
assert.NoError(t, err)
copy(txID[:], txIDRaw)
tx.TxID = txID
// Sign tx
toSign, err := tx.HashToSign(0)
assert.NoError(t, err)
sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()
// Transform common.PoolL2Tx ==> testPoolTxSend
txToSend := testPoolTxSend{
TxID: tx.TxID,
Type: tx.Type,
TokenID: tx.TokenID,
FromIdx: idxToHez(tx.FromIdx, "ETH"),
Amount: tx.Amount.String(),
Fee: tx.Fee,
Nonce: tx.Nonce,
Signature: tx.Signature,
}
// Send tx to the API
jsonTxBytes, err := json.Marshal(txToSend)
require.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", apiURL+"transactions-pool", jsonTxReader, 400)
require.NoError(t, err)
// Clean historyDB: the added account shouldn't be there for other tests
_, err = api.h.DB().DB.Exec("delete from account where idx = 4444")
assert.NoError(t, err)
}

View File

@@ -91,7 +91,7 @@ func (c *CollectedFees) UnmarshalJSON(text []byte) error {
return nil
}
// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez fotmat (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
// It assumes that Ethereum Address are inserted/fetched to/from the DB using the default Scan/Value interface
type HezEthAddr string
@@ -143,7 +143,7 @@ func (s *StrHezEthAddr) UnmarshalText(text []byte) error {
return nil
}
// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez format (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez fotmat (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
// It assumes that *babyjub.PublicKeyComp are inserted/fetched to/from the DB using the default Scan/Value interface
type HezBJJ string
@@ -216,7 +216,7 @@ func (b HezBJJ) Value() (driver.Value, error) {
// StrHezBJJ is used to unmarshal HezBJJ directly into an alias of babyjub.PublicKeyComp
type StrHezBJJ babyjub.PublicKeyComp
// UnmarshalText unmarshalls a StrHezBJJ
// UnmarshalText unmarshals a StrHezBJJ
func (s *StrHezBJJ) UnmarshalText(text []byte) error {
bjj, err := hezStrToBJJ(string(text))
if err != nil {
@@ -226,8 +226,8 @@ func (s *StrHezBJJ) UnmarshalText(text []byte) error {
return nil
}
// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez format (hez:tokenSymbol:idx) to sql DBs.
// Note that this can only be used to insert to DB since there is no way to automatically read from the DB since it needs the tokenSymbol
// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez fotmat (hez:tokenSymbol:idx) to sql DBs.
// Note that this can only be used to insert to DB since there is no way to automaticaly read from the DB since it needs the tokenSymbol
type HezIdx string
// StrHezIdx is used to unmarshal HezIdx directly into an alias of common.Idx
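For quick reference, dummy values shaped like the three string formats described in these comments (not real keys or addresses; purely illustrative):

// Illustrative placeholders only, matching the documented patterns.
const (
	dummyHezEthAddr = "hez:0x0000000000000000000000000000000000000001"   // ^hez:0x[a-fA-F0-9]{40}$
	dummyHezBJJ     = "hez:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" // ^hez:[A-Za-z0-9_-]{44}$
	dummyHezIdx     = "hez:ETH:4444"                                     // hez:tokenSymbol:idx
)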

View File

@@ -28,8 +28,7 @@ type ConfigBatch struct {
// NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
// method
func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum,
nLevels uint64) (*BatchBuilder, error) {
func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
localStateDB, err := statedb.NewLocalStateDB(
statedb.Config{
Path: dbpath,

View File

@@ -15,8 +15,7 @@ func TestBatchBuilder(t *testing.T) {
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 0})
synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeBatchBuilder, NLevels: 0})
assert.Nil(t, err)
bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")

View File

@@ -10,8 +10,6 @@ SQLConnectionTimeout = "2s"
Interval = "10s"
URL = "https://api-pub.bitfinex.com/v2/"
Type = "bitfinexV2"
# URL = "https://api.coingecko.com/api/v3/"
# Type = "coingeckoV3"
[Debug]
APIAddress = "localhost:12345"
@@ -53,7 +51,7 @@ ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordina
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
MinimumForgeAddressBalance = "0"
MinimumForgeAddressBalance = 0
ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2
@@ -109,10 +107,10 @@ Path = "/tmp/iden3-test/hermez/ethkeystore"
Password = "yourpasswordhere"
[Coordinator.EthClient.ForgeBatchGasCost]
Fixed = 600000
L1UserTx = 15000
L1CoordTx = 8000
L2Tx = 250
Fixed = 500000
L1UserTx = 8000
L1CoordTx = 9000
L2Tx = 1
[Coordinator.API]
Coordinator = true

View File

@@ -5,16 +5,13 @@ import (
"fmt"
"os"
"os/signal"
"path"
"strings"
ethKeystore "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node"
@@ -75,86 +72,6 @@ func cmdImportKey(c *cli.Context) error {
return nil
}
func resetStateDBs(cfg *Config, batchNum common.BatchNum) error {
log.Infof("Reset Synchronizer StateDB to batchNum %v...", batchNum)
// Manually make a checkpoint from batchNum to current to force current
// to be a valid checkpoint. This is useful because in case of a
// crash, current can be corrupted and the first thing that
// `kvdb.NewKVDB` does is read the current checkpoint, which wouldn't
// succeed in case of corruption.
dbPath := cfg.node.StateDB.Path
source := path.Join(dbPath, fmt.Sprintf("%s%d", kvdb.PathBatchNum, batchNum))
current := path.Join(dbPath, kvdb.PathCurrent)
last := path.Join(dbPath, kvdb.PathLast)
if err := os.RemoveAll(last); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
if batchNum == 0 {
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
} else {
if err := kvdb.PebbleMakeCheckpoint(source, current); err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.PebbleMakeCheckpoint: %w", err))
}
}
db, err := kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.NewKVDB: %w", err))
}
if err := db.Reset(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
if cfg.mode == node.ModeCoordinator {
log.Infof("Wipe Coordinator StateDBs...")
// We wipe the Coordinator StateDBs entirely (by deleting
// current and resetting to batchNum 0) because the Coordinator
// StateDBs are always reset from Synchronizer when the
// coordinator pipeline starts.
dbPath := cfg.node.Coordinator.TxSelector.Path
current := path.Join(dbPath, kvdb.PathCurrent)
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
db, err := kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.NewKVDB: %w", err))
}
if err := db.Reset(0); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
dbPath = cfg.node.Coordinator.BatchBuilder.Path
current = path.Join(dbPath, kvdb.PathCurrent)
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
db, err = kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("statedb.NewKVDB: %w", err))
}
if err := db.Reset(0); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
}
return nil
}
func cmdWipeSQL(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
@@ -163,8 +80,7 @@ func cmdWipeSQL(c *cli.Context) error {
cfg := _cfg.node
yes := c.Bool(flagYes)
if !yes {
fmt.Print("*WARNING* Are you sure you want to delete " +
"the SQL DB and StateDBs? [y/N]: ")
fmt.Print("*WARNING* Are you sure you want to delete the SQL DB? [y/N]: ")
var input string
if _, err := fmt.Scanln(&input); err != nil {
return tracerr.Wrap(err)
@@ -186,27 +102,12 @@ func cmdWipeSQL(c *cli.Context) error {
}
log.Info("Wiping SQL DB...")
if err := dbUtils.MigrationsDown(db.DB); err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.MigrationsDown: %w", err))
}
log.Info("Wiping StateDBs...")
if err := resetStateDBs(_cfg, 0); err != nil {
return tracerr.Wrap(fmt.Errorf("resetStateDBs: %w", err))
return tracerr.Wrap(err)
}
return nil
}
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
func waitSigInt() {
stopCh := make(chan interface{})
// catch ^C to send the stop signal
@@ -227,11 +128,40 @@ func cmdRun(c *cli.Context) error {
}
}()
<-stopCh
}
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
waitSigInt()
node.Stop()
return nil
}
func cmdServeAPI(c *cli.Context) error {
cfg, err := parseCliAPIServer(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
}
srv.Start()
waitSigInt()
srv.Stop()
return nil
}
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
@@ -290,11 +220,6 @@ func cmdDiscard(c *cli.Context) error {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
log.Info("Resetting StateDBs...")
if err := resetStateDBs(_cfg, batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("resetStateDBs: %w", err))
}
return nil
}
@@ -319,20 +244,59 @@ func getConfig(c *cli.Context) (*Config, error) {
var cfg Config
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
if nodeCfgPath == "" {
return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
}
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.node, err = config.LoadNode(nodeCfgPath)
cfg.node, err = config.LoadNode(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.node, err = config.LoadCoordinator(nodeCfgPath)
cfg.node, err = config.LoadNode(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
default:
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
}
return &cfg, nil
}
// ConfigAPIServer is the configuration of the api server execution
type ConfigAPIServer struct {
mode node.Mode
server *config.APIServer
}
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
cfg, err := getConfigAPIServer(c)
if err != nil {
if err := cli.ShowAppHelp(c); err != nil {
panic(err)
}
return nil, tracerr.Wrap(err)
}
return cfg, nil
}
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
var cfg ConfigAPIServer
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -382,7 +346,7 @@ func main() {
{
Name: "wipesql",
Aliases: []string{},
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " +
Usage: "Wipe the SQL DB (HistoryDB and L2DB), " +
"leaving the DB in a clean state",
Action: cmdWipeSQL,
Flags: []cli.Flag{
@@ -398,6 +362,12 @@ func main() {
Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun,
},
{
Name: "serveapi",
Aliases: []string{},
Usage: "Serve the API only",
Action: cmdServeAPI,
},
{
Name: "discard",
Aliases: []string{},
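The other notable change in this file is the new serveapi subcommand, which runs only the API using the new config.LoadAPIServer / node.NewAPIServer pair. Pieced together from the calls visible in this diff, the standalone flow looks roughly like the sketch below; cfgPath and the coordinator-mode flag are whatever the CLI resolves, and the function name is illustrative:

func serveAPIOnly(cfgPath string, coordinatorMode bool) error {
	serverCfg, err := config.LoadAPIServer(cfgPath, coordinatorMode)
	if err != nil {
		return tracerr.Wrap(err)
	}
	mode := node.ModeSynchronizer
	if coordinatorMode {
		mode = node.ModeCoordinator
	}
	srv, err := node.NewAPIServer(mode, serverCfg)
	if err != nil {
		return tracerr.Wrap(err)
	}
	srv.Start()
	waitSigInt() // block until SIGINT/SIGTERM, as cmdRun and cmdServeAPI do
	srv.Stop()
	return nil
}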

View File

@@ -72,8 +72,7 @@ func (idx Idx) BigInt() *big.Int {
// IdxFromBytes returns Idx from a byte array
func IdxFromBytes(b []byte) (Idx, error) {
if len(b) != IdxBytesLen {
return 0, tracerr.Wrap(fmt.Errorf("can not parse Idx, bytes len %d, expected %d",
len(b), IdxBytesLen))
return 0, tracerr.Wrap(fmt.Errorf("can not parse Idx, bytes len %d, expected %d", len(b), IdxBytesLen))
}
var idxBytes [8]byte
copy(idxBytes[2:], b[:])
@@ -195,8 +194,7 @@ func (a *Account) BigInts() ([NLeafElems]*big.Int, error) {
return e, nil
}
// HashValue returns the value of the Account, which is the Poseidon hash of its
// *big.Int representation
// HashValue returns the value of the Account, which is the Poseidon hash of its *big.Int representation
func (a *Account) HashValue() (*big.Int, error) {
bi, err := a.BigInts()
if err != nil {

View File

@@ -76,8 +76,7 @@ func TestNonceParser(t *testing.T) {
func TestAccount(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
pk := sk.Public()
@@ -116,8 +115,7 @@ func TestAccountLoop(t *testing.T) {
// check that for different deterministic BabyJubJub keys & random Address there is no problem
for i := 0; i < 256; i++ {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
pk := sk.Public()
@@ -201,8 +199,7 @@ func bigFromStr(h string, u int) *big.Int {
func TestAccountHashValue(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
pk := sk.Public()
@@ -215,16 +212,13 @@ func TestAccountHashValue(t *testing.T) {
}
v, err := account.HashValue()
assert.NoError(t, err)
assert.Equal(t,
"447675324273474410516096114710387312413478475468606444107594732044698919451",
v.String())
assert.Equal(t, "16297758255249203915951182296472515138555043617458222397753168518282206850764", v.String())
}
func TestAccountHashValueTestVectors(t *testing.T) {
// values from js test vectors
ay := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
(hex.EncodeToString(ay.Bytes())))
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", (hex.EncodeToString(ay.Bytes())))
bjjPoint, err := babyjub.PointFromSignAndY(true, ay)
require.NoError(t, err)
bjj := babyjub.PublicKey(*bjjPoint)
@@ -242,22 +236,16 @@ func TestAccountHashValueTestVectors(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "9444732965739290427391", e[0].String())
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", e[1].String())
assert.Equal(t,
"14474011154664524427946373126085988481658748083205070504932198000989141204991",
e[2].String())
assert.Equal(t, "14474011154664524427946373126085988481658748083205070504932198000989141204991", e[2].String())
assert.Equal(t, "1461501637330902918203684832716283019655932542975", e[3].String())
h, err := poseidon.Hash(e[:])
assert.NoError(t, err)
assert.Equal(t,
"13265203488631320682117942952393454767418777767637549409684833552016769103047",
h.String())
assert.Equal(t, "4550823210217540218403400309533329186487982452461145263910122718498735057257", h.String())
v, err := account.HashValue()
assert.NoError(t, err)
assert.Equal(t,
"13265203488631320682117942952393454767418777767637549409684833552016769103047",
v.String())
assert.Equal(t, "4550823210217540218403400309533329186487982452461145263910122718498735057257", v.String())
// second account
ay = big.NewInt(0)
@@ -273,9 +261,7 @@ func TestAccountHashValueTestVectors(t *testing.T) {
}
v, err = account.HashValue()
assert.NoError(t, err)
assert.Equal(t,
"2351654555892372227640888372176282444150254868378439619268573230312091195718",
v.String())
assert.Equal(t, "7750253361301235345986002241352365187241910378619330147114280396816709365657", v.String())
// third account
ay = bigFromStr("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", 16)
@@ -293,15 +279,11 @@ func TestAccountHashValueTestVectors(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "554050781187", e[0].String())
assert.Equal(t, "42000000000000000000", e[1].String())
assert.Equal(t,
"15238403086306505038849621710779816852318505119327426213168494964113886299863",
e[2].String())
assert.Equal(t, "15238403086306505038849621710779816852318505119327426213168494964113886299863", e[2].String())
assert.Equal(t, "935037732739828347587684875151694054123613453305", e[3].String())
v, err = account.HashValue()
assert.NoError(t, err)
assert.Equal(t,
"15036148928138382129196903417666258171042923749783835283230591475172197254845",
v.String())
assert.Equal(t, "10565754214047872850889045989683221123564392137456000481397520902594455245517", v.String())
}
func TestAccountErrNotInFF(t *testing.T) {
@@ -330,8 +312,7 @@ func TestAccountErrNotInFF(t *testing.T) {
func TestAccountErrNumOverflowNonce(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
pk := sk.Public()
@@ -358,8 +339,7 @@ func TestAccountErrNumOverflowNonce(t *testing.T) {
func TestAccountErrNumOverflowBalance(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
pk := sk.Public()
@@ -371,16 +351,14 @@ func TestAccountErrNumOverflowBalance(t *testing.T) {
BJJ: pk.Compress(),
EthAddr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
}
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895",
account.Balance.String())
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", account.Balance.String())
_, err = account.Bytes()
assert.NoError(t, err)
// force value overflow
account.Balance = new(big.Int).Exp(big.NewInt(2), big.NewInt(192), nil)
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512896",
account.Balance.String())
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512896", account.Balance.String())
b, err := account.Bytes()
assert.NotNil(t, err)
assert.Equal(t, fmt.Errorf("%s Balance", ErrNumOverflow), tracerr.Unwrap(err))

View File

@@ -84,7 +84,7 @@ func (a *AccountCreationAuth) toHash(chainID uint16,
return rawData, nil
}
// HashToSign returns the hash to be signed by the Ethereum address to authorize
// HashToSign returns the hash to be signed by the Ethereum address to authorize
// the account creation, which follows the EIP-712 encoding
func (a *AccountCreationAuth) HashToSign(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) {
@@ -96,9 +96,9 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
}
// Sign signs the account creation authorization message using the provided
// `signHash` function, and stores the signature in `a.Signature`. `signHash`
// `signHash` function, and stores the signature in `a.Signature`. `signHash`
// should do an ethereum signature using the account corresponding to
// `a.EthAddr`. The `signHash` function is used to make signing flexible: in
// `a.EthAddr`. The `signHash` function is used to make signing flexible: in
// tests we sign directly using the private key, outside tests we sign using
// the keystore (which never exposes the private key). Sign follows the EIP-712
// encoding.
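For orientation, a minimal usage sketch (not part of this diff; `bjjPubKeyComp`, `chainID` and `hermezContractAddr` are assumed to be in scope): in a test the `signHash` callback can simply sign with a raw ECDSA key, while production code would route it through the keystore.
// Sketch: sign an AccountCreationAuth in a test with a raw ECDSA key (hypothetical variables).
sk, err := ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
if err != nil {
	panic(err)
}
auth := AccountCreationAuth{EthAddr: ethCrypto.PubkeyToAddress(sk.PublicKey), BJJ: bjjPubKeyComp}
if err := auth.Sign(func(hash []byte) ([]byte, error) {
	return ethCrypto.Sign(hash, sk) // outside tests this would go through the keystore
}, chainID, hermezContractAddr); err != nil {
	panic(err)
}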

View File

@@ -13,8 +13,7 @@ import (
func TestAccountCreationAuthSignVerify(t *testing.T) {
// Ethereum key
ethSk, err :=
ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
ethSk, err := ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err)
ethAddr := ethCrypto.PubkeyToAddress(ethSk.PublicKey)
@@ -70,7 +69,6 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
sigExpected string
}
var tvs []testVector
//nolint:lll
tv0 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000001",
expectedAddress: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf",
@@ -81,7 +79,6 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2",
sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b",
}
//nolint:lll
tv1 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
expectedAddress: "0x2B5AD5c4795c026514f8317c7a215E218DcCD6cF",
@@ -92,7 +89,6 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959",
sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c",
}
//nolint:lll
tv2 := testVector{
ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
expectedAddress: "0xc783df8a850f42e7F7e57013759C285caa701eB6",

View File

@@ -13,9 +13,8 @@ const batchNumBytesLen = 8
// Batch is a struct that represents Hermez network batch
type Batch struct {
BatchNum BatchNum `meddler:"batch_num"`
// Ethereum block in which the batch is forged
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
ForgerAddr ethCommon.Address `meddler:"forger_addr"`
CollectedFees map[TokenID]*big.Int `meddler:"fees_collected,json"`
FeeIdxsCoordinator []Idx `meddler:"fee_idxs_coordinator,json"`
@@ -23,11 +22,9 @@ type Batch struct {
NumAccounts int `meddler:"num_accounts"`
LastIdx int64 `meddler:"last_idx"`
ExitRoot *big.Int `meddler:"exit_root,bigint"`
// ForgeL1TxsNum is optional, Only when the batch forges L1 txs. Identifier that corresponds
// to the group of L1 txs forged in the current batch.
ForgeL1TxsNum *int64 `meddler:"forge_l1_txs_num"`
SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
ForgeL1TxsNum *int64 `meddler:"forge_l1_txs_num"` // optional, Only when the batch forges L1 txs. Identifier that corresponds to the group of L1 txs forged in the current batch.
SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}
// NewEmptyBatch creates a new empty batch
@@ -66,9 +63,7 @@ func (bn BatchNum) BigInt() *big.Int {
// BatchNumFromBytes returns BatchNum from a []byte
func BatchNumFromBytes(b []byte) (BatchNum, error) {
if len(b) != batchNumBytesLen {
return 0,
tracerr.Wrap(fmt.Errorf("can not parse BatchNumFromBytes, bytes len %d, expected %d",
len(b), batchNumBytesLen))
return 0, tracerr.Wrap(fmt.Errorf("can not parse BatchNumFromBytes, bytes len %d, expected %d", len(b), batchNumBytesLen))
}
batchNum := binary.BigEndian.Uint64(b[:batchNumBytesLen])
return BatchNum(batchNum), nil
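As a small illustration of the expected encoding (a sketch, not from the diff): a BatchNum is serialized as 8 big-endian bytes, so a decode round trip looks like this.
// Sketch: decode a BatchNum from its 8-byte big-endian representation.
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, 1234)
bn, err := BatchNumFromBytes(b) // bn == BatchNum(1234), err == nil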

View File

@@ -34,7 +34,7 @@ type Slot struct {
// BatchesLen int
BidValue *big.Int
BootCoord bool
// Bidder, Forger and URL correspond to the winner of the slot (which is
// Bidder, Forger and URL correspond to the winner of the slot (which is
// not always the highest bidder). These are the values of the
// coordinator that is able to forge exclusively before the deadline.
Bidder ethCommon.Address

View File

@@ -5,15 +5,10 @@ import (
)
// Coordinator represents a Hermez network coordinator who wins an auction for a specific slot
// WARNING: this is strongly based on the previous implementation, once the new spec is done, this
// may change a lot.
// WARNING: this is strongly based on the previous implementation, once the new spec is done, this may change a lot.
type Coordinator struct {
// Bidder is the address of the bidder
Bidder ethCommon.Address `meddler:"bidder_addr"`
// Forger is the address of the forger
Forger ethCommon.Address `meddler:"forger_addr"`
// EthBlockNum is the block in which the coordinator was registered
EthBlockNum int64 `meddler:"eth_block_num"`
// URL of the coordinators API
URL string `meddler:"url"`
Bidder ethCommon.Address `meddler:"bidder_addr"` // address of the bidder
Forger ethCommon.Address `meddler:"forger_addr"` // address of the forger
EthBlockNum int64 `meddler:"eth_block_num"` // block in which the coordinator was registered
URL string `meddler:"url"` // URL of the coordinators API
}

common/eth.go Normal file
View File

@@ -0,0 +1,33 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
// original SCVariables
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}
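As a usage note (illustrative sketch, variable names are hypothetical): AsPtr hands out a pointer view over the same underlying SCVariables, so later updates to the original remain visible through it.
// Sketch: share the same underlying variables through the pointer view.
var vars SCVariables // filled by the synchronizer, for example
ptrs := vars.AsPtr()
ptrs.Rollup.FeeAddToken = big.NewInt(10) // also visible as vars.Rollup.FeeAddToken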

View File

@@ -68,13 +68,11 @@ type AuctionVariables struct {
ClosedAuctionSlots uint16 `meddler:"closed_auction_slots" validate:"required"`
// Distance (#slots) to the farthest slot to which you can bid (30 days = 4320 slots )
OpenAuctionSlots uint16 `meddler:"open_auction_slots" validate:"required"`
// How the HEZ tokens deposited by the slot winner are distributed (Burn: 40% - Donation:
// 40% - HGT: 20%)
// How the HEZ tokens deposited by the slot winner are distributed (Burn: 40% - Donation: 40% - HGT: 20%)
AllocationRatio [3]uint16 `meddler:"allocation_ratio,json" validate:"required"`
// Minimum outbid (percentage) over the previous one to consider it valid
Outbidding uint16 `meddler:"outbidding" validate:"required"`
// Number of blocks at the end of a slot in which any coordinator can forge if the winner
// has not forged one before
// Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `meddler:"slot_deadline" validate:"required"`
}

View File

@@ -20,22 +20,19 @@ const (
// RollupConstExitIDx IDX 1 is reserved for exits
RollupConstExitIDx = 1
// RollupConstLimitTokens Max number of tokens allowed to be registered inside the rollup
RollupConstLimitTokens = (1 << 32) //nolint:gomnd
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes]
// compressedSignature
RollupConstLimitTokens = (1 << 32)
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6
// bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes]
// tokenId + [6 bytes] toIdx
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 78
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
RollupConstMaxL1Tx = 256
// RollupConstInputSHAConstantBytes [6 bytes] lastIdx + [6 bytes] newLastIdx + [32 bytes]
// stateRoot + [32 bytes] newStRoot + [32 bytes] newExitRoot + [_MAX_L1_TX *
// _L1_USER_TOTALBYTES bytes] l1TxsData + totalL2TxsDataLength + feeIdxCoordinatorLength +
// [2 bytes] chainID = 18542 bytes + totalL2TxsDataLength + feeIdxCoordinatorLength
// RollupConstInputSHAConstantBytes [6 bytes] lastIdx + [6 bytes] newLastIdx + [32 bytes] stateRoot + [32 bytes] newStRoot + [32 bytes] newExitRoot +
// [_MAX_L1_TX * _L1_USER_TOTALBYTES bytes] l1TxsData + totalL2TxsDataLength + feeIdxCoordinatorLength + [2 bytes] chainID =
// 18542 bytes + totalL2TxsDataLength + feeIdxCoordinatorLength
RollupConstInputSHAConstantBytes = 18546
// RollupConstNumBuckets Number of buckets
RollupConstNumBuckets = 5
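The byte-count constants above can be sanity-checked against the field layouts listed in their comments (an illustrative check, not code from the diff):
// 20 (fromEthAddr) + 32 (fromBjj) + 6 (fromIdx) + 5 (depositAmountFloat40) +
// 5 (amountFloat40) + 4 (tokenId) + 6 (toIdx) = 78 = RollupConstL1UserTotalBytes
// 4 (token) + 32 (babyjub) + 65 (compressedSignature) = 101 = RollupConstL1CoordinatorTotalBytes
const checkL1UserBytes = 20 + 32 + 6 + 5 + 5 + 4 + 6 // 78
const checkL1CoordBytes = 4 + 32 + 65                // 101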
@@ -47,18 +44,14 @@ const (
var (
// RollupConstLimitDepositAmount Max deposit amount allowed (depositAmount: L1 --> L2)
RollupConstLimitDepositAmount, _ = new(big.Int).SetString(
"340282366920938463463374607431768211456", 10)
RollupConstLimitDepositAmount, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10)
// RollupConstLimitL2TransferAmount Max amount allowed (amount L2 --> L2)
RollupConstLimitL2TransferAmount, _ = new(big.Int).SetString(
"6277101735386680763835789423207666416102355444464034512896", 10)
RollupConstLimitL2TransferAmount, _ = new(big.Int).SetString("6277101735386680763835789423207666416102355444464034512896", 10)
// RollupConstEthAddressInternalOnly This ethereum address is used internally for rollup
// accounts that don't have ethereum address, only Babyjubjub.
// This non-ethereum accounts can be created by the coordinator and allow users to have a
// rollup account without needing an ethereum address
RollupConstEthAddressInternalOnly = ethCommon.HexToAddress(
"0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF")
// RollupConstEthAddressInternalOnly This ethereum address is used internally for rollup accounts that don't have an ethereum address, only Babyjubjub
// These non-ethereum accounts can be created by the coordinator and allow users to have a rollup
// account without needing an ethereum address
RollupConstEthAddressInternalOnly = ethCommon.HexToAddress("0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF")
// RollupConstRfield Modulus zkSNARK
RollupConstRfield, _ = new(big.Int).SetString(
"21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
@@ -70,32 +63,24 @@ var (
// RollupConstRecipientInterfaceHash ERC777 recipient interface hash
RollupConstRecipientInterfaceHash = crypto.Keccak256([]byte("ERC777TokensRecipient"))
// RollupConstPerformL1UserTxSignature the signature of the function that can be called thru
// an ERC777 `send`
RollupConstPerformL1UserTxSignature = crypto.Keccak256([]byte(
"addL1Transaction(uint256,uint48,uint16,uint16,uint32,uint48)"))
// RollupConstAddTokenSignature the signature of the function that can be called thru an
// ERC777 `send`
// RollupConstPerformL1UserTxSignature the signature of the function that can be called thru an ERC777 `send`
RollupConstPerformL1UserTxSignature = crypto.Keccak256([]byte("addL1Transaction(uint256,uint48,uint16,uint16,uint32,uint48)"))
// RollupConstAddTokenSignature the signature of the function that can be called thru an ERC777 `send`
RollupConstAddTokenSignature = crypto.Keccak256([]byte("addToken(address)"))
// RollupConstSendSignature ERC777 Signature
RollupConstSendSignature = crypto.Keccak256([]byte("send(address,uint256,bytes)"))
// RollupConstERC777Granularity ERC777 Signature
RollupConstERC777Granularity = crypto.Keccak256([]byte("granularity()"))
// RollupConstWithdrawalDelayerDeposit This constant are used to deposit tokens from ERC77
// tokens into withdrawal delayer
// RollupConstWithdrawalDelayerDeposit This constant is used to deposit tokens from ERC777 tokens into the withdrawal delayer
RollupConstWithdrawalDelayerDeposit = crypto.Keccak256([]byte("deposit(address,address,uint192)"))
// ERC20 signature
// RollupConstTransferSignature This constant is used in the _safeTransfer internal method
// in order to safe GAS.
// RollupConstTransferSignature This constant is used in the _safeTransfer internal method in order to save GAS.
RollupConstTransferSignature = crypto.Keccak256([]byte("transfer(address,uint256)"))
// RollupConstTransferFromSignature This constant is used in the _safeTransfer internal
// method in order to safe GAS.
RollupConstTransferFromSignature = crypto.Keccak256([]byte(
"transferFrom(address,address,uint256)"))
// RollupConstApproveSignature This constant is used in the _safeTransfer internal method in
// order to safe GAS.
// RollupConstTransferFromSignature This constant is used in the _safeTransfer internal method in order to save GAS.
RollupConstTransferFromSignature = crypto.Keccak256([]byte("transferFrom(address,address,uint256)"))
// RollupConstApproveSignature This constant is used in the _safeTransfer internal method in order to save GAS.
RollupConstApproveSignature = crypto.Keccak256([]byte("approve(address,uint256)"))
// RollupConstERC20Signature ERC20 decimals signature
RollupConstERC20Signature = crypto.Keccak256([]byte("decimals()"))
@@ -156,7 +141,6 @@ type TokenExchange struct {
}
// RollupVariables are the variables of the Rollup Smart Contract
//nolint:lll
type RollupVariables struct {
EthBlockNum int64 `meddler:"eth_block_num"`
FeeAddToken *big.Int `meddler:"fee_add_token,bigint" validate:"required"`

View File

@@ -27,7 +27,6 @@ type WDelayerEscapeHatchWithdrawal struct {
}
// WDelayerVariables are the variables of the Withdrawal Delayer Smart Contract
//nolint:lll
type WDelayerVariables struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
// HermezRollupAddress ethCommon.Address `json:"hermezRollupAddress" meddler:"rollup_address"`

View File

@@ -1,4 +1,4 @@
// Package common float40.go provides methods to work with Hermez custom half
// Package common Float40 provides methods to work with Hermez custom half
// float precision, 40 bits, codification internally called Float40 has been
// adopted to encode large integers. This is done in order to save bits when L2
// transactions are published.
@@ -32,8 +32,6 @@ var (
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as Float40 due to not enough precision
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
thres = big.NewInt(0x08_00_00_00_00)
)
// Float40 represents a float in a 64 bit format
@@ -70,7 +68,7 @@ func (f40 Float40) BigInt() (*big.Int, error) {
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
return nil, err
}
e := f40Bytes[0] & 0xF8 >> 3 // take first 5 bits
@@ -88,41 +86,18 @@ func NewFloat40(f *big.Int) (Float40, error) {
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, tracerr.Wrap(ErrFloat40E31)
return 0, ErrFloat40E31
}
if m.Cmp(thres) >= 0 {
return 0, tracerr.Wrap(ErrFloat40NotEnoughPrecission)
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
// NewFloat40Floor encodes a *big.Int integer as a Float40, rounding down in
// case of loss during the encoding. It returns an error in case that the number
// is too big (e>31). Warning: this method should not be used inside the
// hermez-node, it's a helper for external usage to generate valid Float40
// values.
func NewFloat40Floor(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
// zero := big.NewInt(0)
ten := big.NewInt(10)
for m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, tracerr.Wrap(ErrFloat40E31)
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
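A worked example may help here (a sketch based on the first test vector of the file below, not part of the diff): a Float40 packs a 5-bit exponent e and a 35-bit mantissa m as e*2^35 + m, representing the value m*10^e.
// Sketch: 9922334455 * 10^30 has mantissa m = 9922334455 (< 2^35) and exponent e = 30,
// so it packs as 30*34359738368 + 9922334455 = 1040714485495.
v, _ := new(big.Int).SetString("9922334455000000000000000000000000000000", 10)
f40, err := NewFloat40(v) // f40 == Float40(1040714485495), err == nil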

View File

@@ -1,11 +1,9 @@
package common
import (
"fmt"
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -57,56 +55,7 @@ func TestExpectError(t *testing.T) {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], tracerr.Unwrap(err))
}
}
func TestNewFloat40Floor(t *testing.T) {
testVector := map[string][]string{
// []string contains [NewFloat40 value, NewFloat40Floor value, decoded
// BigInt value]; when the Float40 value is expected to be 0, it is
// because an error is expected
"9922334455000000000000000000000000000000": {
"1040714485495", "1040714485495", "9922334455000000000000000000000000000000"},
"9922334455000000000000000000000000000001": { // Floor [2] will be same as prev line
"0", "1040714485495", "9922334455000000000000000000000000000000"},
"9922334454999999999999999999999999999999": {
"0", "1040714485494", "9922334454000000000000000000000000000000"},
"42949672950000000000000000000000000000000": {
"1069446856703", "1069446856703", "42949672950000000000000000000000000000000"},
"99223344556573838487575": {
"0", "456598933239", "99223344550000000000000"},
"992233445500000000000000000000000000000000": {
"0", "0", "0"}, // e>31, returns 0, err
"343597383670000000000000000000000000000000": {
"1099511627775", "1099511627775", "343597383670000000000000000000000000000000"},
"343597383680000000000000000000000000000000": {
"0", "0", "0"}, // e>31, returns 0, err
"1157073197879933027": {
"0", "286448638922", "1157073197800000000"},
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
f40, err := NewFloat40(bi)
if f40 == 0 {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, testVector[test][0], fmt.Sprint(uint64(f40)))
f40, err = NewFloat40Floor(bi)
if f40 == 0 {
assert.Equal(t, ErrFloat40E31, tracerr.Unwrap(err))
} else {
assert.NoError(t, err)
}
assert.Equal(t, testVector[test][1], fmt.Sprint(uint64(f40)))
bi2, err := f40.BigInt()
require.NoError(t, err)
assert.Equal(t, fmt.Sprint(testVector[test][2]), bi2.String())
assert.Equal(t, testVector[test], err)
}
}

View File

@@ -21,33 +21,25 @@ type L1Tx struct {
// where type:
// - L1UserTx: 0
// - L1CoordinatorTx: 1
TxID TxID `meddler:"id"`
// ToForgeL1TxsNum indicates in which the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
Position int `meddler:"position"`
// UserOrigin is set to true if the tx was originated by a user, false if it was
// aoriginated by a coordinator. Note that this differ from the spec for implementation
// simplification purpposes
UserOrigin bool `meddler:"user_origin"`
// FromIdx is used by L1Tx/Deposit to indicate the Idx receiver of the L1Tx.DepositAmount
// (deposit)
FromIdx Idx `meddler:"from_idx,zeroisnull"`
TxID TxID `meddler:"id"`
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
Position int `meddler:"position"`
UserOrigin bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was originated by a coordinator. Note that this differs from the spec for implementation simplification purposes
FromIdx Idx `meddler:"from_idx,zeroisnull"` // FromIdx is used by L1Tx/Deposit to indicate the Idx receiver of the L1Tx.DepositAmount (deposit)
EffectiveFromIdx Idx `meddler:"effective_from_idx,zeroisnull"`
FromEthAddr ethCommon.Address `meddler:"from_eth_addr,zeroisnull"`
FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj,zeroisnull"`
// ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer
ToIdx Idx `meddler:"to_idx"`
TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"`
ToIdx Idx `meddler:"to_idx"` // ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer
TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"`
// EffectiveAmount only applies to L1UserTx.
EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"`
DepositAmount *big.Int `meddler:"deposit_amount,bigint"`
// EffectiveDepositAmount only applies to L1UserTx.
EffectiveDepositAmount *big.Int `meddler:"effective_deposit_amount,bigintnull"`
// Ethereum Block Number in which this L1Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"`
Type TxType `meddler:"type"`
BatchNum *BatchNum `meddler:"batch_num"`
EffectiveDepositAmount *big.Int `meddler:"effective_deposit_amount,bigintnull"`
EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
Type TxType `meddler:"type"`
BatchNum *BatchNum `meddler:"batch_num"`
}
// NewL1Tx returns the given L1Tx with the TxId & Type parameters calculated
@@ -259,7 +251,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
}
l1tx.ToIdx = toIdx
l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
return &l1tx, tracerr.Wrap(err)
return &l1tx, err
}
// BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -339,9 +331,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != RollupConstL1UserTotalBytes {
return nil,
tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d",
68, len(b)))
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
}
tx := &L1Tx{
@@ -379,12 +369,9 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
}
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx,
error) {
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != RollupConstL1CoordinatorTotalBytes {
return nil, tracerr.Wrap(
fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d",
101, len(b)))
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
}
tx := &L1Tx{

View File

@@ -29,8 +29,7 @@ func TestNewL1UserTx(t *testing.T) {
}
l1Tx, err := NewL1Tx(l1Tx)
assert.NoError(t, err)
assert.Equal(t, "0x00a6cbae3b8661fb75b0919ca6605a02cfb04d9c6dd16870fa0fcdf01befa32768",
l1Tx.TxID.String())
assert.Equal(t, "0x00a6cbae3b8661fb75b0919ca6605a02cfb04d9c6dd16870fa0fcdf01befa32768", l1Tx.TxID.String())
}
func TestNewL1CoordinatorTx(t *testing.T) {
@@ -47,8 +46,7 @@ func TestNewL1CoordinatorTx(t *testing.T) {
}
l1Tx, err := NewL1Tx(l1Tx)
assert.NoError(t, err)
assert.Equal(t, "0x01274482d73df4dab34a1b6740adfca347a462513aa14e82f27b12f818d1b68c84",
l1Tx.TxID.String())
assert.Equal(t, "0x01274482d73df4dab34a1b6740adfca347a462513aa14e82f27b12f818d1b68c84", l1Tx.TxID.String())
}
func TestL1TxCompressedData(t *testing.T) {
@@ -201,8 +199,7 @@ func TestL1userTxByteParsers(t *testing.T) {
func TestL1TxByteParsersCompatibility(t *testing.T) {
// Data from compatibility test
var pkComp babyjub.PublicKeyComp
pkCompB, err :=
hex.DecodeString("0dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a")
pkCompB, err := hex.DecodeString("0dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a")
require.NoError(t, err)
pkCompL := SwapEndianness(pkCompB)
err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL)))
@@ -223,8 +220,7 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
encodedData, err := l1Tx.BytesUser()
require.NoError(t, err)
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79e" +
"e1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
assert.Equal(t, expected, hex.EncodeToString(encodedData))
}
@@ -232,8 +228,7 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
chainID := big.NewInt(1337)
privateKey, err :=
crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err)
publicKey := privateKey.Public()
@@ -305,8 +300,7 @@ func TestL1CoordinatorTxByteParsersCompatibility(t *testing.T) {
signature = append(signature, v[:]...)
var pkComp babyjub.PublicKeyComp
pkCompB, err :=
hex.DecodeString("a2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c")
pkCompB, err := hex.DecodeString("a2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c")
require.NoError(t, err)
pkCompL := SwapEndianness(pkCompB)
err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL)))
@@ -321,9 +315,7 @@ func TestL1CoordinatorTxByteParsersCompatibility(t *testing.T) {
encodeData, err := l1Tx.BytesCoordinatorTx(signature)
require.NoError(t, err)
expected, err := utils.HexDecode("1b186d7122ff7f654cfed3156719774898d573900c86599a885a706" +
"dbdffe5ea8cda71e5eb097e115405d84d1e7b464009b434b32c014a2df502d1f065ced8bc3ba2c28" +
"07ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c000000e7")
expected, err := utils.HexDecode("1b186d7122ff7f654cfed3156719774898d573900c86599a885a706dbdffe5ea8cda71e5eb097e115405d84d1e7b464009b434b32c014a2df502d1f065ced8bc3ba2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c000000e7")
require.NoError(t, err)
assert.Equal(t, expected, encodeData)

View File

@@ -10,7 +10,7 @@ import (
// L2Tx is a struct that represents an already forged L2 tx
type L2Tx struct {
// Stored in DB: mandatory fields
// Stored in DB: mandatory fields
TxID TxID `meddler:"id"`
BatchNum BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged.
Position int `meddler:"position"`
@@ -21,10 +21,9 @@ type L2Tx struct {
Amount *big.Int `meddler:"amount,bigint"`
Fee FeeSelector `meddler:"fee"`
// Nonce is filled by the TxProcessor
Nonce Nonce `meddler:"nonce"`
Type TxType `meddler:"type"`
// EthBlockNum in which this L2Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"`
Nonce Nonce `meddler:"nonce"`
Type TxType `meddler:"type"`
EthBlockNum int64 `meddler:"eth_block_num"` // EthereumBlockNumber in which this L2Tx was added to the queue
}
// NewL2Tx returns the given L2Tx with the TxId & Type parameters calculated

View File

@@ -19,8 +19,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e",
l2Tx.TxID.String())
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -31,8 +30,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2",
l2Tx.TxID.String())
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -44,8 +42,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e",
l2Tx.TxID.String())
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -57,8 +54,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9",
l2Tx.TxID.String())
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 1,
@@ -70,8 +66,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618",
l2Tx.TxID.String())
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 999,
@@ -83,8 +78,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1",
l2Tx.TxID.String())
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 4444,
@@ -96,8 +90,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a",
l2Tx.TxID.String())
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
}
func TestL2TxByteParsers(t *testing.T) {

View File

@@ -16,8 +16,7 @@ import (
// EmptyBJJComp contains the 32 byte array of an empty BabyJubJub PublicKey
// Compressed. It is a valid point in the BabyJubJub curve, so does not give
// errors when being decompressed.
var EmptyBJJComp = babyjub.PublicKeyComp([32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
var EmptyBJJComp = babyjub.PublicKeyComp([32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
// PoolL2Tx is a struct that represents a L2Tx sent by an account to the
// coordinator that is waiting to be forged
@@ -101,8 +100,6 @@ func (tx *PoolL2Tx) SetType() error {
tx.Type = TxTypeTransferToBJJ
} else if tx.ToEthAddr != FFAddr && tx.ToEthAddr != EmptyAddr {
tx.Type = TxTypeTransferToEthAddr
} else {
return tracerr.Wrap(errors.New("malformed transaction"))
}
} else {
return tracerr.Wrap(errors.New("malformed transaction"))
@@ -306,8 +303,10 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
copy(e1B[5:25], tx.ToEthAddr[:])
toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
copy(e1B[5:25], toEthAddr.Bytes())
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -319,8 +318,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2,
rqToEthAddr, rqToBJJY})
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
}
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
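For orientation (a sketch that mirrors the tests in the next file; `tx`, `sk` and `chainID` are assumed to be in scope): the usual flow is to Poseidon-hash the tx with HashToSign, sign that hash with the BabyJubJub key, and later verify it against the compressed public key.
// Sketch of the sign/verify round trip used by the tests below.
toSign, err := tx.HashToSign(chainID)
if err != nil {
	panic(err) // sketch only
}
sig := sk.SignPoseidon(toSign) // babyjub.PrivateKey EdDSA-Poseidon signature
tx.Signature = sig.Compress()
ok := tx.VerifySignature(chainID, sk.Public().Compress()) // true for a well-formed tx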

View File

@@ -21,20 +21,17 @@ func TestNewPoolL2Tx(t *testing.T) {
}
poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e",
poolL2Tx.TxID.String())
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
}
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
// test vectors values generated from javascript implementation
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
_, err := hex.Decode(skPositive[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090002"))
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
@@ -126,8 +123,7 @@ func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
func TestRqTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
RqFromIdx: 7,
@@ -146,8 +142,7 @@ func TestRqTxCompressedDataV2(t *testing.T) {
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007",
hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestHashToSign(t *testing.T) {
@@ -162,15 +157,13 @@ func TestHashToSign(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "0b8abaf6b7933464e4450df2514da8b72606c02bf7f89bf6e54816fbda9d9d57",
hex.EncodeToString(toSign.Bytes()))
assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
}
func TestVerifyTxSignature(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 2,
@@ -184,49 +177,18 @@ func TestVerifyTxSignature(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t,
"3144939470626721092564692894890580265754250231349521601298746071096761507003",
toSign.String())
assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()
assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress()))
}
func TestVerifyTxSignatureEthAddrWith0(t *testing.T) {
chainID := uint16(5)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("02f0b4f87065af3797aaaf934e8b5c31563c17f2272fa71bd0146535bfbb4184"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 10659,
ToIdx: 0,
ToEthAddr: ethCommon.HexToAddress("0x0004308BD15Ead4F1173624dC289DBdcC806a309"),
Amount: big.NewInt(5000),
TokenID: 0,
Nonce: 946,
Fee: 231,
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
sig := sk.SignPoseidon(toSign)
assert.Equal(t,
"f208b8298d5f37148ac3c0c03703272ea47b9f836851bcf8dd5f7e4e3b336ca1d2f6e92ad85dc25f174daf7a0abfd5f71dead3f059b783f4c4b2f56a18a47000",
sig.Compress().String(),
)
tx.Signature = sig.Compress()
assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress()))
}
func TestDecompressEmptyBJJComp(t *testing.T) {
pkComp := EmptyBJJComp
pk, err := pkComp.Decompress()
require.NoError(t, err)
assert.Equal(t,
"2957874849018779266517920829765869116077630550401372566248359756137677864698",
pk.X.String())
assert.Equal(t, "2957874849018779266517920829765869116077630550401372566248359756137677864698", pk.X.String())
assert.Equal(t, "0", pk.Y.String())
}

View File

@@ -15,9 +15,8 @@ const tokenIDBytesLen = 4
// Token is a struct that represents an Ethereum token that is supported in Hermez network
type Token struct {
TokenID TokenID `json:"id" meddler:"token_id"`
// EthBlockNum indicates the Ethereum block number in which this token was registered
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
TokenID TokenID `json:"id" meddler:"token_id"`
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` // Ethereum block number in which this token was registered
EthAddr ethCommon.Address `json:"ethereumAddress" meddler:"eth_addr"`
Name string `json:"name" meddler:"name"`
Symbol string `json:"symbol" meddler:"symbol"`
@@ -49,8 +48,7 @@ func (t TokenID) BigInt() *big.Int {
// TokenIDFromBytes returns TokenID from a byte array
func TokenIDFromBytes(b []byte) (TokenID, error) {
if len(b) != tokenIDBytesLen {
return 0, tracerr.Wrap(fmt.Errorf("can not parse TokenID, bytes len %d, expected 4",
len(b)))
return 0, tracerr.Wrap(fmt.Errorf("can not parse TokenID, bytes len %d, expected 4", len(b)))
}
tid := binary.BigEndian.Uint32(b[:4])
return TokenID(tid), nil

View File

@@ -15,12 +15,12 @@ import (
)
const (
// TxIDPrefixL1UserTx is the prefix that determines that the TxID is for
// a L1UserTx
// TxIDPrefixL1UserTx is the prefix that determines that the TxID is
// for a L1UserTx
//nolinter:gomnd
TxIDPrefixL1UserTx = byte(0)
// TxIDPrefixL1CoordTx is the prefix that determines that the TxID is
// TxIDPrefixL1CoordTx is the prefix that determines that the TxID is
// for a L1CoordinatorTx
//nolinter:gomnd
TxIDPrefixL1CoordTx = byte(1)
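As an illustration of how these prefixes are meant to be read (a sketch, not from the diff): the first byte of a TxID identifies the origin of the transaction, which is also visible in the 0x00.../0x01.../0x02... IDs asserted in the tests of this changeset.
// Sketch: dispatch on the TxID prefix byte (L2 txs use a further prefix not shown in this hunk).
switch txid[0] {
case TxIDPrefixL1UserTx: // byte(0)
	// L1 user transaction
case TxIDPrefixL1CoordTx: // byte(1)
	// L1 coordinator transaction
}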
@@ -51,8 +51,7 @@ func (txid *TxID) Scan(src interface{}) error {
return tracerr.Wrap(fmt.Errorf("can't scan %T into TxID", src))
}
if len(srcB) != TxIDLen {
return tracerr.Wrap(fmt.Errorf("can't scan []byte of len %d into TxID, need %d",
len(srcB), TxIDLen))
return tracerr.Wrap(fmt.Errorf("can't scan []byte of len %d into TxID, need %d", len(srcB), TxIDLen))
}
copy(txid[:], srcB)
return nil
@@ -88,7 +87,7 @@ func (txid TxID) MarshalText() ([]byte, error) {
return []byte(txid.String()), nil
}
// UnmarshalText unmarshalls a TxID
// UnmarshalText unmarshals a TxID
func (txid *TxID) UnmarshalText(data []byte) error {
idStr := string(data)
id, err := NewTxIDFromString(idStr)
@@ -103,15 +102,13 @@ func (txid *TxID) UnmarshalText(data []byte) error {
type TxType string
const (
// TxTypeExit represents L2->L1 token transfer. A leaf for this account appears in the exit
// tree of the block
// TxTypeExit represents L2->L1 token transfer. A leaf for this account appears in the exit tree of the block
TxTypeExit TxType = "Exit"
// TxTypeTransfer represents L2->L2 token transfer
TxTypeTransfer TxType = "Transfer"
// TxTypeDeposit represents L1->L2 transfer
TxTypeDeposit TxType = "Deposit"
// TxTypeCreateAccountDeposit represents creation of a new leaf in the state tree
// (newAcconut) + L1->L2 transfer
// TxTypeCreateAccountDeposit represents creation of a new leaf in the state tree (newAccount) + L1->L2 transfer
TxTypeCreateAccountDeposit TxType = "CreateAccountDeposit"
// TxTypeCreateAccountDepositTransfer represents L1->L2 transfer + L2->L2 transfer
TxTypeCreateAccountDepositTransfer TxType = "CreateAccountDepositTransfer"
@@ -127,31 +124,24 @@ const (
TxTypeTransferToBJJ TxType = "TransferToBJJ"
)
// Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx &
// PoolL2Tx
// Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx & PoolL2Tx
type Tx struct {
// Generic
IsL1 bool `meddler:"is_l1"`
TxID TxID `meddler:"id"`
Type TxType `meddler:"type"`
Position int `meddler:"position"`
FromIdx Idx `meddler:"from_idx"`
ToIdx Idx `meddler:"to_idx"`
Amount *big.Int `meddler:"amount,bigint"`
AmountFloat float64 `meddler:"amount_f"`
TokenID TokenID `meddler:"token_id"`
USD *float64 `meddler:"amount_usd"`
// BatchNum in which this tx was forged. If the tx is L2, this must be != 0
BatchNum *BatchNum `meddler:"batch_num"`
// Ethereum Block Number in which this L1Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"`
IsL1 bool `meddler:"is_l1"`
TxID TxID `meddler:"id"`
Type TxType `meddler:"type"`
Position int `meddler:"position"`
FromIdx Idx `meddler:"from_idx"`
ToIdx Idx `meddler:"to_idx"`
Amount *big.Int `meddler:"amount,bigint"`
AmountFloat float64 `meddler:"amount_f"`
TokenID TokenID `meddler:"token_id"`
USD *float64 `meddler:"amount_usd"`
BatchNum *BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0
EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
// L1
// ToForgeL1TxsNum in which the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
// UserOrigin is set to true if the tx was originated by a user, false if it was aoriginated
// by a coordinator. Note that this differ from the spec for implementation simplification
// purpposes
UserOrigin *bool `meddler:"user_origin"`
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
UserOrigin *bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was originated by a coordinator. Note that this differs from the spec for implementation simplification purposes
FromEthAddr ethCommon.Address `meddler:"from_eth_addr"`
FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj"`
DepositAmount *big.Int `meddler:"deposit_amount,bigintnull"`

View File

@@ -21,10 +21,8 @@ func TestSignatureConstant(t *testing.T) {
func TestTxIDScannerValue(t *testing.T) {
txid0 := &TxID{}
txid1 := &TxID{}
txid0B := [TxIDLen]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 0, 1, 2}
txid1B := [TxIDLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
txid0B := [TxIDLen]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}
txid1B := [TxIDLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
copy(txid0[:], txid0B[:])
copy(txid1[:], txid1B[:])

View File

@@ -21,23 +21,16 @@ func TestBJJFromStringWithChecksum(t *testing.T) {
assert.NoError(t, err)
// expected values computed with js implementation
assert.Equal(t,
"2492816973395423007340226948038371729989170225696553239457870892535792679622",
pk.X.String())
assert.Equal(t,
"15238403086306505038849621710779816852318505119327426213168494964113886299863",
pk.Y.String())
assert.Equal(t, "2492816973395423007340226948038371729989170225696553239457870892535792679622", pk.X.String())
assert.Equal(t, "15238403086306505038849621710779816852318505119327426213168494964113886299863", pk.Y.String())
}
func TestRmEndingZeroes(t *testing.T) {
s0, err :=
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000000")
s0, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
s1, err :=
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000001")
s1, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000001")
require.NoError(t, err)
s2, err :=
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000002")
s2, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000002")
require.NoError(t, err)
// expect cropped last zeroes

View File

@@ -1,4 +1,4 @@
// Package common zk.go contains all the common data structures used at the
// Package common contains all the common data structures used at the
// hermez-node, zk.go contains the zkSnark inputs used to generate the proof
package common
@@ -67,7 +67,7 @@ type ZKInputs struct {
// accumulate fees
// FeePlanTokens contains all the tokenIDs for which the fees are being
// accumulated and those fees accumulated will be paid to the FeeIdxs
// accumulated and those fees accumulated will be paid to the FeeIdxs
// array. The order of FeeIdxs & FeePlanTokens & State3 must match.
// Coordinator fees are processed correlated such as:
// [FeePlanTokens[i], FeeIdxs[i]]
@@ -130,8 +130,8 @@ type ZKInputs struct {
RqOffset []*big.Int `json:"rqOffset"` // uint8 (max 3 bits), len: [maxTx]
// transaction L2 request data
// RqTxCompressedDataV2 big.Int (max 251 bits), len: [maxTx]
RqTxCompressedDataV2 []*big.Int `json:"rqTxCompressedDataV2"`
// RqTxCompressedDataV2
RqTxCompressedDataV2 []*big.Int `json:"rqTxCompressedDataV2"` // big.Int (max 251 bits), len: [maxTx]
// RqToEthAddr
RqToEthAddr []*big.Int `json:"rqToEthAddr"` // ethCommon.Address, len: [maxTx]
// RqToBJJAy
@@ -301,8 +301,7 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) {
}
// NewZKInputs returns a pointer to an initialized struct of ZKInputs
func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32,
currentNumBatch *big.Int) *ZKInputs {
func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs {
zki := &ZKInputs{}
zki.Metadata.MaxFeeIdxs = maxFeeIdxs
zki.Metadata.MaxLevels = uint32(48) //nolint:gomnd
@@ -481,7 +480,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528) //nolint:gomnd
l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -507,14 +506,11 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
l2TxsData = append(l2TxsData, z.Metadata.L2TxsData[i]...)
}
if len(l2TxsData) > int(expectedL2TxsDataLen) {
return nil, tracerr.Wrap(fmt.Errorf("len(l2TxsData): %d, expected: %d",
len(l2TxsData), expectedL2TxsDataLen))
return nil, tracerr.Wrap(fmt.Errorf("len(l2TxsData): %d, expected: %d", len(l2TxsData), expectedL2TxsDataLen))
}
b = append(b, l2TxsData...)
l2TxsPadding := make([]byte,
(int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-
len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
b = append(b, l2TxsPadding...)
// [NLevels * MAX_TOKENS_FEE bits] feeTxsData

View File

@@ -44,6 +44,13 @@ type ForgeBatchGasCost struct {
L2Tx uint64 `validate:"required"`
}
// CoordinatorAPI specifies the configuration parameters of the API in
// coordinator mode
type CoordinatorAPI struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
}
// Coordinator is the coordinator specific configuration.
type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging
@@ -73,7 +80,7 @@ type Coordinator struct {
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
@@ -83,7 +90,7 @@ type Coordinator struct {
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the
@@ -126,7 +133,7 @@ type Coordinator struct {
// L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge
// outdated transactions. Outdated L2Txs are those that have
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
@@ -136,7 +143,7 @@ type Coordinator struct {
// nonce.
InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge
// outdated transactions. Outdated L2Txs are those that have
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
@@ -168,7 +175,7 @@ type Coordinator struct {
MaxGasPrice *big.Int `validate:"required"`
// GasPriceIncPerc is the percentage increase of gas price set
// in an ethereum transaction from the suggested gas price by
// the ethereum node
// the ethereum node
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager
@@ -197,10 +204,7 @@ type Coordinator struct {
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"`
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
API CoordinatorAPI `validate:"required"`
Debug struct {
// BatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline
@@ -215,6 +219,45 @@ type Coordinator struct {
}
}
// PostgreSQL contains the PostgreSQL configuration parameters. It's possible to use
// differentiated SQL connections for read/write. If the read configuration is
// not provided, the write one is used for both reads and writes
type PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
HostWrite string `validate:"required"`
// User of the PostgreSQL write server
UserWrite string `validate:"required"`
// Password of the PostgreSQL write server
PasswordWrite string `validate:"required"`
// Name of the PostgreSQL write server database
NameWrite string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
}
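A minimal sketch of the documented fallback (the helper name is hypothetical and not part of this diff): if no read-side host is configured, the write-side parameters are reused for reads.
// Hypothetical helper illustrating the read/write fallback described above.
func readParams(c PostgreSQL) (host string, port int, user, password, name string) {
	if c.HostRead == "" { // no dedicated read connection configured
		return c.HostWrite, c.PortWrite, c.UserWrite, c.PasswordWrite, c.NameWrite
	}
	return c.HostRead, c.PortRead, c.UserRead, c.PasswordRead, c.NameRead
}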
// NodeDebug specifies debug configuration parameters
type NodeDebug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
// Node is the hermez node configuration.
type Node struct {
PriceUpdater struct {
@@ -231,32 +274,8 @@ type Node struct {
// Keep is the number of checkpoints to keep
Keep int `validate:"required"`
} `validate:"required"`
// It's possible to use diferentiated SQL connections for read/write.
// If the read configuration is not provided, the write one it's going to be used
// for both reads and writes
PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
HostWrite string `validate:"required"`
// User of the PostgreSQL write server
UserWrite string `validate:"required"`
// Password of the PostgreSQL write server
PasswordWrite string `validate:"required"`
// Name of the PostgreSQL write server database
NameWrite string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
} `validate:"required"`
Web3 struct {
PostgreSQL PostgreSQL `validate:"required"`
Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server
URL string `validate:"required"`
} `validate:"required"`
@@ -286,6 +305,7 @@ type Node struct {
// TokenHEZ address
TokenHEZName string `validate:"required"`
} `validate:"required"`
// API specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string
@@ -303,20 +323,45 @@ type Node struct {
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
}
// APIServer is the API server configuration parameters
type APIServer struct {
// API specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string `validate:"required"`
// Explorer enables the Explorer API endpoints
Explorer bool
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Coordinator struct {
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
L2DB struct {
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
} `validate:"required"`
}
Debug NodeDebug `validate:"required"`
}
// Load loads a generic config.
func Load(path string, cfg interface{}) error {
bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -330,8 +375,8 @@ func Load(path string, cfg interface{}) error {
return nil
}
// LoadCoordinator loads the Coordinator configuration from path.
func LoadCoordinator(path string) (*Node, error) {
// LoadNode loads the Node configuration from path.
func LoadNode(path string, coordinator bool) (*Node, error) {
var cfg Node
if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -340,21 +385,28 @@ func LoadCoordinator(path string) (*Node, error) {
if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil
}
// LoadNode loads the Node configuration from path.
func LoadNode(path string) (*Node, error) {
var cfg Node
// LoadAPIServer loads the APIServer configuration from path.
func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
var cfg APIServer
if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
}
validate := validator.New()
if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil
}
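A minimal usage sketch (not part of the diff) of how a command-line entrypoint might pick between the two loaders added above; the import path, config file names and logging are assumptions, not taken from this change:

package main

import (
    "log"

    "github.com/hermeznetwork/hermez-node/config"
)

func main() {
    // Run as a full node; passing true also validates the Coordinator section.
    nodeCfg, err := config.LoadNode("node.toml", true)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("node API at %q, DB write host %q", nodeCfg.API.Address, nodeCfg.PostgreSQL.HostWrite)

    // Run as a standalone API server; it reuses the same PostgreSQL block.
    apiCfg, err := config.LoadAPIServer("apiserver.toml", false)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("api server listening at %q", apiCfg.API.Address)
}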

View File

@@ -8,7 +8,6 @@ import (
"path"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/eth"
@@ -91,9 +90,9 @@ type BatchInfo struct {
L2Txs []common.L2Tx
CoordIdxs []common.Idx
ForgeBatchArgs *eth.RollupForgeBatchArgs
Auth *bind.TransactOpts `json:"-"`
EthTx *types.Transaction
EthTxErr error
// FeesInfo
EthTx *types.Transaction
EthTxErr error
// SendTimestamp the time of batch sent to ethereum
SendTimestamp time.Time
Receipt *types.Receipt

View File

@@ -25,9 +25,8 @@ import (
var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf(
"no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
)
const (
@@ -54,7 +53,7 @@ type Config struct {
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
@@ -64,7 +63,7 @@ type Config struct {
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
@@ -145,8 +144,8 @@ type Coordinator struct {
pipelineNum int // Pipeline sequential number. The first pipeline is 1
pipelineFromBatch fromBatch // batch from which we started the pipeline
provers []prover.Client
consts synchronizer.SCConsts
vars synchronizer.SCVariables
consts common.SCConsts
vars common.SCVariables
stats synchronizer.Stats
started bool
@@ -186,8 +185,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client,
ethClient eth.ClientInterface,
scConsts *synchronizer.SCConsts,
initSCVars *synchronizer.SCVariables,
scConsts *common.SCConsts,
initSCVars *common.SCVariables,
) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -276,13 +275,13 @@ type MsgSyncBlock struct {
Batches []common.BatchData
// Vars contains each Smart Contract variables if they are updated, or
// nil if they haven't changed.
Vars synchronizer.SCVariablesPtr
Vars common.SCVariablesPtr
}
// MsgSyncReorg indicates a reorg
type MsgSyncReorg struct {
Stats synchronizer.Stats
Vars synchronizer.SCVariablesPtr
Vars common.SCVariablesPtr
}
// MsgStopPipeline indicates a signal to reset the pipeline
@@ -301,7 +300,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
}
}
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
@@ -313,7 +312,7 @@ func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariable
}
}
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&c.vars, vars)
}
@@ -474,8 +473,7 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
// the next pipeline will start from the last state of the synchronizer,
// otherwise, it will state from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string,
failedBatchNum common.BatchNum) error {
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1

View File

@@ -126,8 +126,7 @@ func newTestModules(t *testing.T) modules {
batchBuilderDBPath, err = ioutil.TempDir("", "tmpBatchBuilderDB")
require.NoError(t, err)
deleteme = append(deleteme, batchBuilderDBPath)
batchBuilder, err := batchbuilder.NewBatchBuilder(batchBuilderDBPath, syncStateDB, 0,
uint64(nLevels))
batchBuilder, err := batchbuilder.NewBatchBuilder(batchBuilderDBPath, syncStateDB, 0, uint64(nLevels))
assert.NoError(t, err)
return modules{
@@ -188,12 +187,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
&prover.MockClient{Delay: 400 * time.Millisecond},
}
scConsts := &synchronizer.SCConsts{
scConsts := &common.SCConsts{
Rollup: *ethClientSetup.RollupConstants,
Auction: *ethClientSetup.AuctionConstants,
WDelayer: *ethClientSetup.WDelayerConstants,
}
initSCVars := &synchronizer.SCVariables{
initSCVars := &common.SCVariables{
Rollup: *ethClientSetup.RollupVariables,
Auction: *ethClientSetup.AuctionVariables,
WDelayer: *ethClientSetup.WDelayerVariables,
@@ -206,7 +205,7 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
func newTestSynchronizer(t *testing.T, ethClient *test.Client, ethClientSetup *test.ClientSetup,
modules modules) *synchronizer.Synchronizer {
sync, err := synchronizer.NewSynchronizer(ethClient, modules.historyDB, modules.l2DB, modules.stateDB,
sync, err := synchronizer.NewSynchronizer(ethClient, modules.historyDB, modules.stateDB,
synchronizer.Config{
StatsRefreshPeriod: 0 * time.Second,
})
@@ -529,7 +528,7 @@ func TestCoordinatorStress(t *testing.T) {
coord.SendMsg(ctx, MsgSyncBlock{
Stats: *stats,
Batches: blockData.Rollup.Batches,
Vars: synchronizer.SCVariablesPtr{
Vars: common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,

View File

@@ -22,7 +22,7 @@ import (
type statsVars struct {
Stats synchronizer.Stats
Vars synchronizer.SCVariablesPtr
Vars common.SCVariablesPtr
}
type state struct {
@@ -36,7 +36,7 @@ type state struct {
type Pipeline struct {
num int
cfg Config
consts synchronizer.SCConsts
consts common.SCConsts
// state
state state
@@ -57,7 +57,7 @@ type Pipeline struct {
purger *Purger
stats synchronizer.Stats
vars synchronizer.SCVariables
vars common.SCVariables
statsVarsCh chan statsVars
ctx context.Context
@@ -90,7 +90,7 @@ func NewPipeline(ctx context.Context,
coord *Coordinator,
txManager *TxManager,
provers []prover.Client,
scConsts *synchronizer.SCConsts,
scConsts *common.SCConsts,
) (*Pipeline, error) {
proversPool := NewProversPool(len(provers))
proversPoolSize := 0
@@ -124,8 +124,7 @@ func NewPipeline(ctx context.Context,
}
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
vars *synchronizer.SCVariablesPtr) {
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
select {
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done():
@@ -134,7 +133,7 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
stats *synchronizer.Stats, vars *common.SCVariables) error {
p.state = state{
batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
@@ -195,7 +194,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
return nil
}
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&p.vars, vars)
}
@@ -210,7 +209,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
return nil, ctx.Err()
} else if err != nil {
log.Errorw("proversPool.Get", "err", err)
return nil, tracerr.Wrap(err)
return nil, err
}
defer func() {
// If we encounter any error (notice that this function returns
@@ -240,7 +239,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
} else {
log.Errorw("forgeBatch", "err", err)
}
return nil, tracerr.Wrap(err)
return nil, err
}
// 3. Send the ZKInputs to the proof server
@@ -249,14 +248,14 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
return nil, ctx.Err()
} else if err != nil {
log.Errorw("sendServerProof", "err", err)
return nil, tracerr.Wrap(err)
return nil, err
}
return batchInfo, nil
}
// Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
stats *synchronizer.Stats, vars *common.SCVariables) error {
if p.started {
log.Fatal("Pipeline already started")
}
@@ -407,6 +406,11 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
batchInfo.Debug.StartTimestamp = now
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
selectionCfg := &txselector.SelectionConfig{
MaxL1UserTxs: common.RollupConstMaxL1UserTx,
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
var poolL2Txs []common.PoolL2Tx
var discardedL2Txs []common.PoolL2Tx
var l1UserTxsExtra, l1CoordTxs []common.L1Tx
@@ -422,7 +426,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, tracerr.Wrap(errForgeBeforeDelay)
return nil, errForgeBeforeDelay
}
// 1. Decide if we forge L2Tx or L1+L2Tx
@@ -437,14 +441,14 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
return nil, tracerr.Wrap(err)
}
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, l1UserTxs)
p.txSelector.GetL1L2TxSelection(selectionCfg, l1UserTxs)
if err != nil {
return nil, tracerr.Wrap(err)
}
} else {
// 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
p.txSelector.GetL2TxSelection(selectionCfg)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -458,18 +462,17 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in a open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
// Query the L1UserTxs in the queue following
// the one we are trying to forge.
nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
// batch to advance the queues and forge the
// L1UserTxs in the future. Otherwise, skip.
if len(nextL1UserTxs) == 0 {
noTxs = true
}
} else {
@@ -480,7 +483,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
return nil, errForgeNoTxsBeforeDelay
}
}
@@ -496,15 +499,14 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
batchInfo.CoordIdxs = coordIdxs
batchInfo.VerifierIdx = p.cfg.VerifierIdx
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs),
batchInfo.BatchNum); err != nil {
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum); err != nil {
return nil, tracerr.Wrap(err)
}
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
return nil, tracerr.Wrap(err)
}
// Invalidate transactions that become invalid because of
// Invalidate transactions that become invalid because of
// the poolL2Txs selected. Will mark as invalid the txs that have a
// (fromIdx, nonce) which already appears in the selected txs (includes
// all the nonces smaller than the current one)
@@ -541,8 +543,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
// waitServerProof gets the generated zkProof & sends it to the SmartContract
func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
// until not resolved don't continue. Returns when the proof server has calculated the proof
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call, until not resolved don't continue. Returns when the proof server has calculated the proof
if err != nil {
return tracerr.Wrap(err)
}

View File

@@ -140,7 +140,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("0")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("6860514559199319426609623120853503165917774887908204288119245630904770452486")
newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
ethAddTokens(blocks, ethClient)
err = ethClient.CtlAddBlocks(blocks)
@@ -206,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
require.NoError(t, err)
}
err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
Rollup: *syncSCVars.Rollup,
Auction: *syncSCVars.Auction,
WDelayer: *syncSCVars.WDelayer,
})
err = pipeline.reset(batchNum, syncStats, syncSCVars)
require.NoError(t, err)
// Sanity check
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()

View File

@@ -14,7 +14,7 @@ import (
// PurgerCfg is the purger configuration
type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated
// transactions. Outdated L2Txs are those that have been forged or
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
@@ -23,7 +23,7 @@ type PurgerCfg struct {
// transactions due to nonce lower than the account nonce.
InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated
// transactions. Outdated L2Txs are those that have been forged or
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.

View File

@@ -31,10 +31,10 @@ type TxManager struct {
batchCh chan *BatchInfo
chainID *big.Int
account accounts.Account
consts synchronizer.SCConsts
consts common.SCConsts
stats synchronizer.Stats
vars synchronizer.SCVariables
vars common.SCVariables
statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum
@@ -55,8 +55,7 @@ type TxManager struct {
// NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (
*TxManager, error) {
coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (*TxManager, error) {
chainID, err := ethClient.EthChainID()
if err != nil {
return nil, tracerr.Wrap(err)
@@ -67,7 +66,7 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
}
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, err
}
log.Infow("TxManager started", "nonce", accNonce)
return &TxManager{
@@ -103,8 +102,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
}
// SetSyncStatsVars is a thread safe method to sets the synchronizer Stats
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats,
vars *synchronizer.SCVariablesPtr) {
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
select {
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done():
@@ -120,7 +118,7 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
}
}
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&t.vars, vars)
}
@@ -184,30 +182,19 @@ func addPerc(v *big.Int, p int64) *big.Int {
r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolit:gomnd
// If the increase is 0, force it to be 1 so that a gas increase
// doesn't result in the same value, making the transaction to be equal
// than before.
if r.Cmp(big.NewInt(0)) == 0 {
r = big.NewInt(1)
}
return r.Add(v, r)
}
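A short worked example (illustrative numbers, not from the diff) of the arithmetic in addPerc above:

// addPerc(big.NewInt(100), 10) -> r = 100*10/100 = 10          -> returns 110
// addPerc(big.NewInt(5), 10)   -> r = 5*10/100   = 0 (integer) -> guard bumps it to 1 -> returns 6
// Without that guard a resent transaction would reuse the exact same gas price.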
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo,
resend bool) error {
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
var ethTx *types.Transaction
var err error
var auth *bind.TransactOpts
auth, err := t.NewAuth(ctx, batchInfo)
if err != nil {
return tracerr.Wrap(err)
}
auth.Nonce = big.NewInt(int64(t.accNextNonce))
if resend {
auth = batchInfo.Auth
auth.GasPrice = addPerc(auth.GasPrice, 10)
} else {
auth, err = t.NewAuth(ctx, batchInfo)
if err != nil {
return tracerr.Wrap(err)
}
batchInfo.Auth = auth
auth.Nonce = big.NewInt(int64(t.accNextNonce))
auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
}
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
@@ -278,8 +265,7 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
}
}
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs),
batchInfo.BatchNum); err != nil {
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
return tracerr.Wrap(err)
}
return nil
@@ -311,9 +297,7 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
}
}
if err != nil {
return tracerr.Wrap(
fmt.Errorf("reached max attempts for ethClient.EthTransactionReceipt: %w",
err))
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.EthTransactionReceipt: %w", err))
}
batchInfo.Receipt = receipt
t.cfg.debugBatchStore(batchInfo)
@@ -503,7 +487,7 @@ func (t *TxManager) Run(ctx context.Context) {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
t.coord.SendMsg(ctx, MsgStopPipeline{
@@ -568,7 +552,7 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
next++

View File

@@ -1,8 +1,11 @@
package historydb
import (
"database/sql"
"errors"
"fmt"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
@@ -32,9 +35,18 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
// GetBatchInternalAPI returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow(
hdb.dbRead, batch,
d, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -180,6 +192,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
return nil, 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
}
func (hdb *HistoryDB) getBestBidsAPI(
d meddler.DB,
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator
@@ -214,7 +234,7 @@ func (hdb *HistoryDB) GetBestBidsAPI(
}
query = hdb.dbRead.Rebind(queryStr)
bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil {
if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// log.Debug(query)
@@ -697,25 +717,6 @@ func (hdb *HistoryDB) GetExitsAPI(
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
}
// GetBucketUpdatesAPI retrieves latest values for each bucket
func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
var bucketUpdates []*BucketUpdateAPI
err = meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address,
@@ -800,29 +801,6 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
return auctionVars, tracerr.Wrap(err)
}
// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems)
if err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}
// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
@@ -941,125 +919,6 @@ func (hdb *HistoryDB) GetAccountsAPI(
accounts[0].TotalItems - uint64(len(accounts)), nil
}
// GetMetricsAPI returns metrics
func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT
COALESCE (MIN(batch.batch_num), 0) as batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
// Avoid dividing by 0
if seconds == 0 {
seconds++
}
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
} else {
metrics.TransactionsPerBatch = float64(0)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else {
metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
metrics.AvgTransactionFee = 0
}
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0)
AS estimated_time_to_forge_l1 FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
metricsTotals.FirstBatchNum, lastBatchNum,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
return metrics, nil
}
// GetAvgTxFeeAPI returns average transaction fee of the last 1h
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
if err != nil {
return 0, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return 0, tracerr.Wrap(err)
}
var avgTransactionFee float64
if metricsTotals.TotalTransactions > 0 {
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
avgTransactionFee = 0
}
return avgTransactionFee, nil
}
// GetCommonAccountAPI returns the account associated to an account idx
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
cancel, err := hdb.apiConnCon.Acquire()
@@ -1070,8 +929,257 @@ func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, erro
defer hdb.apiConnCon.Release()
account := &common.Account{}
err = meddler.QueryRow(
hdb.dbRead, account, `SELECT idx, token_id, batch_num, bjj, eth_addr
FROM account WHERE idx = $1;`, idx,
hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx,
)
return account, tracerr.Wrap(err)
}
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
}
func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
d, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// GetNodeInfoAPI returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.GetNodeInfo()
}
// GetBucketUpdatesInternalAPI returns the latest bucket updates
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetNextForgersInternalAPI returns next forgers
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
auctionConsts *common.AuctionConstants,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForgerAPI{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum - 1
nextForger := NextForgerAPI{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is the boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
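For intuition, a worked example of the min-bid check above (illustrative numbers; the slot-set length of 6 is an assumption):

// Assume len(DefaultSlotSetBid) == 6 and slotNum == 1234:
//   minBidSelector = 1234 % 6 = 4
// so the winning bid for slot 1234 must be at least DefaultSlotSetBid[4];
// when minBid.Cmp(bid) == 1 the bid loses and the boot coordinator forges that slot.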
// GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
var metrics MetricsAPI
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: lastBatchNum,
}
if err := meddler.QueryRow(
hdb.dbRead, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++
}
metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch
nBatches := p.ToBatchNum - p.FromBatchNum + 1
if nBatches == 0 { // Avoid dividing by 0
nBatches++
}
if (p.ToBatchNum - p.FromBatchNum) > 0 {
fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else {
metrics.TransactionsPerBatch = 0
}
// Get total fee of that period
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var totalFee float64
if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err)
}
// Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches)
if nTxs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nTxs)
} else {
metrics.AvgTransactionFee = 0
}
// Get and set amount of registered accounts
type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"`
TotalBJJ int64 `meddler:"total_bjj"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.TotalAccounts = ra.TotalIdx
metrics.TotalBJJs = ra.TotalBJJ
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil
}
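A worked example of the metrics arithmetic above, with made-up numbers for the 24h window (not taken from the diff):

// Assume the window covers batches 100..150 with 10_000 txs and spans 86_400 s:
//   TransactionsPerSecond = 10000 / 86400 ≈ 0.116
//   nBatches              = 150 - 100 + 1 = 51
//   TransactionsPerBatch  = 10000 / 51    ≈ 196.1
//   BatchFrequency        = 86400 / 51    ≈ 1694 s between batches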
// GetStateAPI returns the StateAPI
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.getStateAPI(hdb.dbRead)
}

View File

@@ -179,7 +179,7 @@ func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error)
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
batchNum,
)
return &batch, tracerr.Wrap(err)
return &batch, err
}
// GetAllBatches retrieve all batches from the DB
@@ -235,7 +235,7 @@ func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
)
return &batch, tracerr.Wrap(err)
return &batch, err
}
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
@@ -486,14 +486,23 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
}
// GetTokenSymbolsAndAddrs returns all the token symbols and addresses from the DB
func (hdb *HistoryDB) GetTokenSymbolsAndAddrs() ([]TokenSymbolAndAddr, error) {
var tokens []*TokenSymbolAndAddr
err := meddler.QueryAll(
hdb.dbRead, &tokens,
"SELECT symbol, eth_addr FROM token;",
)
return db.SlicePtrsToSlice(tokens).([]TokenSymbolAndAddr), tracerr.Wrap(err)
// GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokenSymbols []string
rows, err := hdb.dbRead.Query("SELECT symbol FROM token;")
if err != nil {
return nil, tracerr.Wrap(err)
}
defer db.RowsClose(rows)
sym := new(string)
for rows.Next() {
err = rows.Scan(sym)
if err != nil {
return nil, tracerr.Wrap(err)
}
tokenSymbols = append(tokenSymbols, *sym)
}
return tokenSymbols, nil
}
// AddAccounts insert accounts into the DB
@@ -754,16 +763,6 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
}
// GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
// open or frozen queues that are not yet forged)
func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) FROM tx WHERE batch_num IS NULL;`,
)
var count int
return count, tracerr.Wrap(row.Scan(&count))
}
// TODO: Think about changing all the queries that return a last value to queries that return the next valid value.
// GetLastTxsPosition for a given to_forge_l1_txs_num
@@ -842,6 +841,18 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
minBidInfo := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;`
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 {
return nil
@@ -1140,17 +1151,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(txn.Commit())
}
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
hdb.dbRead, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
@@ -1170,3 +1170,49 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
}
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the number of batches and accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Update NodeInfo struct
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
return &recommendedFee, nil
}
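A worked example of the fee computation above, using the createAccountExtraFeePercentage (2) and createAccountInternalExtraFeePercentage (2.5) constants from nodeinfo.go and illustrative numbers:

// Assume 1_000 txs and 150 USD of fees in the last hour, minFeeUSD = 0.10:
//   avgTransactionFee         = 150 / 1000          = 0.15
//   ExistingAccount           = max(0.15, 0.10)     = 0.15
//   CreatesAccount            = max(2*0.15, 0.10)   = 0.30
//   CreatesAccountAndRegister = max(2.5*0.15, 0.10) = 0.375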

View File

@@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, db, apiConnCon)
// Run tests
result := m.Run()
@@ -720,10 +720,6 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
assert.Equal(t, 5, len(l1UserTxs))
assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
count, err := historyDB.GetUnforgedL1UserTxsCount()
require.NoError(t, err)
assert.Equal(t, 5, count)
// No l1UserTxs for this toForgeL1TxsNum
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
require.NoError(t, err)
@@ -1176,7 +1172,7 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
@@ -1254,7 +1250,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
@@ -1269,13 +1265,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetAvgTxFeeAPI()
_, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err)
}
@@ -1464,3 +1454,65 @@ func setTestBlocks(from, to int64) []common.Block {
}
return blocks
}
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetStateInternalAPI(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
WithdrawalDelayer: *clientSetup.WDelayerVariables,
RecommendedFee: common.RecommendedFee{
ExistingAccount: 0.15,
},
}
err = historyDB.SetStateInternalAPI(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
MaxPoolTxs: 123,
MinFeeUSD: 0.5,
}
err = historyDB.SetNodeConfig(nodeConfig)
require.NoError(t, err)
dbConstants, err := historyDB.GetConstants()
require.NoError(t, err)
assert.Equal(t, constants, dbConstants)
dbNodeConfig, err := historyDB.GetNodeConfig()
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
require.NoError(t, err)
assert.Equal(t, stateAPI, dbStateAPI)
}

169
db/historydb/nodeinfo.go Normal file
View File

@@ -0,0 +1,169 @@
package historydb
import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Period represents a time period in ethereum
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
// NextForgerAPI represents the next forger exposed via the API
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// NetworkAPI is the network state exposed via the API
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
type NodePublicConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// Constants contains network constants
type Constants struct {
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
// NodeConfig contains the node config exposed in the API
type NodeConfig struct {
MaxPoolTxs uint32 `meddler:"max_pool_txs"`
MinFeeUSD float64 `meddler:"min_fee"`
}
// NodeInfo contains information about the node used when serving the API
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
StateAPI *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
// GetNodeInfo returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
ni := &NodeInfo{}
err := meddler.QueryRow(
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
)
return ni, tracerr.Wrap(err)
}
// GetConstants returns the Constants
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT constants FROM node_info WHERE item_id = 1;",
)
return nodeInfo.Constants, tracerr.Wrap(err)
}
// SetConstants sets the Constants
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
_constants := struct {
Constants *Constants `meddler:"constants,json"`
}{constants}
values, err := meddler.Default.Values(&_constants, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetStateInternalAPI returns the StateAPI
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.StateAPI, tracerr.Wrap(err)
}
// SetStateInternalAPI sets the StateAPI
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
_stateAPI := struct {
StateAPI *StateAPI `meddler:"state,json"`
}{stateAPI}
values, err := meddler.Default.Values(&_stateAPI, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetNodeConfig returns the NodeConfig
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT config FROM node_info WHERE item_id = 1;",
)
return nodeInfo.NodeConfig, tracerr.Wrap(err)
}
// SetNodeConfig sets the NodeConfig
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
_nodeConfig := struct {
NodeConfig *NodeConfig `meddler:"config,json"`
}{nodeConfig}
values, err := meddler.Default.Values(&_nodeConfig, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
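A minimal sketch (not part of the diff) of how the single-row node_info record is meant to be used: the node writes, the API server only reads. It assumes an already-constructed *HistoryDB named hdb, runs inside the historydb package (otherwise prefix the types with historydb.), and elides error handling:

// Node side: persist what the API needs to answer /state and /config.
_ = hdb.SetConstants(&Constants{ChainID: 5})                           // written once at startup
_ = hdb.SetNodeConfig(&NodeConfig{MaxPoolTxs: 10000, MinFeeUSD: 0.1}) // written once at startup
_ = hdb.SetStateInternalAPI(&StateAPI{})                               // refreshed periodically

// API-server side: read the same row (item_id = 1) through the read connection.
consts, _ := hdb.GetConstants()
state, _ := hdb.GetStateInternalAPI()
_, _ = consts, state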

View File

@@ -147,12 +147,6 @@ type txWrite struct {
Nonce *common.Nonce `meddler:"nonce"`
}
// TokenSymbolAndAddr token representation with only Eth addr and symbol
type TokenSymbolAndAddr struct {
Symbol string `meddler:"symbol"`
Addr ethCommon.Address `meddler:"eth_addr"`
}
// TokenWithUSD add USD info to common.Token
type TokenWithUSD struct {
ItemID uint64 `json:"itemId" meddler:"item_id"`
@@ -308,26 +302,15 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"`
}
// Metrics define metrics of the network
type Metrics struct {
// MetricsAPI define metrics of the network
type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
}
// MetricsTotals is used to get temporal information from HistoryDB
// to calculate data to be stored into the Metrics struct
type MetricsTotals struct {
TotalTransactions uint64 `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
TotalBatches int64 `meddler:"total_batches"`
TotalFeesUSD float64 `meddler:"total_fees"`
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"`
}
// BidAPI is a representation of a bid with additional information
@@ -380,6 +363,27 @@ type RollupVariablesAPI struct {
SafeMode bool `json:"safeMode" meddler:"safe_mode"`
}
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
rollupVars := RollupVariablesAPI{
EthBlockNum: rollupVariables.EthBlockNum,
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
WithdrawalDelay: rollupVariables.WithdrawalDelay,
SafeMode: rollupVariables.SafeMode,
}
for i, bucket := range rollupVariables.Buckets {
rollupVars.Buckets[i] = BucketParamsAPI{
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
}
}
return &rollupVars
}
// AuctionVariablesAPI are the variables of the Auction Smart Contract
type AuctionVariablesAPI struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -404,3 +408,28 @@ type AuctionVariablesAPI struct {
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
}
// NewAuctionVariablesAPI creates a AuctionVariablesAPI from common.AuctionVariables
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
auctionVars := AuctionVariablesAPI{
EthBlockNum: auctionVariables.EthBlockNum,
DonationAddress: auctionVariables.DonationAddress,
BootCoordinator: auctionVariables.BootCoordinator,
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
Outbidding: auctionVariables.Outbidding,
SlotDeadline: auctionVariables.SlotDeadline,
}
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionVars.AllocationRatio[i] = ratio
}
return &auctionVars
}

View File

@@ -316,7 +316,7 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
// copy synchronizer 'BatchNumX' to 'BatchNumX'
// copy synchronizer 'BatchNumX' to 'BatchNumX'
if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
return tracerr.Wrap(err)
}
@@ -458,7 +458,7 @@ func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, tracerr.Wrap(err)
return false, err
}
return true, nil
}
@@ -544,12 +544,10 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
// synchronizer to the same batchNum
k.m.Lock()
defer k.m.Unlock()
return PebbleMakeCheckpoint(source, dest)
return pebbleMakeCheckpoint(source, dest)
}
// PebbleMakeCheckpoint is a helper function to make a pebble checkpoint from
// source to dest.
func PebbleMakeCheckpoint(source, dest string) error {
func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); os.IsNotExist(err) {
} else if err != nil {
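The helper being renamed here wraps pebble's checkpoint support: the destination folder is wiped first, then the engine writes a consistent snapshot into it. A rough standalone sketch of that idea, assuming cockroachdb/pebble's Open and Checkpoint API and purely illustrative paths:

package main

import (
	"log"
	"os"

	"github.com/cockroachdb/pebble"
)

// makeCheckpoint copies a consistent snapshot of the DB at src into dest,
// removing dest first so repeated calls do not fail on an existing folder.
func makeCheckpoint(src, dest string) error {
	if _, err := os.Stat(dest); err == nil {
		if err := os.RemoveAll(dest); err != nil {
			return err
		}
	} else if !os.IsNotExist(err) {
		return err
	}
	db, err := pebble.Open(src, &pebble.Options{})
	if err != nil {
		return err
	}
	defer db.Close()
	return db.Checkpoint(dest)
}

func main() {
	if err := makeCheckpoint("./statedb/BatchNum4", "./statedb/current"); err != nil {
		log.Fatal(err)
	}
}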

View File

@@ -74,16 +74,6 @@ func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error
return tracerr.Wrap(err)
}
// AddManyAccountCreationAuth inserts a batch of accounts creation authorization
// if not exist into the DB
func (l2db *L2DB) AddManyAccountCreationAuth(auths []common.AccountCreationAuth) error {
_, err := sqlx.NamedExec(l2db.dbWrite,
`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES (:ethaddr, :bjj, :signature)
ON CONFLICT (eth_addr) DO NOTHING`, auths)
return tracerr.Wrap(err)
}
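The removed AddManyAccountCreationAuth leaned on sqlx expanding a named query over a slice of structs, so a single NamedExec call inserts every row and ON CONFLICT DO NOTHING keeps it idempotent. A minimal sketch of that pattern with a hypothetical struct and connection string (the columns are trimmed down from the real schema):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

type auth struct {
	EthAddr   []byte `db:"eth_addr"`
	Signature []byte `db:"signature"`
}

func main() {
	db, err := sqlx.Connect("postgres", "user=hermez dbname=hermez sslmode=disable") // illustrative DSN
	if err != nil {
		log.Fatal(err)
	}
	rows := []auth{
		{EthAddr: []byte{0x01}, Signature: []byte{0xaa}},
		{EthAddr: []byte{0x02}, Signature: []byte{0xbb}},
	}
	// NamedExec with a slice argument expands into a multi-row INSERT.
	_, err = db.NamedExec(
		`INSERT INTO account_creation_auth (eth_addr, signature)
		 VALUES (:eth_addr, :signature)
		 ON CONFLICT (eth_addr) DO NOTHING`, rows)
	if err != nil {
		log.Fatal(err)
	}
}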
// GetAccountCreationAuth returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) {
auth := new(common.AccountCreationAuth)
@@ -320,7 +310,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
return nil
}
// Fill the batch_num in the query with Sprintf because we are using a
// named query which works with slices, and doesn't handle an extra
// named query which works with slices, and doens't handle an extra
// individual argument.
query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil {

View File

@@ -38,7 +38,7 @@ func TestMain(m *testing.M) {
panic(err)
}
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil)
@@ -725,43 +725,6 @@ func TestAuth(t *testing.T) {
}
}
func TestManyAuth(t *testing.T) {
test.WipeDB(l2DB.DB())
const nAuths = 5
chainID := uint16(0)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
// Generate authorizations
genAuths := test.GenAuths(nAuths, chainID, hermezContractAddr)
auths := make([]common.AccountCreationAuth, len(genAuths))
// Convert to a non-pointer slice
for i := 0; i < len(genAuths); i++ {
auths[i] = *genAuths[i]
}
// Add a duplicate one to check the not exist condition
err := l2DB.AddAccountCreationAuth(genAuths[0])
require.NoError(t, err)
// Add to the DB
err = l2DB.AddManyAccountCreationAuth(auths)
require.NoError(t, err)
// Assert the result
for i := 0; i < len(auths); i++ {
// Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
require.NoError(t, err)
// Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ)
assert.Equal(t, auths[i].Signature, auth.Signature)
assert.Equal(t, auths[i].Timestamp.Unix(), auths[i].Timestamp.Unix())
nameZone, offset := auths[i].Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
}
func TestAddGet(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {

View File

@@ -661,6 +661,16 @@ CREATE TABLE account_creation_auth (
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
);
CREATE TABLE node_info (
item_id SERIAL PRIMARY KEY,
state BYTEA, -- object returned by GET /state
config BYTEA, -- Node config
-- max_pool_txs BIGINT, -- L2DB config
-- min_fee NUMERIC, -- L2DB config
constants BYTEA -- info of the network that is constant
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
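The new node_info table is deliberately a single row (item_id = 1) that the node keeps overwriting, so an API process can read the latest state, config and constants straight from Postgres instead of from the node. A hedged sketch of how that row might be written and read; the column usage follows the comments above, but the payload type and DSN are illustrative:

package main

import (
	"database/sql"
	"encoding/json"
	"log"

	_ "github.com/lib/pq"
)

type networkState struct { // stand-in for the object returned by GET /state
	LastBatchNum int64 `json:"lastBatchNum"`
}

func main() {
	db, err := sql.Open("postgres", "user=hermez dbname=hermez sslmode=disable") // illustrative DSN
	if err != nil {
		log.Fatal(err)
	}
	// Node side: overwrite the single row with the freshest state blob.
	blob, _ := json.Marshal(networkState{LastBatchNum: 4321})
	if _, err := db.Exec(`UPDATE node_info SET state = $1 WHERE item_id = 1`, blob); err != nil {
		log.Fatal(err)
	}
	// API side: read it back without touching the node process.
	var raw []byte
	if err := db.QueryRow(`SELECT state FROM node_info WHERE item_id = 1`).Scan(&raw); err != nil {
		log.Fatal(err)
	}
	var st networkState
	if err := json.Unmarshal(raw, &st); err != nil {
		log.Fatal(err)
	}
	log.Printf("last batch visible to the API: %d", st.LastBatchNum)
}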
-- +migrate Down
-- triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
@@ -675,6 +685,7 @@ DROP FUNCTION IF EXISTS set_tx;
DROP FUNCTION IF EXISTS forge_l1_user_txs;
DROP FUNCTION IF EXISTS set_pool_tx;
-- drop tables IF EXISTS
DROP TABLE IF EXISTS node_info;
DROP TABLE IF EXISTS account_creation_auth;
DROP TABLE IF EXISTS tx_pool;
DROP TABLE IF EXISTS auction_vars;

View File

@@ -17,8 +17,7 @@ import (
var (
// ErrStateDBWithoutMT is used when a method that requires a MerkleTree
// is called in a StateDB that does not have a MerkleTree defined
ErrStateDBWithoutMT = errors.New(
"Can not call method to use MerkleTree in a StateDB without MerkleTree")
ErrStateDBWithoutMT = errors.New("Can not call method to use MerkleTree in a StateDB without MerkleTree")
// ErrAccountAlreadyExists is used when CreateAccount is called and the
// Account already exists
@@ -29,8 +28,7 @@ var (
ErrIdxNotFound = errors.New("Idx can not be found")
// ErrGetIdxNoCase is used when trying to get the Idx from EthAddr &
// BJJ with not compatible combination
ErrGetIdxNoCase = errors.New(
"Can not get Idx due unexpected combination of ethereum Address & BabyJubJub PublicKey")
ErrGetIdxNoCase = errors.New("Can not get Idx due unexpected combination of ethereum Address & BabyJubJub PublicKey")
// PrefixKeyIdx is the key prefix for idx in the db
PrefixKeyIdx = []byte("i:")
@@ -146,8 +144,7 @@ func NewStateDB(cfg Config) (*StateDB, error) {
}
}
if cfg.Type == TypeTxSelector && cfg.NLevels != 0 {
return nil, tracerr.Wrap(
fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
return nil, tracerr.Wrap(fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
}
return &StateDB{
@@ -350,8 +347,7 @@ func GetAccountInTreeDB(sto db.Storage, idx common.Idx) (*common.Account, error)
// CreateAccount creates a new Account in the StateDB for the given Idx. If
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (
*merkletree.CircomProcessorProof, error) {
func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
cpp, err := CreateAccountInTreeDB(s.db.DB(), s.MT, idx, account)
if err != nil {
return cpp, tracerr.Wrap(err)
@@ -365,8 +361,7 @@ func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (
// from ExitTree. Creates a new Account in the StateDB for the given Idx. If
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx,
account *common.Account) (*merkletree.CircomProcessorProof, error) {
func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
// store at the DB the key: v, and value: leaf.Bytes()
v, err := account.HashValue()
if err != nil {
@@ -415,8 +410,7 @@ func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common
// UpdateAccount updates the Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (
*merkletree.CircomProcessorProof, error) {
func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
return UpdateAccountInTreeDB(s.db.DB(), s.MT, idx, account)
}
@@ -424,8 +418,7 @@ func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (
// from ExitTree. Updates the Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func UpdateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx,
account *common.Account) (*merkletree.CircomProcessorProof, error) {
func UpdateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
// store at the DB the key: v, and value: account.Bytes()
v, err := account.HashValue()
if err != nil {
@@ -510,7 +503,7 @@ func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error)
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// Reset performs a reset in the LocaStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {

View File

@@ -22,8 +22,7 @@ import (
func newAccount(t *testing.T, i int) *common.Account {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
require.NoError(t, err)
pk := sk.Public()
@@ -372,8 +371,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder,
NLevels: 32}, sdb)
ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -394,8 +392,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder,
NLevels: 32}, sdb)
ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -474,8 +471,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
// test value from js version (compatibility-canary)
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
(hex.EncodeToString(ay0.Bytes())))
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", (hex.EncodeToString(ay0.Bytes())))
bjjPoint0Comp := babyjub.PackSignY(true, ay0)
bjj0 := babyjub.PublicKeyComp(bjjPoint0Comp)
@@ -534,9 +530,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
require.NoError(t, err)
}
// root value generated by js version:
assert.Equal(t,
"13174362770971232417413036794215823584762073355951212910715422236001731746065",
sdb.MT.Root().BigInt().String())
assert.Equal(t, "17298264051379321456969039521810887093935433569451713402227686942080129181291", sdb.MT.Root().BigInt().String())
}
// TestListCheckpoints performs almost the same test than kvdb/kvdb_test.go

View File

@@ -18,8 +18,7 @@ func concatEthAddrTokenID(addr ethCommon.Address, tokenID common.TokenID) []byte
b = append(b[:], tokenID.Bytes()[:]...)
return b
}
func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp,
tokenID common.TokenID) []byte {
func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp, tokenID common.TokenID) []byte {
pkComp := pk
var b []byte
b = append(b, addr.Bytes()...)
@@ -33,8 +32,7 @@ func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp,
// - key: EthAddr & BabyJubJub PublicKey Compressed, value: idx
// If Idx already exist for the given EthAddr & BJJ, the remaining Idx will be
// always the smallest one.
func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address,
pk babyjub.PublicKeyComp, tokenID common.TokenID) error {
func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address, pk babyjub.PublicKeyComp, tokenID common.TokenID) error {
oldIdx, err := s.GetIdxByEthAddrBJJ(addr, pk, tokenID)
if err == nil {
// EthAddr & BJJ already have an Idx
@@ -42,8 +40,7 @@ func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address,
// if new idx is smaller, store the new one
// if new idx is bigger, don't store and return, as the used one will be the old
if idx >= oldIdx {
log.Debug("StateDB.setIdxByEthAddrBJJ: Idx not stored because there " +
"already exist a smaller Idx for the given EthAddr & BJJ")
log.Debug("StateDB.setIdxByEthAddrBJJ: Idx not stored because there already exist a smaller Idx for the given EthAddr & BJJ")
return nil
}
}
@@ -83,8 +80,7 @@ func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address,
// GetIdxByEthAddr returns the smallest Idx in the StateDB for the given
// Ethereum Address. Will return common.Idx(0) and error in case that Idx is
// not found in the StateDB.
func (s *StateDB) GetIdxByEthAddr(addr ethCommon.Address, tokenID common.TokenID) (common.Idx,
error) {
func (s *StateDB) GetIdxByEthAddr(addr ethCommon.Address, tokenID common.TokenID) (common.Idx, error) {
k := concatEthAddrTokenID(addr, tokenID)
b, err := s.db.DB().Get(append(PrefixKeyAddr, k...))
if err != nil {
@@ -120,22 +116,18 @@ func (s *StateDB) GetIdxByEthAddrBJJ(addr ethCommon.Address, pk babyjub.PublicKe
return common.Idx(0), tracerr.Wrap(ErrIdxNotFound)
} else if err != nil {
return common.Idx(0),
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d",
ErrIdxNotFound, addr.Hex(), pk, tokenID))
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", ErrIdxNotFound, addr.Hex(), pk, tokenID))
}
idx, err := common.IdxFromBytes(b)
if err != nil {
return common.Idx(0),
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d",
err, addr.Hex(), pk, tokenID))
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", err, addr.Hex(), pk, tokenID))
}
return idx, nil
}
// rest of cases (included case ToEthAddr==0) are not possible
return common.Idx(0),
tracerr.Wrap(
fmt.Errorf("GetIdxByEthAddrBJJ: Not found, %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d",
ErrGetIdxNoCase, addr.Hex(), pk, tokenID))
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: Not found, %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", ErrGetIdxNoCase, addr.Hex(), pk, tokenID))
}
// GetTokenIDsFromIdxs returns a map containing the common.TokenID with its
@@ -145,9 +137,7 @@ func (s *StateDB) GetTokenIDsFromIdxs(idxs []common.Idx) (map[common.TokenID]com
for i := 0; i < len(idxs); i++ {
a, err := s.GetAccount(idxs[i])
if err != nil {
return nil,
tracerr.Wrap(fmt.Errorf("GetTokenIDsFromIdxs error on GetAccount with Idx==%d: %s",
idxs[i], err.Error()))
return nil, tracerr.Wrap(fmt.Errorf("GetTokenIDsFromIdxs error on GetAccount with Idx==%d: %s", idxs[i], err.Error()))
}
m[a.TokenID] = idxs[i]
}

View File

@@ -93,8 +93,8 @@ type APIConnectionController struct {
timeout time.Duration
}
// NewAPIConnectionController initialize APIConnectionController
func NewAPIConnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
// NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{
smphr: semaphore.NewWeighted(int64(maxConnections)),
timeout: timeout,
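APIConnectionController throttles concurrent API-driven work with a weighted semaphore plus a timeout. A small sketch of that mechanism using golang.org/x/sync/semaphore; the type and method names below are illustrative, only the acquire/release pattern mirrors the code above:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

type connController struct {
	smphr   *semaphore.Weighted
	timeout time.Duration
}

func newConnController(maxConnections int, timeout time.Duration) *connController {
	return &connController{
		smphr:   semaphore.NewWeighted(int64(maxConnections)),
		timeout: timeout,
	}
}

// acquire blocks until a slot is free or the timeout expires.
func (c *connController) acquire() (release func(), err error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	defer cancel()
	if err := c.smphr.Acquire(ctx, 1); err != nil {
		return nil, errors.New("too many concurrent connections")
	}
	return func() { c.smphr.Release(1) }, nil
}

func main() {
	cc := newConnController(1, 100*time.Millisecond)
	release, err := cc.acquire()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer release()
	fmt.Println("slot acquired, run the DB query here")
}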

View File

@@ -70,8 +70,7 @@ type AuctionEventInitialize struct {
}
// AuctionVariables returns the AuctionVariables from the initialize event
func (ei *AuctionEventInitialize) AuctionVariables(
InitialMinimalBidding *big.Int) *common.AuctionVariables {
func (ei *AuctionEventInitialize) AuctionVariables(InitialMinimalBidding *big.Int) *common.AuctionVariables {
return &common.AuctionVariables{
EthBlockNum: 0,
DonationAddress: ei.DonationAddress,
@@ -223,15 +222,12 @@ type AuctionInterface interface {
AuctionGetAllocationRatio() ([3]uint16, error)
AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (*types.Transaction, error)
AuctionGetDonationAddress() (*ethCommon.Address, error)
AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address,
newBootCoordinatorURL string) (*types.Transaction, error)
AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, newBootCoordinatorURL string) (*types.Transaction, error)
AuctionGetBootCoordinator() (*ethCommon.Address, error)
AuctionChangeDefaultSlotSetBid(slotSet int64,
newInitialMinBid *big.Int) (*types.Transaction, error)
AuctionChangeDefaultSlotSetBid(slotSet int64, newInitialMinBid *big.Int) (*types.Transaction, error)
// Coordinator Management
AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (*types.Transaction,
error)
AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (*types.Transaction, error)
// Slot Info
AuctionGetSlotNumber(blockNum int64) (int64, error)
@@ -241,8 +237,7 @@ type AuctionInterface interface {
AuctionGetSlotSet(slot int64) (*big.Int, error)
// Bidding
AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) (
tx *types.Transaction, err error)
AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) (tx *types.Transaction, err error)
AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, slotSets [6]bool,
maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error)
@@ -280,10 +275,8 @@ type AuctionClient struct {
}
// NewAuctionClient creates a new AuctionClient. `tokenAddress` is the address of the HEZ tokens.
func NewAuctionClient(client *EthereumClient, address ethCommon.Address,
tokenHEZCfg TokenConfig) (*AuctionClient, error) {
contractAbi, err :=
abi.JSON(strings.NewReader(string(HermezAuctionProtocol.HermezAuctionProtocolABI)))
func NewAuctionClient(client *EthereumClient, address ethCommon.Address, tokenHEZCfg TokenConfig) (*AuctionClient, error) {
contractAbi, err := abi.JSON(strings.NewReader(string(HermezAuctionProtocol.HermezAuctionProtocolABI)))
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -338,8 +331,7 @@ func (c *AuctionClient) AuctionGetSlotDeadline() (slotDeadline uint8, err error)
}
// AuctionSetOpenAuctionSlots is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetOpenAuctionSlots(
newOpenAuctionSlots uint16) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetOpenAuctionSlots(newOpenAuctionSlots uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -363,8 +355,7 @@ func (c *AuctionClient) AuctionGetOpenAuctionSlots() (openAuctionSlots uint16, e
}
// AuctionSetClosedAuctionSlots is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetClosedAuctionSlots(
newClosedAuctionSlots uint16) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetClosedAuctionSlots(newClosedAuctionSlots uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -388,8 +379,7 @@ func (c *AuctionClient) AuctionGetClosedAuctionSlots() (closedAuctionSlots uint1
}
// AuctionSetOutbidding is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetOutbidding(newOutbidding uint16) (tx *types.Transaction,
err error) {
func (c *AuctionClient) AuctionSetOutbidding(newOutbidding uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
12500000, //nolint:gomnd
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -413,8 +403,7 @@ func (c *AuctionClient) AuctionGetOutbidding() (outbidding uint16, err error) {
}
// AuctionSetAllocationRatio is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetAllocationRatio(
newAllocationRatio [3]uint16) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetAllocationRatio(newAllocationRatio [3]uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -438,8 +427,7 @@ func (c *AuctionClient) AuctionGetAllocationRatio() (allocationRation [3]uint16,
}
// AuctionSetDonationAddress is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetDonationAddress(
newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -452,8 +440,7 @@ func (c *AuctionClient) AuctionSetDonationAddress(
}
// AuctionGetDonationAddress is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.Address,
err error) {
func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.Address, err error) {
var _donationAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error {
_donationAddress, err = c.auction.GetDonationAddress(c.opts)
@@ -465,13 +452,11 @@ func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.
}
// AuctionSetBootCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address,
newBootCoordinatorURL string) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, newBootCoordinatorURL string) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
return c.auction.SetBootCoordinator(auth, newBootCoordinator,
newBootCoordinatorURL)
return c.auction.SetBootCoordinator(auth, newBootCoordinator, newBootCoordinatorURL)
},
); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed setting bootCoordinator: %w", err))
@@ -480,8 +465,7 @@ func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.A
}
// AuctionGetBootCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.Address,
err error) {
func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.Address, err error) {
var _bootCoordinator ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error {
_bootCoordinator, err = c.auction.GetBootCoordinator(c.opts)
@@ -493,8 +477,7 @@ func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.
}
// AuctionChangeDefaultSlotSetBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64,
newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64, newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -508,8 +491,7 @@ func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64,
}
// AuctionGetClaimableHEZ is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetClaimableHEZ(
claimAddress ethCommon.Address) (claimableHEZ *big.Int, err error) {
func (c *AuctionClient) AuctionGetClaimableHEZ(claimAddress ethCommon.Address) (claimableHEZ *big.Int, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error {
claimableHEZ, err = c.auction.GetClaimableHEZ(c.opts, claimAddress)
return tracerr.Wrap(err)
@@ -520,8 +502,7 @@ func (c *AuctionClient) AuctionGetClaimableHEZ(
}
// AuctionSetCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetCoordinator(forger ethCommon.Address,
coordinatorURL string) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -570,8 +551,7 @@ func (c *AuctionClient) AuctionGetSlotSet(slot int64) (slotSet *big.Int, err err
}
// AuctionGetDefaultSlotSetBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetDefaultSlotSetBid(slotSet uint8) (minBidSlotSet *big.Int,
err error) {
func (c *AuctionClient) AuctionGetDefaultSlotSetBid(slotSet uint8) (minBidSlotSet *big.Int, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error {
minBidSlotSet, err = c.auction.GetDefaultSlotSetBid(c.opts, slotSet)
return tracerr.Wrap(err)
@@ -594,8 +574,7 @@ func (c *AuctionClient) AuctionGetSlotNumber(blockNum int64) (slot int64, err er
}
// AuctionBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int,
deadline *big.Int) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -607,8 +586,7 @@ func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.I
}
tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID,
amount, nonce, deadline, tokenName)
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature)
_slot := big.NewInt(slot)
@@ -621,8 +599,8 @@ func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.I
}
// AuctionMultiBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64,
slotSets [6]bool, maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error) {
func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, slotSets [6]bool,
maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
1000000, //nolint:gomnd
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -635,14 +613,12 @@ func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlo
tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID,
amount, nonce, deadline, tokenName)
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature)
_startingSlot := big.NewInt(startingSlot)
_endingSlot := big.NewInt(endingSlot)
return c.auction.ProcessMultiBid(auth, amount, _startingSlot, _endingSlot,
slotSets, maxBid, minBid, permit)
return c.auction.ProcessMultiBid(auth, amount, _startingSlot, _endingSlot, slotSets, maxBid, minBid, permit)
},
); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed multibid: %w", err))
@@ -651,8 +627,7 @@ func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlo
}
// AuctionCanForge is the interface to call the smart contract function
func (c *AuctionClient) AuctionCanForge(forger ethCommon.Address, blockNum int64) (canForge bool,
err error) {
func (c *AuctionClient) AuctionCanForge(forger ethCommon.Address, blockNum int64) (canForge bool, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error {
canForge, err = c.auction.CanForge(c.opts, forger, big.NewInt(blockNum))
return tracerr.Wrap(err)
@@ -705,8 +680,7 @@ func (c *AuctionClient) AuctionConstants() (auctionConstants *common.AuctionCons
if err != nil {
return tracerr.Wrap(err)
}
auctionConstants.InitialMinimalBidding, err =
c.auction.INITIALMINIMALBIDDING(c.opts)
auctionConstants.InitialMinimalBidding, err = c.auction.INITIALMINIMALBIDDING(c.opts)
if err != nil {
return tracerr.Wrap(err)
}
@@ -777,35 +751,21 @@ func (c *AuctionClient) AuctionVariables() (auctionVariables *common.AuctionVari
}
var (
logAuctionNewBid = crypto.Keccak256Hash([]byte(
"NewBid(uint128,uint128,address)"))
logAuctionNewSlotDeadline = crypto.Keccak256Hash([]byte(
"NewSlotDeadline(uint8)"))
logAuctionNewClosedAuctionSlots = crypto.Keccak256Hash([]byte(
"NewClosedAuctionSlots(uint16)"))
logAuctionNewOutbidding = crypto.Keccak256Hash([]byte(
"NewOutbidding(uint16)"))
logAuctionNewDonationAddress = crypto.Keccak256Hash([]byte(
"NewDonationAddress(address)"))
logAuctionNewBootCoordinator = crypto.Keccak256Hash([]byte(
"NewBootCoordinator(address,string)"))
logAuctionNewOpenAuctionSlots = crypto.Keccak256Hash([]byte(
"NewOpenAuctionSlots(uint16)"))
logAuctionNewAllocationRatio = crypto.Keccak256Hash([]byte(
"NewAllocationRatio(uint16[3])"))
logAuctionSetCoordinator = crypto.Keccak256Hash([]byte(
"SetCoordinator(address,address,string)"))
logAuctionNewForgeAllocated = crypto.Keccak256Hash([]byte(
"NewForgeAllocated(address,address,uint128,uint128,uint128,uint128)"))
logAuctionNewDefaultSlotSetBid = crypto.Keccak256Hash([]byte(
"NewDefaultSlotSetBid(uint128,uint128)"))
logAuctionNewForge = crypto.Keccak256Hash([]byte(
"NewForge(address,uint128)"))
logAuctionHEZClaimed = crypto.Keccak256Hash([]byte(
"HEZClaimed(address,uint128)"))
logAuctionInitialize = crypto.Keccak256Hash([]byte(
"InitializeHermezAuctionProtocolEvent(address,address,string," +
"uint16,uint8,uint16,uint16,uint16[3])"))
logAuctionNewBid = crypto.Keccak256Hash([]byte("NewBid(uint128,uint128,address)"))
logAuctionNewSlotDeadline = crypto.Keccak256Hash([]byte("NewSlotDeadline(uint8)"))
logAuctionNewClosedAuctionSlots = crypto.Keccak256Hash([]byte("NewClosedAuctionSlots(uint16)"))
logAuctionNewOutbidding = crypto.Keccak256Hash([]byte("NewOutbidding(uint16)"))
logAuctionNewDonationAddress = crypto.Keccak256Hash([]byte("NewDonationAddress(address)"))
logAuctionNewBootCoordinator = crypto.Keccak256Hash([]byte("NewBootCoordinator(address,string)"))
logAuctionNewOpenAuctionSlots = crypto.Keccak256Hash([]byte("NewOpenAuctionSlots(uint16)"))
logAuctionNewAllocationRatio = crypto.Keccak256Hash([]byte("NewAllocationRatio(uint16[3])"))
logAuctionSetCoordinator = crypto.Keccak256Hash([]byte("SetCoordinator(address,address,string)"))
logAuctionNewForgeAllocated = crypto.Keccak256Hash([]byte("NewForgeAllocated(address,address,uint128,uint128,uint128,uint128)"))
logAuctionNewDefaultSlotSetBid = crypto.Keccak256Hash([]byte("NewDefaultSlotSetBid(uint128,uint128)"))
logAuctionNewForge = crypto.Keccak256Hash([]byte("NewForge(address,uint128)"))
logAuctionHEZClaimed = crypto.Keccak256Hash([]byte("HEZClaimed(address,uint128)"))
logAuctionInitialize = crypto.Keccak256Hash([]byte(
"InitializeHermezAuctionProtocolEvent(address,address,string,uint16,uint8,uint16,uint16,uint16[3])"))
)
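Each of these constants is the Keccak-256 hash of an event's canonical signature, which is what shows up in Topics[0] of an Ethereum log; AuctionEventsByBlock switches on that topic and then unpacks the non-indexed fields through the contract ABI. A standalone sketch of the topic-matching half, using the ERC-20 Transfer event as a neutral example rather than the auction events:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

var logTransfer = crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)"))

// classify reports which known event a fetched log corresponds to.
func classify(vLog types.Log) string {
	if len(vLog.Topics) == 0 {
		return "anonymous event"
	}
	switch vLog.Topics[0] {
	case logTransfer:
		// Indexed params live in Topics[1..]; the rest sit in vLog.Data,
		// which is what contractAbi.UnpackIntoInterface decodes.
		return "Transfer"
	default:
		return "unknown topic " + vLog.Topics[0].Hex()
	}
}

func main() {
	fmt.Println("Transfer topic:", logTransfer.Hex())
}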
// AuctionEventInit returns the initialize event with its corresponding block number
@@ -821,8 +781,7 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
return nil, 0, tracerr.Wrap(err)
}
if len(logs) != 1 {
return nil, 0,
tracerr.Wrap(fmt.Errorf("no event of type InitializeHermezAuctionProtocolEvent found"))
return nil, 0, tracerr.Wrap(fmt.Errorf("no event of type InitializeHermezAuctionProtocolEvent found"))
}
vLog := logs[0]
if vLog.Topics[0] != logAuctionInitialize {
@@ -870,8 +829,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got",
vLog.BlockHash.String())
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
}
switch vLog.Topics[0] {
@@ -882,8 +840,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
Address ethCommon.Address
}
var newBid AuctionEventNewBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid",
vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
newBid.BidAmount = auxNewBid.BidAmount
@@ -892,60 +849,48 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
auctionEvents.NewBid = append(auctionEvents.NewBid, newBid)
case logAuctionNewSlotDeadline:
var newSlotDeadline AuctionEventNewSlotDeadline
if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline,
"NewSlotDeadline", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, "NewSlotDeadline", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline)
case logAuctionNewClosedAuctionSlots:
var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots,
"NewClosedAuctionSlots", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, "NewClosedAuctionSlots", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
auctionEvents.NewClosedAuctionSlots =
append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
auctionEvents.NewClosedAuctionSlots = append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
case logAuctionNewOutbidding:
var newOutbidding AuctionEventNewOutbidding
if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding",
vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding)
case logAuctionNewDonationAddress:
var newDonationAddress AuctionEventNewDonationAddress
newDonationAddress.NewDonationAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewDonationAddress = append(auctionEvents.NewDonationAddress,
newDonationAddress)
auctionEvents.NewDonationAddress = append(auctionEvents.NewDonationAddress, newDonationAddress)
case logAuctionNewBootCoordinator:
var newBootCoordinator AuctionEventNewBootCoordinator
if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator,
"NewBootCoordinator", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, "NewBootCoordinator", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator,
newBootCoordinator)
auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, newBootCoordinator)
case logAuctionNewOpenAuctionSlots:
var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots,
"NewOpenAuctionSlots", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, "NewOpenAuctionSlots", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
auctionEvents.NewOpenAuctionSlots =
append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
auctionEvents.NewOpenAuctionSlots = append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
case logAuctionNewAllocationRatio:
var newAllocationRatio AuctionEventNewAllocationRatio
if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio,
"NewAllocationRatio", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, "NewAllocationRatio", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio,
newAllocationRatio)
auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, newAllocationRatio)
case logAuctionSetCoordinator:
var setCoordinator AuctionEventSetCoordinator
if err := c.contractAbi.UnpackIntoInterface(&setCoordinator,
"SetCoordinator", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, "SetCoordinator", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
@@ -953,29 +898,25 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
auctionEvents.SetCoordinator = append(auctionEvents.SetCoordinator, setCoordinator)
case logAuctionNewForgeAllocated:
var newForgeAllocated AuctionEventNewForgeAllocated
if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated,
"NewForgeAllocated", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, "NewForgeAllocated", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
newForgeAllocated.SlotToForge = new(big.Int).SetBytes(vLog.Topics[3][:]).Int64()
auctionEvents.NewForgeAllocated = append(auctionEvents.NewForgeAllocated,
newForgeAllocated)
auctionEvents.NewForgeAllocated = append(auctionEvents.NewForgeAllocated, newForgeAllocated)
case logAuctionNewDefaultSlotSetBid:
var auxNewDefaultSlotSetBid struct {
SlotSet *big.Int
NewInitialMinBid *big.Int
}
var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid,
"NewDefaultSlotSetBid", vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, "NewDefaultSlotSetBid", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid
newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64()
auctionEvents.NewDefaultSlotSetBid =
append(auctionEvents.NewDefaultSlotSetBid, newDefaultSlotSetBid)
auctionEvents.NewDefaultSlotSetBid = append(auctionEvents.NewDefaultSlotSetBid, newDefaultSlotSetBid)
case logAuctionNewForge:
var newForge AuctionEventNewForge
newForge.Forger = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
@@ -983,8 +924,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
auctionEvents.NewForge = append(auctionEvents.NewForge, newForge)
case logAuctionHEZClaimed:
var HEZClaimed AuctionEventHEZClaimed
if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed",
vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
}
HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())

View File

@@ -58,8 +58,7 @@ func TestAuctionConstants(t *testing.T) {
func TestAuctionVariables(t *testing.T) {
INITMINBID := new(big.Int)
INITMINBID.SetString(minBidStr, 10)
defaultSlotSetBid := [6]*big.Int{INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID,
INITMINBID}
defaultSlotSetBid := [6]*big.Int{INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID}
auctionVariables, err := auctionClientTest.AuctionVariables()
require.Nil(t, err)
@@ -133,8 +132,7 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
require.Nil(t, err)
assert.Equal(t, newClosedAuctionSlots,
auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
_, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
require.Nil(t, err)
}
@@ -230,8 +228,7 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
require.Nil(t, err)
assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL)
_, err = auctionClientTest.AuctionSetBootCoordinator(bootCoordinatorAddressConst,
bootCoordinatorURL)
_, err = auctionClientTest.AuctionSetBootCoordinator(bootCoordinatorAddressConst, bootCoordinatorURL)
require.Nil(t, err)
}
@@ -345,8 +342,7 @@ func TestAuctionMultiBid(t *testing.T) {
budget := new(big.Int)
budget.SetString("45200000000000000000", 10)
bidderAddress := governanceAddressConst
_, err = auctionClientTest.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet,
maxBid, minBid, deadline)
_, err = auctionClientTest.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, maxBid, minBid, deadline)
require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
@@ -387,8 +383,7 @@ func TestAuctionClaimHEZ(t *testing.T) {
}
func TestAuctionForge(t *testing.T) {
auctionClientTestHermez, err := NewAuctionClient(ethereumClientHermez,
auctionTestAddressConst, tokenHEZ)
auctionClientTestHermez, err := NewAuctionClient(ethereumClientHermez, auctionTestAddressConst, tokenHEZ)
require.Nil(t, err)
slotConst := 4
blockNum := int64(int(blocksPerSlot)*slotConst + int(genesisBlock))

View File

@@ -64,19 +64,16 @@ type ClientConfig struct {
}
// NewClient creates a new Client to interact with Ethereum and the Hermez smart contracts.
func NewClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore,
cfg *ClientConfig) (*Client, error) {
func NewClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore, cfg *ClientConfig) (*Client, error) {
ethereumClient, err := NewEthereumClient(client, account, ks, &cfg.Ethereum)
if err != nil {
return nil, tracerr.Wrap(err)
}
auctionClient, err := NewAuctionClient(ethereumClient, cfg.Auction.Address,
cfg.Auction.TokenHEZ)
auctionClient, err := NewAuctionClient(ethereumClient, cfg.Auction.Address, cfg.Auction.TokenHEZ)
if err != nil {
return nil, tracerr.Wrap(err)
}
rollupClient, err := NewRollupClient(ethereumClient, cfg.Rollup.Address,
cfg.Auction.TokenHEZ)
rollupClient, err := NewRollupClient(ethereumClient, cfg.Rollup.Address, cfg.Auction.TokenHEZ)
if err != nil {
return nil, tracerr.Wrap(err)
}

View File

@@ -64,8 +64,7 @@ type EthereumConfig struct {
GasPriceDiv uint64
}
// EthereumClient is an ethereum client to call Smart Contract methods and check blockchain
// information.
// EthereumClient is an ethereum client to call Smart Contract methods and check blockchain information.
type EthereumClient struct {
client *ethclient.Client
chainID *big.Int
@@ -77,8 +76,7 @@ type EthereumClient struct {
// NewEthereumClient creates a EthereumClient instance. The account is not mandatory (it can
// be nil). If the account is nil, CallAuth will fail with ErrAccountNil.
func NewEthereumClient(client *ethclient.Client, account *accounts.Account,
ks *ethKeystore.KeyStore, config *EthereumConfig) (*EthereumClient, error) {
func NewEthereumClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore, config *EthereumConfig) (*EthereumClient, error) {
if config == nil {
config = &EthereumConfig{
CallGasLimit: defaultCallGasLimit,
@@ -168,8 +166,7 @@ func (c *EthereumClient) NewAuth() (*bind.TransactOpts, error) {
// This call requires a valid account with Ether that can be spend during the
// call.
func (c *EthereumClient) CallAuth(gasLimit uint64,
fn func(*ethclient.Client, *bind.TransactOpts) (*types.Transaction, error)) (*types.Transaction,
error) {
fn func(*ethclient.Client, *bind.TransactOpts) (*types.Transaction, error)) (*types.Transaction, error) {
if c.account == nil {
return nil, tracerr.Wrap(ErrAccountNil)
}
@@ -215,8 +212,7 @@ func (c *EthereumClient) Call(fn func(*ethclient.Client) error) error {
}
// EthTransactionReceipt returns the transaction receipt of the given txHash
func (c *EthereumClient) EthTransactionReceipt(ctx context.Context,
txHash ethCommon.Hash) (*types.Receipt, error) {
func (c *EthereumClient) EthTransactionReceipt(ctx context.Context, txHash ethCommon.Hash) (*types.Receipt, error) {
return c.client.TransactionReceipt(ctx, txHash)
}
@@ -232,15 +228,13 @@ func (c *EthereumClient) EthLastBlock() (int64, error) {
}
// EthHeaderByNumber internally calls ethclient.Client HeaderByNumber
// func (c *EthereumClient) EthHeaderByNumber(ctx context.Context, number *big.Int) (*types.Header,
// error) {
// func (c *EthereumClient) EthHeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
// return c.client.HeaderByNumber(ctx, number)
// }
// EthBlockByNumber internally calls ethclient.Client BlockByNumber and returns
// *common.Block. If number == -1, the latest known block is returned.
func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*common.Block,
error) {
func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*common.Block, error) {
blockNum := big.NewInt(number)
if number == -1 {
blockNum = nil

View File

@@ -14,8 +14,7 @@ import (
func addBlock(url string) {
method := "POST"
payload := strings.NewReader(
"{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_mine\",\n \"params\":[],\n \"id\":1\n}")
payload := strings.NewReader("{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_mine\",\n \"params\":[],\n \"id\":1\n}")
client := &http.Client{}
req, err := http.NewRequest(method, url, payload)
@@ -46,9 +45,7 @@ func addTime(seconds float64, url string) {
secondsStr := strconv.FormatFloat(seconds, 'E', -1, 32)
method := "POST"
payload := strings.NewReader(
"{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_increaseTime\",\n \"params\":[" +
secondsStr + "],\n \"id\":1\n}")
payload := strings.NewReader("{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_increaseTime\",\n \"params\":[" + secondsStr + "],\n \"id\":1\n}")
client := &http.Client{}
req, err := http.NewRequest(method, url, payload)
@@ -69,16 +66,13 @@ func addTime(seconds float64, url string) {
}()
}
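addBlock and addTime drive the test chain by POSTing raw JSON-RPC calls (evm_mine, evm_increaseTime) to the local development node. A compact sketch of the same calls built from a struct instead of a hand-assembled string; the endpoint and the ganache-style method names are assumptions carried over from the tests above:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type rpcRequest struct {
	JSONRPC string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      int           `json:"id"`
}

// call sends a single JSON-RPC request and reports the HTTP status.
func call(url, method string, params ...interface{}) error {
	if params == nil {
		params = []interface{}{}
	}
	body, err := json.Marshal(rpcRequest{JSONRPC: "2.0", Method: method, Params: params, ID: 1})
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(method, "->", resp.Status)
	return nil
}

func main() {
	url := "http://localhost:8545" // illustrative dev-node endpoint
	if err := call(url, "evm_increaseTime", 3600); err != nil {
		log.Fatal(err)
	}
	if err := call(url, "evm_mine"); err != nil {
		log.Fatal(err)
	}
}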
func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, value, nonce,
deadline *big.Int, tokenName string) ([]byte, error) {
func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, value, nonce, deadline *big.Int, tokenName string) ([]byte, error) {
// NOTE: We ignore hash.Write errors because we are writing to a memory
// buffer and don't expect any errors to occur.
abiPermit :=
[]byte("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)")
abiPermit := []byte("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)")
hashPermit := sha3.NewLegacyKeccak256()
hashPermit.Write(abiPermit) //nolint:errcheck,gosec
abiEIP712Domain :=
[]byte("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)")
abiEIP712Domain := []byte("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)")
hashEIP712Domain := sha3.NewLegacyKeccak256()
hashEIP712Domain.Write(abiEIP712Domain) //nolint:errcheck,gosec
var encodeBytes []byte
@@ -130,8 +124,7 @@ func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, va
return hashBytes2.Sum(nil), nil
}
func createPermit(owner, spender ethCommon.Address, amount, deadline *big.Int, digest,
signature []byte) []byte {
func createPermit(owner, spender ethCommon.Address, amount, deadline *big.Int, digest, signature []byte) []byte {
r := signature[0:32]
s := signature[32:64]
v := signature[64] + byte(27) //nolint:gomnd

View File

@@ -26,8 +26,7 @@ var (
mnemonic = "explain tackle mirror kit van hammer degree position ginger unfair soup bonus"
)
func genAcc(w *hdwallet.Wallet, ks *keystore.KeyStore, i int) (*accounts.Account,
ethCommon.Address) {
func genAcc(w *hdwallet.Wallet, ks *keystore.KeyStore, i int) (*accounts.Account, ethCommon.Address) {
path := hdwallet.MustParseDerivationPath(fmt.Sprintf("m/44'/60'/0'/0/%d", i))
account, err := w.Derive(path, false)
if err != nil {
@@ -112,9 +111,7 @@ func getEnvVariables() {
if err != nil {
log.Fatal(errEnvVar)
}
if auctionAddressStr == "" || auctionTestAddressStr == "" || tokenHEZAddressStr == "" ||
hermezRollupAddressStr == "" || wdelayerAddressStr == "" || wdelayerTestAddressStr == "" ||
genesisBlockEnv == "" {
if auctionAddressStr == "" || auctionTestAddressStr == "" || tokenHEZAddressStr == "" || hermezRollupAddressStr == "" || wdelayerAddressStr == "" || wdelayerTestAddressStr == "" || genesisBlockEnv == "" {
log.Fatal(errEnvVar)
}
@@ -192,8 +189,7 @@ func TestMain(m *testing.M) {
log.Fatal(err)
}
ethereumClientEmergencyCouncil, err = NewEthereumClient(ethClient,
emergencyCouncilAccount, ks, nil)
ethereumClientEmergencyCouncil, err = NewEthereumClient(ethClient, emergencyCouncilAccount, ks, nil)
if err != nil {
log.Fatal(err)
}

View File

@@ -243,20 +243,13 @@ type RollupInterface interface {
// Public Functions
RollupForgeBatch(*RollupForgeBatchArgs, *bind.TransactOpts) (*types.Transaction, error)
RollupAddToken(tokenAddress ethCommon.Address, feeAddToken,
deadline *big.Int) (*types.Transaction, error)
RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, deadline *big.Int) (*types.Transaction, error)
RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp, tokenID uint32, numExitRoot,
idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (*types.Transaction,
error)
RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32,
numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error)
RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp, tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (*types.Transaction, error)
RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error)
RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int,
amount *big.Int, tokenID uint32, toIdx int64) (*types.Transaction, error)
RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64,
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64,
deadline *big.Int) (tx *types.Transaction, err error)
RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (*types.Transaction, error)
RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64, deadline *big.Int) (tx *types.Transaction, err error)
// Governance Public Functions
RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout int64) (*types.Transaction, error)
@@ -294,8 +287,7 @@ type RollupClient struct {
}
// NewRollupClient creates a new RollupClient
func NewRollupClient(client *EthereumClient, address ethCommon.Address,
tokenHEZCfg TokenConfig) (*RollupClient, error) {
func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZCfg TokenConfig) (*RollupClient, error) {
contractAbi, err := abi.JSON(strings.NewReader(string(Hermez.HermezABI)))
if err != nil {
return nil, tracerr.Wrap(err)
@@ -331,8 +323,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address,
}
// RollupForgeBatch is the interface to call the smart contract function
func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs,
auth *bind.TransactOpts) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.TransactOpts) (tx *types.Transaction, err error) {
if auth == nil {
auth, err = c.client.NewAuth()
if err != nil {
@@ -410,8 +401,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs,
// RollupAddToken is the interface to call the smart contract function.
// `feeAddToken` is the amount of HEZ tokens that will be paid to add the
// token. `feeAddToken` must match the public value of the smart contract.
func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToken,
deadline *big.Int) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -423,11 +413,9 @@ func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToke
}
tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID,
feeAddToken, nonce, deadline, tokenName)
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, feeAddToken, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, feeAddToken, deadline, digest,
signature)
permit := createPermit(owner, spender, feeAddToken, deadline, digest, signature)
return c.hermez.AddToken(auth, tokenAddress, permit)
},
@@ -438,9 +426,7 @@ func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToke
}
// RollupWithdrawMerkleProof is the interface to call the smart contract function
func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp, tokenID uint32,
numExitRoot, idx int64, amount *big.Int, siblings []*big.Int,
instantWithdraw bool) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp, tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -448,8 +434,7 @@ func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp,
babyPubKey := new(big.Int).SetBytes(pkCompB)
numExitRootB := uint32(numExitRoot)
idxBig := big.NewInt(idx)
return c.hermez.WithdrawMerkleProof(auth, tokenID, amount, babyPubKey,
numExitRootB, siblings, idxBig, instantWithdraw)
return c.hermez.WithdrawMerkleProof(auth, tokenID, amount, babyPubKey, numExitRootB, siblings, idxBig, instantWithdraw)
},
); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed update WithdrawMerkleProof: %w", err))
@@ -458,17 +443,13 @@ func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp,
}
// RollupWithdrawCircuit is the interface to call the smart contract function
func (c *RollupClient) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int,
tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction,
error) {
func (c *RollupClient) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error) {
log.Error("TODO")
return nil, tracerr.Wrap(errTODO)
}
// RollupL1UserTxERC20ETH is the interface to call the smart contract function
func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64,
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (tx *types.Transaction,
err error) {
func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -503,9 +484,7 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
}
// RollupL1UserTxERC20Permit is the interface to call the smart contract function
func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64,
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64,
deadline *big.Int) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64, deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -537,12 +516,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
}
tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID,
amount, nonce, deadline, tokenName)
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature)
return c.hermez.AddL1Transaction(auth, babyPubKey, fromIdxBig,
uint16(depositAmountF), uint16(amountF), tokenID, toIdxBig, permit)
return c.hermez.AddL1Transaction(auth, babyPubKey, fromIdxBig, uint16(depositAmountF),
uint16(amountF), tokenID, toIdxBig, permit)
},
); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed add L1 Tx ERC20Permit: %w", err))
@@ -574,13 +552,11 @@ func (c *RollupClient) RollupLastForgedBatch() (lastForgedBatch int64, err error
}
// RollupUpdateForgeL1L2BatchTimeout is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout(
newForgeL1L2BatchTimeout int64) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout int64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
return c.hermez.UpdateForgeL1L2BatchTimeout(auth,
uint8(newForgeL1L2BatchTimeout))
return c.hermez.UpdateForgeL1L2BatchTimeout(auth, uint8(newForgeL1L2BatchTimeout))
},
); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed update ForgeL1L2BatchTimeout: %w", err))
@@ -589,8 +565,7 @@ func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout(
}
// RollupUpdateFeeAddToken is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction,
err error) {
func (c *RollupClient) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -625,8 +600,7 @@ func (c *RollupClient) RollupUpdateBucketsParameters(
}
// RollupUpdateTokenExchange is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Address,
valueArray []uint64) (tx *types.Transaction, err error) {
func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Address, valueArray []uint64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -639,8 +613,7 @@ func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Addres
}
// RollupUpdateWithdrawalDelay is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateWithdrawalDelay(newWithdrawalDelay int64) (tx *types.Transaction,
err error) {
func (c *RollupClient) RollupUpdateWithdrawalDelay(newWithdrawalDelay int64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -666,8 +639,7 @@ func (c *RollupClient) RollupSafeMode() (tx *types.Transaction, err error) {
}
// RollupInstantWithdrawalViewer is the interface to call the smart contract function
func (c *RollupClient) RollupInstantWithdrawalViewer(tokenAddress ethCommon.Address,
amount *big.Int) (instantAllowed bool, err error) {
func (c *RollupClient) RollupInstantWithdrawalViewer(tokenAddress ethCommon.Address, amount *big.Int) (instantAllowed bool, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error {
instantAllowed, err = c.hermez.InstantWithdrawalViewer(c.opts, tokenAddress, amount)
return tracerr.Wrap(err)
@@ -702,8 +674,7 @@ func (c *RollupClient) RollupConstants() (rollupConstants *common.RollupConstant
}
newRollupVerifier.MaxTx = rollupVerifier.MaxTx.Int64()
newRollupVerifier.NLevels = rollupVerifier.NLevels.Int64()
rollupConstants.Verifiers = append(rollupConstants.Verifiers,
newRollupVerifier)
rollupConstants.Verifiers = append(rollupConstants.Verifiers, newRollupVerifier)
}
rollupConstants.HermezAuctionContract, err = c.hermez.HermezAuctionContract(c.opts)
if err != nil {
@@ -722,30 +693,19 @@ func (c *RollupClient) RollupConstants() (rollupConstants *common.RollupConstant
}
var (
logHermezL1UserTxEvent = crypto.Keccak256Hash([]byte(
"L1UserTxEvent(uint32,uint8,bytes)"))
logHermezAddToken = crypto.Keccak256Hash([]byte(
"AddToken(address,uint32)"))
logHermezForgeBatch = crypto.Keccak256Hash([]byte(
"ForgeBatch(uint32,uint16)"))
logHermezUpdateForgeL1L2BatchTimeout = crypto.Keccak256Hash([]byte(
"UpdateForgeL1L2BatchTimeout(uint8)"))
logHermezUpdateFeeAddToken = crypto.Keccak256Hash([]byte(
"UpdateFeeAddToken(uint256)"))
logHermezWithdrawEvent = crypto.Keccak256Hash([]byte(
"WithdrawEvent(uint48,uint32,bool)"))
logHermezUpdateBucketWithdraw = crypto.Keccak256Hash([]byte(
"UpdateBucketWithdraw(uint8,uint256,uint256)"))
logHermezUpdateWithdrawalDelay = crypto.Keccak256Hash([]byte(
"UpdateWithdrawalDelay(uint64)"))
logHermezUpdateBucketsParameters = crypto.Keccak256Hash([]byte(
"UpdateBucketsParameters(uint256[4][" + strconv.Itoa(common.RollupConstNumBuckets) + "])"))
logHermezUpdateTokenExchange = crypto.Keccak256Hash([]byte(
"UpdateTokenExchange(address[],uint64[])"))
logHermezSafeMode = crypto.Keccak256Hash([]byte(
"SafeMode()"))
logHermezInitialize = crypto.Keccak256Hash([]byte(
"InitializeHermezEvent(uint8,uint256,uint64)"))
logHermezL1UserTxEvent = crypto.Keccak256Hash([]byte("L1UserTxEvent(uint32,uint8,bytes)"))
logHermezAddToken = crypto.Keccak256Hash([]byte("AddToken(address,uint32)"))
logHermezForgeBatch = crypto.Keccak256Hash([]byte("ForgeBatch(uint32,uint16)"))
logHermezUpdateForgeL1L2BatchTimeout = crypto.Keccak256Hash([]byte("UpdateForgeL1L2BatchTimeout(uint8)"))
logHermezUpdateFeeAddToken = crypto.Keccak256Hash([]byte("UpdateFeeAddToken(uint256)"))
logHermezWithdrawEvent = crypto.Keccak256Hash([]byte("WithdrawEvent(uint48,uint32,bool)"))
logHermezUpdateBucketWithdraw = crypto.Keccak256Hash([]byte("UpdateBucketWithdraw(uint8,uint256,uint256)"))
logHermezUpdateWithdrawalDelay = crypto.Keccak256Hash([]byte("UpdateWithdrawalDelay(uint64)"))
logHermezUpdateBucketsParameters = crypto.Keccak256Hash([]byte("UpdateBucketsParameters(uint256[4][" +
strconv.Itoa(common.RollupConstNumBuckets) + "])"))
logHermezUpdateTokenExchange = crypto.Keccak256Hash([]byte("UpdateTokenExchange(address[],uint64[])"))
logHermezSafeMode = crypto.Keccak256Hash([]byte("SafeMode()"))
logHermezInitialize = crypto.Keccak256Hash([]byte("InitializeHermezEvent(uint8,uint256,uint64)"))
)
// RollupEventInit returns the initialize event with its corresponding block number
@@ -769,8 +729,7 @@ func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error)
}
var rollupInit RollupEventInitialize
if err := c.contractAbi.UnpackIntoInterface(&rollupInit, "InitializeHermezEvent",
vLog.Data); err != nil {
if err := c.contractAbi.UnpackIntoInterface(&rollupInit, "InitializeHermezEvent", vLog.Data); err != nil {
return nil, 0, tracerr.Wrap(err)
}
return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err)
@@ -851,8 +810,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var updateForgeL1L2BatchTimeout struct {
NewForgeL1L2BatchTimeout uint8
}
err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout,
"UpdateForgeL1L2BatchTimeout", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, "UpdateForgeL1L2BatchTimeout", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -880,16 +838,14 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
case logHermezUpdateBucketWithdraw:
var updateBucketWithdrawAux rollupEventUpdateBucketWithdrawAux
var updateBucketWithdraw RollupEventUpdateBucketWithdraw
err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux,
"UpdateBucketWithdraw", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, "UpdateBucketWithdraw", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals
updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64())
updateBucketWithdraw.BlockStamp = new(big.Int).SetBytes(vLog.Topics[2][:]).Int64()
rollupEvents.UpdateBucketWithdraw =
append(rollupEvents.UpdateBucketWithdraw, updateBucketWithdraw)
rollupEvents.UpdateBucketWithdraw = append(rollupEvents.UpdateBucketWithdraw, updateBucketWithdraw)
case logHermezUpdateWithdrawalDelay:
var withdrawalDelay RollupEventUpdateWithdrawalDelay
@@ -901,8 +857,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
case logHermezUpdateBucketsParameters:
var bucketsParametersAux rollupEventUpdateBucketsParametersAux
var bucketsParameters RollupEventUpdateBucketsParameters
err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux,
"UpdateBucketsParameters", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, "UpdateBucketsParameters", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -912,8 +867,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
bucketsParameters.ArrayBuckets[i].BlockWithdrawalRate = bucket[2]
bucketsParameters.ArrayBuckets[i].MaxWithdrawals = bucket[3]
}
rollupEvents.UpdateBucketsParameters =
append(rollupEvents.UpdateBucketsParameters, bucketsParameters)
rollupEvents.UpdateBucketsParameters = append(rollupEvents.UpdateBucketsParameters, bucketsParameters)
case logHermezUpdateTokenExchange:
var tokensExchange RollupEventUpdateTokenExchange
err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data)
@@ -945,8 +899,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the
// Rollup Smart Contract in the given transaction, and the sender address.
func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash)
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("TransactionByHash: %w", err))
@@ -961,8 +914,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
sender, err := c.client.client.TransactionSender(context.Background(), tx,
receipt.Logs[0].BlockHash, receipt.Logs[0].Index)
sender, err := c.client.client.TransactionSender(context.Background(), tx, receipt.Logs[0].BlockHash, receipt.Logs[0].Index)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
@@ -987,7 +939,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
FeeIdxCoordinator: []common.Idx{},
}
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1) //nolint:gomnd
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
@@ -997,9 +949,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
l1UserTxsData = aux.L1L2TxsData[:numBytesL1TxUser]
}
for i := 0; i < int(l1UserTxsLen); i++ {
l1Tx, err :=
common.L1TxFromDataAvailability(l1UserTxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes],
uint32(nLevels))
l1Tx, err := common.L1TxFromDataAvailability(l1UserTxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes], uint32(nLevels))
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
@@ -1011,17 +961,14 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
}
numTxsL2 := len(l2TxsData) / lenL1L2TxsBytes
for i := 0; i < numTxsL2; i++ {
l2Tx, err :=
common.L2TxFromBytesDataAvailability(l2TxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes],
int(nLevels))
l2Tx, err := common.L2TxFromBytesDataAvailability(l2TxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes], int(nLevels))
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
}
for i := 0; i < numTxsL1Coord; i++ {
bytesL1Coordinator :=
aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes] //nolint:lll
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]
@@ -1034,29 +981,24 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
return nil, nil, tracerr.Wrap(err)
}
rollupForgeBatchArgs.L1CoordinatorTxs = append(rollupForgeBatchArgs.L1CoordinatorTxs, *l1Tx)
rollupForgeBatchArgs.L1CoordinatorTxsAuths =
append(rollupForgeBatchArgs.L1CoordinatorTxsAuths, signature)
rollupForgeBatchArgs.L1CoordinatorTxsAuths = append(rollupForgeBatchArgs.L1CoordinatorTxsAuths, signature)
}
lenFeeIdxCoordinatorBytes := int(nLevels / 8) //nolint:gomnd
numFeeIdxCoordinator := len(aux.FeeIdxCoordinator) / lenFeeIdxCoordinatorBytes
for i := 0; i < numFeeIdxCoordinator; i++ {
var paddedFeeIdx [6]byte
// TODO: This check is not necessary: the first case will always work. Test it
// before removing the if.
// TODO: This check is not necessary: the first case will always work. Test it before removing the if.
if lenFeeIdxCoordinatorBytes < common.IdxBytesLen {
copy(paddedFeeIdx[6-lenFeeIdxCoordinatorBytes:],
aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
copy(paddedFeeIdx[6-lenFeeIdxCoordinatorBytes:], aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
} else {
copy(paddedFeeIdx[:],
aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
copy(paddedFeeIdx[:], aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
}
feeIdxCoordinator, err := common.IdxFromBytes(paddedFeeIdx[:])
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
if feeIdxCoordinator != common.Idx(0) {
rollupForgeBatchArgs.FeeIdxCoordinator =
append(rollupForgeBatchArgs.FeeIdxCoordinator, feeIdxCoordinator)
rollupForgeBatchArgs.FeeIdxCoordinator = append(rollupForgeBatchArgs.FeeIdxCoordinator, feeIdxCoordinator)
}
}
return &rollupForgeBatchArgs, &sender, nil
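For orientation, a minimal usage sketch of the single-line RollupWithdrawMerkleProof signature joined above. It is not part of the diff; the eth import path, the example package name, and the already-constructed *RollupClient are assumptions.

package example

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/eth"
	"github.com/iden3/go-iden3-crypto/babyjub"
)

// withdrawInstant is a hypothetical helper that forwards to the reformatted
// RollupWithdrawMerkleProof call with instantWithdraw set to true.
func withdrawInstant(c *eth.RollupClient, pk babyjub.PublicKeyComp, tokenID uint32,
	numExitRoot, idx int64, amount *big.Int, siblings []*big.Int) error {
	tx, err := c.RollupWithdrawMerkleProof(pk, tokenID, numExitRoot, idx, amount, siblings, true)
	if err != nil {
		return err
	}
	fmt.Println("withdraw tx sent:", tx.Hash().Hex())
	return nil
}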

View File

@@ -116,8 +116,7 @@ func TestRollupForgeBatch(t *testing.T) {
minBid.SetString("11000000000000000000", 10)
budget := new(big.Int)
budget.SetString("45200000000000000000", 10)
_, err = auctionClient.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet,
maxBid, minBid, deadline)
_, err = auctionClient.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, maxBid, minBid, deadline)
require.NoError(t, err)
// Add Blocks
@@ -129,18 +128,12 @@ func TestRollupForgeBatch(t *testing.T) {
// Forge Batch 1
args := new(RollupForgeBatchArgs)
// When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
args.FeeIdxCoordinator = []common.Idx{}
l1CoordinatorBytes, err := hex.DecodeString(
"1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf" +
"42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230" +
"de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
require.NoError(t, err)
numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
for i := 0; i < numTxsL1; i++ {
bytesL1Coordinator :=
l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*
common.RollupConstL1CoordinatorTotalBytes]
bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]
@@ -156,12 +149,9 @@ func TestRollupForgeBatch(t *testing.T) {
args.L1UserTxs = []common.L1Tx{}
args.L2TxsData = []common.L2Tx{}
newStateRoot := new(big.Int)
newStateRoot.SetString(
"18317824016047294649053625209337295956588174734569560016974612130063629505228",
10)
newStateRoot.SetString("18317824016047294649053625209337295956588174734569560016974612130063629505228", 10)
newExitRoot := new(big.Int)
bytesNumExitRoot, err := hex.DecodeString(
"10a89d5fe8d488eda1ba371d633515739933c706c210c604f5bd209180daa43b")
bytesNumExitRoot, err := hex.DecodeString("10a89d5fe8d488eda1ba371d633515739933c706c210c604f5bd209180daa43b")
require.NoError(t, err)
newExitRoot.SetBytes(bytesNumExitRoot)
args.NewLastIdx = int64(300)
@@ -216,8 +206,7 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
require.NoError(t, err)
assert.Equal(t, newForgeL1L2BatchTimeout,
rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
}
func TestRollupUpdateFeeAddToken(t *testing.T) {
@@ -259,8 +248,7 @@ func TestRollupUpdateWithdrawalDelay(t *testing.T) {
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
require.NoError(t, err)
assert.Equal(t, newWithdrawalDelay,
int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
assert.Equal(t, newWithdrawalDelay, int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
}
func TestRollupUpdateTokenExchange(t *testing.T) {
@@ -299,8 +287,7 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -312,13 +299,11 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
key := genKeysBjj(1)
fromIdxInt64 := int64(0)
@@ -334,8 +319,7 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -347,13 +331,11 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
key := genKeysBjj(3)
fromIdxInt64 := int64(0)
@@ -369,8 +351,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -382,13 +363,11 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxETHDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(256)
toIdxInt64 := int64(0)
@@ -404,8 +383,7 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -416,13 +394,11 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20Deposit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(257)
toIdxInt64 := int64(0)
@@ -437,8 +413,7 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -449,13 +424,11 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(258)
toIdxInt64 := int64(0)
@@ -469,8 +442,7 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -481,13 +453,11 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(256)
toIdxInt64 := int64(257)
@@ -503,8 +473,7 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -515,13 +484,11 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(257)
toIdxInt64 := int64(258)
@@ -536,8 +503,7 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -548,13 +514,11 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(258)
toIdxInt64 := int64(259)
@@ -569,8 +533,7 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -581,13 +544,11 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(256)
toIdxInt64 := int64(257)
@@ -603,8 +564,7 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -615,13 +575,11 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(257)
toIdxInt64 := int64(258)
@@ -636,8 +594,7 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -648,13 +605,11 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(258)
toIdxInt64 := int64(259)
@@ -669,8 +624,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -681,13 +635,11 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(256)
toIdxInt64 := int64(257)
@@ -702,8 +654,7 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -714,13 +665,11 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(257)
toIdxInt64 := int64(258)
@@ -734,8 +683,7 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -746,13 +694,11 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(259)
toIdxInt64 := int64(260)
@@ -766,8 +712,7 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -778,13 +723,11 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxETHForceExit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(256)
toIdxInt64 := int64(1)
@@ -799,8 +742,7 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -811,13 +753,11 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(257)
toIdxInt64 := int64(1)
@@ -831,8 +771,7 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -843,13 +782,11 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst,
tokenHEZ)
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
require.NoError(t, err)
fromIdxInt64 := int64(258)
toIdxInt64 := int64(1)
@@ -865,8 +802,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
}
L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64,
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
@@ -877,8 +813,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address,
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
}
func TestRollupForgeBatch2(t *testing.T) {
@@ -894,8 +829,7 @@ func TestRollupForgeBatch2(t *testing.T) {
// Forge Batch 3
args := new(RollupForgeBatchArgs)
// When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
args.FeeIdxCoordinator = []common.Idx{}
args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
args.L1CoordinatorTxs = argsForge.L1CoordinatorTxs
args.L1CoordinatorTxsAuths = argsForge.L1CoordinatorTxsAuths
for i := 0; i < len(L1UserTxs); i++ {
@@ -903,19 +837,14 @@ func TestRollupForgeBatch2(t *testing.T) {
l1UserTx.EffectiveAmount = l1UserTx.Amount
l1Bytes, err := l1UserTx.BytesDataAvailability(uint32(nLevels))
require.NoError(t, err)
l1UserTxDataAvailability, err := common.L1TxFromDataAvailability(l1Bytes,
uint32(nLevels))
l1UserTxDataAvailability, err := common.L1TxFromDataAvailability(l1Bytes, uint32(nLevels))
require.NoError(t, err)
args.L1UserTxs = append(args.L1UserTxs, *l1UserTxDataAvailability)
}
newStateRoot := new(big.Int)
newStateRoot.SetString(
"18317824016047294649053625209337295956588174734569560016974612130063629505228",
10)
newStateRoot.SetString("18317824016047294649053625209337295956588174734569560016974612130063629505228", 10)
newExitRoot := new(big.Int)
newExitRoot.SetString(
"1114281409737474688393837964161044726766678436313681099613347372031079422302",
10)
newExitRoot.SetString("1114281409737474688393837964161044726766678436313681099613347372031079422302", 10)
amount := new(big.Int)
amount.SetString("79000000", 10)
l2Tx := common.L2Tx{
@@ -975,8 +904,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
require.NoError(t, err)
var pkComp babyjub.PublicKeyComp
pkCompBE, err :=
hex.DecodeString("adc3b754f8da621967b073a787bef8eec7052f2ba712b23af57d98f65beea8b2")
pkCompBE, err := hex.DecodeString("adc3b754f8da621967b073a787bef8eec7052f2ba712b23af57d98f65beea8b2")
require.NoError(t, err)
pkCompLE := common.SwapEndianness(pkCompBE)
copy(pkComp[:], pkCompLE)
@@ -986,20 +914,16 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
numExitRoot := int64(3)
fromIdx := int64(256)
amount, _ := new(big.Int).SetString("20000000000000000000", 10)
// siblingBytes0, err := new(big.Int).SetString(
// "19508838618377323910556678335932426220272947530531646682154552299216398748115",
// 10)
// siblingBytes0, err := new(big.Int).SetString("19508838618377323910556678335932426220272947530531646682154552299216398748115", 10)
// require.NoError(t, err)
// siblingBytes1, err := new(big.Int).SetString(
// "15198806719713909654457742294233381653226080862567104272457668857208564789571", 10)
// siblingBytes1, err := new(big.Int).SetString("15198806719713909654457742294233381653226080862567104272457668857208564789571", 10)
// require.NoError(t, err)
var siblings []*big.Int
// siblings = append(siblings, siblingBytes0)
// siblings = append(siblings, siblingBytes1)
instantWithdraw := true
_, err = rollupClientAux.RollupWithdrawMerkleProof(pkComp, tokenID, numExitRoot, fromIdx,
amount, siblings, instantWithdraw)
_, err = rollupClientAux.RollupWithdrawMerkleProof(pkComp, tokenID, numExitRoot, fromIdx, amount, siblings, instantWithdraw)
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
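The tests above repeat the same post-transaction check: read the last block, fetch RollupEventsByBlock, and compare the first L1UserTx event against the submitted l1Tx. A possible helper that factors that pattern is sketched below; it is not part of the diff and assumes it sits in the same package as the tests, so it can use the unexported client field and the file's existing imports.

// assertLastL1UserTxEvent is a hypothetical test helper, not part of the diff.
func assertLastL1UserTxEvent(t *testing.T, c *RollupClient, l1Tx common.L1Tx, from ethCommon.Address) {
	blockNum, err := c.client.EthLastBlock()
	require.NoError(t, err)
	events, err := c.RollupEventsByBlock(blockNum, nil)
	require.NoError(t, err)
	got := events.L1UserTx[0].L1UserTx
	assert.Equal(t, l1Tx.DepositAmount, got.DepositAmount)
	assert.Equal(t, l1Tx.TokenID, got.TokenID)
	assert.Equal(t, l1Tx.Amount, got.Amount)
	assert.Equal(t, from, got.FromEthAddr)
}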

View File

@@ -132,8 +132,7 @@ type WDelayerInterface interface {
WDelayerDepositInfo(owner, token ethCommon.Address) (depositInfo DepositState, err error)
WDelayerDeposit(onwer, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error)
WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address,
amount *big.Int) (*types.Transaction, error)
WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
WDelayerConstants() (*common.WDelayerConstants, error)
@@ -144,8 +143,7 @@ type WDelayerInterface interface {
// Implementation
//
// WDelayerClient is the implementation of the interface to the WithdrawDelayer
// Smart Contract in ethereum.
// WDelayerClient is the implementation of the interface to the WithdrawDelayer Smart Contract in ethereum.
type WDelayerClient struct {
client *EthereumClient
address ethCommon.Address
@@ -174,8 +172,7 @@ func NewWDelayerClient(client *EthereumClient, address ethCommon.Address) (*WDel
}
// WDelayerGetHermezGovernanceAddress is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() (
hermezGovernanceAddress *ethCommon.Address, err error) {
func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() (hermezGovernanceAddress *ethCommon.Address, err error) {
var _hermezGovernanceAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error {
_hermezGovernanceAddress, err = c.wdelayer.GetHermezGovernanceAddress(c.opts)
@@ -187,8 +184,7 @@ func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() (
}
// WDelayerTransferGovernance is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerTransferGovernance(newAddress ethCommon.Address) (
tx *types.Transaction, err error) {
func (c *WDelayerClient) WDelayerTransferGovernance(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -214,8 +210,7 @@ func (c *WDelayerClient) WDelayerClaimGovernance() (tx *types.Transaction, err e
}
// WDelayerGetEmergencyCouncil is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress *ethCommon.Address,
err error) {
func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress *ethCommon.Address, err error) {
var _emergencyCouncilAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error {
_emergencyCouncilAddress, err = c.wdelayer.GetEmergencyCouncil(c.opts)
@@ -227,8 +222,7 @@ func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress
}
// WDelayerTransferEmergencyCouncil is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) (
tx *types.Transaction, err error) {
func (c *WDelayerClient) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -277,8 +271,7 @@ func (c *WDelayerClient) WDelayerGetWithdrawalDelay() (withdrawalDelay int64, er
}
// WDelayerGetEmergencyModeStartingTime is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetEmergencyModeStartingTime() (emergencyModeStartingTime int64,
err error) {
func (c *WDelayerClient) WDelayerGetEmergencyModeStartingTime() (emergencyModeStartingTime int64, err error) {
var _emergencyModeStartingTime uint64
if err := c.client.Call(func(ec *ethclient.Client) error {
_emergencyModeStartingTime, err = c.wdelayer.GetEmergencyModeStartingTime(c.opts)
@@ -303,8 +296,7 @@ func (c *WDelayerClient) WDelayerEnableEmergencyMode() (tx *types.Transaction, e
}
// WDelayerChangeWithdrawalDelay is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) (
tx *types.Transaction, err error) {
func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -317,8 +309,7 @@ func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64
}
// WDelayerDepositInfo is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) (
depositInfo DepositState, err error) {
func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) (depositInfo DepositState, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error {
amount, depositTimestamp, err := c.wdelayer.DepositInfo(c.opts, owner, token)
depositInfo.Amount = amount
@@ -331,8 +322,7 @@ func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) (
}
// WDelayerDeposit is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (
tx *types.Transaction, err error) {
func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -345,8 +335,7 @@ func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount
}
// WDelayerWithdrawal is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction,
err error) {
func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -359,8 +348,7 @@ func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx
}
// WDelayerEscapeHatchWithdrawal is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address,
amount *big.Int) (tx *types.Transaction, err error) {
func (c *WDelayerClient) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth(
0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -396,21 +384,14 @@ func (c *WDelayerClient) WDelayerConstants() (constants *common.WDelayerConstant
}
var (
logWDelayerDeposit = crypto.Keccak256Hash([]byte(
"Deposit(address,address,uint192,uint64)"))
logWDelayerWithdraw = crypto.Keccak256Hash([]byte(
"Withdraw(address,address,uint192)"))
logWDelayerEmergencyModeEnabled = crypto.Keccak256Hash([]byte(
"EmergencyModeEnabled()"))
logWDelayerNewWithdrawalDelay = crypto.Keccak256Hash([]byte(
"NewWithdrawalDelay(uint64)"))
logWDelayerEscapeHatchWithdrawal = crypto.Keccak256Hash([]byte(
"EscapeHatchWithdrawal(address,address,address,uint256)"))
logWDelayerNewEmergencyCouncil = crypto.Keccak256Hash([]byte(
"NewEmergencyCouncil(address)"))
logWDelayerNewHermezGovernanceAddress = crypto.Keccak256Hash([]byte(
"NewHermezGovernanceAddress(address)"))
logWDelayerInitialize = crypto.Keccak256Hash([]byte(
logWDelayerDeposit = crypto.Keccak256Hash([]byte("Deposit(address,address,uint192,uint64)"))
logWDelayerWithdraw = crypto.Keccak256Hash([]byte("Withdraw(address,address,uint192)"))
logWDelayerEmergencyModeEnabled = crypto.Keccak256Hash([]byte("EmergencyModeEnabled()"))
logWDelayerNewWithdrawalDelay = crypto.Keccak256Hash([]byte("NewWithdrawalDelay(uint64)"))
logWDelayerEscapeHatchWithdrawal = crypto.Keccak256Hash([]byte("EscapeHatchWithdrawal(address,address,address,uint256)"))
logWDelayerNewEmergencyCouncil = crypto.Keccak256Hash([]byte("NewEmergencyCouncil(address)"))
logWDelayerNewHermezGovernanceAddress = crypto.Keccak256Hash([]byte("NewHermezGovernanceAddress(address)"))
logWDelayerInitialize = crypto.Keccak256Hash([]byte(
"InitializeWithdrawalDelayerEvent(uint64,address,address)"))
)
@@ -502,51 +483,42 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
case logWDelayerEmergencyModeEnabled:
var emergencyModeEnabled WDelayerEventEmergencyModeEnabled
wdelayerEvents.EmergencyModeEnabled =
append(wdelayerEvents.EmergencyModeEnabled, emergencyModeEnabled)
wdelayerEvents.EmergencyModeEnabled = append(wdelayerEvents.EmergencyModeEnabled, emergencyModeEnabled)
case logWDelayerNewWithdrawalDelay:
var withdrawalDelay WDelayerEventNewWithdrawalDelay
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay,
"NewWithdrawalDelay", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "NewWithdrawalDelay", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
wdelayerEvents.NewWithdrawalDelay =
append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
wdelayerEvents.NewWithdrawalDelay = append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
case logWDelayerEscapeHatchWithdrawal:
var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal
err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal,
"EscapeHatchWithdrawal", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, "EscapeHatchWithdrawal", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
escapeHatchWithdrawal.Token = ethCommon.BytesToAddress(vLog.Topics[3].Bytes())
wdelayerEvents.EscapeHatchWithdrawal =
append(wdelayerEvents.EscapeHatchWithdrawal, escapeHatchWithdrawal)
wdelayerEvents.EscapeHatchWithdrawal = append(wdelayerEvents.EscapeHatchWithdrawal, escapeHatchWithdrawal)
case logWDelayerNewEmergencyCouncil:
var emergencyCouncil WDelayerEventNewEmergencyCouncil
err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil,
"NewEmergencyCouncil", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, "NewEmergencyCouncil", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
wdelayerEvents.NewEmergencyCouncil =
append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
wdelayerEvents.NewEmergencyCouncil = append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
case logWDelayerNewHermezGovernanceAddress:
var governanceAddress WDelayerEventNewHermezGovernanceAddress
err := c.contractAbi.UnpackIntoInterface(&governanceAddress,
"NewHermezGovernanceAddress", vLog.Data)
err := c.contractAbi.UnpackIntoInterface(&governanceAddress, "NewHermezGovernanceAddress", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
}
wdelayerEvents.NewHermezGovernanceAddress =
append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
wdelayerEvents.NewHermezGovernanceAddress = append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
}
}
return &wdelayerEvents, nil
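For orientation, a sketch of how a caller might drive the deposit-side WDelayerClient methods whose signatures were joined above. It is not part of the diff; the eth import path, the example package name, and the pre-built client are assumptions.

package example

import (
	"fmt"
	"math/big"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/eth"
)

// depositThenInspect is a hypothetical helper: it deposits amount of token for
// owner into the WithdrawalDelayer and then reads back the recorded deposit.
func depositThenInspect(c *eth.WDelayerClient, owner, token ethCommon.Address, amount *big.Int) error {
	if _, err := c.WDelayerDeposit(owner, token, amount); err != nil {
		return err
	}
	info, err := c.WDelayerDepositInfo(owner, token)
	if err != nil {
		return err
	}
	fmt.Println("pending deposit amount:", info.Amount)
	return nil
}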

View File

@@ -54,8 +54,7 @@ func TestWDelayerSetHermezGovernanceAddress(t *testing.T) {
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
require.Nil(t, err)
assert.Equal(t, auxAddressConst,
wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
_, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst)
require.Nil(t, err)
_, err = wdelayerClientTest.WDelayerClaimGovernance()
@@ -69,8 +68,7 @@ func TestWDelayerGetEmergencyCouncil(t *testing.T) {
}
func TestWDelayerSetEmergencyCouncil(t *testing.T) {
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil,
wdelayerTestAddressConst)
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerTestAddressConst)
require.Nil(t, err)
wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst)
require.Nil(t, err)
@@ -202,18 +200,13 @@ func TestWDelayerGetEmergencyModeStartingTime(t *testing.T) {
func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
amount := new(big.Int)
amount.SetString("10000000000000000", 10)
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil,
wdelayerTestAddressConst)
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerTestAddressConst)
require.Nil(t, err)
_, err =
wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst,
tokenHEZAddressConst, amount)
_, err = wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst, tokenHEZAddressConst, amount)
require.Contains(t, err.Error(), "NO_MAX_EMERGENCY_MODE_TIME")
seconds := maxEmergencyModeTime.Seconds()
addTime(seconds, ethClientDialURL)
_, err =
wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst,
tokenHEZAddressConst, amount)
_, err = wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst, tokenHEZAddressConst, amount)
require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)

4
go.mod
View File

@@ -11,8 +11,8 @@ require (
github.com/gin-gonic/gin v1.5.0
github.com/gobuffalo/packr/v2 v2.8.1
github.com/hermeznetwork/tracerr v0.3.1-0.20210120162744-5da60b576169
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189
github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c
github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12
github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0

19
go.sum
View File

@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -85,6 +87,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -170,6 +174,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -330,10 +336,11 @@ github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef h1:72PG9b2eDlLqKszJVLrsoJbpt4CtgJLhKOjH1MJqCVY=
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg=
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189 h1:hoarWk/SwNwnMXE0kiskcZULW0XBLQIUGYK4C39ozfs=
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189/go.mod h1:56abMeBKD4BIFe346rk+yuJ4MQgfMHe28sRx4o2gOpk=
github.com/iden3/go-iden3-crypto v0.0.6-0.20201218111145-a2015adb2f1b/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg=
github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c h1:D2u8FFYey6iFXLsqqJZ8R7ch8gZum+/b98whvoSDbyg=
github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg=
github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12 h1:DXWT0BLCSm7cJmTMQy7+iOlxkA1/5ADglufhLK52e10=
github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12/go.mod h1:FdUFTW2qJiwHyy5R70uErwq7Kaq1uskyFdTfodcUJqA=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
@@ -601,6 +608,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -619,6 +628,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=

View File

@@ -53,9 +53,10 @@ const (
// Node is the Hermez Node
type Node struct {
nodeAPI *NodeAPI
debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
nodeAPI *NodeAPI
stateAPIUpdater *api.StateAPIUpdater
debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
// Coordinator
coord *coordinator.Coordinator
@@ -67,6 +68,7 @@ type Node struct {
mode Mode
sqlConnRead *sqlx.DB
sqlConnWrite *sqlx.DB
historyDB *historydb.HistoryDB
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
@@ -107,7 +109,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
var apiConnCon *dbUtils.APIConnectionController
if cfg.API.Explorer || mode == ModeCoordinator {
apiConnCon = dbUtils.NewAPIConnectionController(
apiConnCon = dbUtils.NewAPICnnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
@@ -146,12 +148,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
return nil, tracerr.Wrap(fmt.Errorf(
"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
balance, minForgeBalance))
balance.Int64(), minForgeBalance))
}
log.Infow("forger ethereum account balance",
"addr", cfg.Coordinator.ForgerAddress,
"balance", balance,
"minForgeBalance", minForgeBalance,
"balance", balance.Int64(),
"minForgeBalance", minForgeBalance.Int64(),
)
// Unlock Coordinator ForgerAddr in the keystore to make calls
@@ -220,6 +222,44 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err)
}
sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
ChainID: chainIDU16,
})
if err != nil {
return nil, tracerr.Wrap(err)
}
initSCVars := sync.SCVars()
scConsts := common.SCConsts{
Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(),
}
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := api.NewStateAPIUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
@@ -230,25 +270,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
}
sync, err := synchronizer.NewSynchronizer(client, historyDB, l2DB, stateDB, synchronizer.Config{
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
ChainID: chainIDU16,
})
if err != nil {
return nil, tracerr.Wrap(err)
}
initSCVars := sync.SCVars()
scConsts := synchronizer.SCConsts{
Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(),
}
var coord *coordinator.Coordinator
if mode == ModeCoordinator {
// Unlock FeeAccount EthAddr in the keystore to generate the
// account creation authorization
if !keyStore.HasAddress(cfg.Coordinator.FeeAccount.Address) {
@@ -270,15 +292,14 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err := auth.Sign(func(msg []byte) ([]byte, error) {
return keyStore.SignHash(feeAccount, msg)
}, chainIDU16, cfg.SmartContracts.Rollup); err != nil {
return nil, tracerr.Wrap(err)
return nil, err
}
coordAccount := &txselector.CoordAccount{
Addr: cfg.Coordinator.FeeAccount.Address,
BJJ: cfg.Coordinator.FeeAccount.BJJ,
AccountCreationAuth: auth.Signature,
}
txSelector, err := txselector.NewTxSelector(coordAccount,
cfg.Coordinator.TxSelector.Path, stateDB, l2DB)
txSelector, err := txselector.NewTxSelector(coordAccount, cfg.Coordinator.TxSelector.Path, stateDB, l2DB)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -367,11 +388,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs,
client,
&scConsts,
&synchronizer.SCVariables{
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
initSCVars,
)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -403,23 +420,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
coord, cfg.API.Explorer,
server,
historyDB,
stateDB,
l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
},
cfg.Coordinator.ForgeDelay.Duration,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
}
var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" {
@@ -432,20 +437,138 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
ctx, cancel := context.WithCancel(context.Background())
return &Node{
nodeAPI: nodeAPI,
debugAPI: debugAPI,
priceUpdater: priceUpdater,
coord: coord,
sync: sync,
cfg: cfg,
mode: mode,
sqlConnRead: dbRead,
sqlConnWrite: dbWrite,
ctx: ctx,
cancel: cancel,
stateAPIUpdater: stateAPIUpdater,
nodeAPI: nodeAPI,
debugAPI: debugAPI,
priceUpdater: priceUpdater,
coord: coord,
sync: sync,
cfg: cfg,
mode: mode,
sqlConnRead: dbRead,
sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx,
cancel: cancel,
}, nil
}
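
The block above is the core of decoupling the API from the node: NewNode now persists the node configuration and the smart-contract constants into HistoryDB at start-up, so an API-only process can later read them from the database rather than receive them from the node in memory (note that NewNodeAPI below no longer takes a config argument). A condensed sketch of the writer side, reusing the calls shown in the diff; the exact field types are assumptions on my part:

```go
package node

import (
	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/tracerr"
)

// seedAPIState stores the node configuration and smart-contract constants in
// HistoryDB so that a standalone API server can serve them without talking to
// the node. Calls mirror the NewNode code above; field types are assumed.
func seedAPIState(historyDB *historydb.HistoryDB, maxPoolTxs uint32, minFeeUSD float64,
	scConsts common.SCConsts, chainID uint16, hermezAddr ethCommon.Address) error {
	nodeCfg := historydb.NodeConfig{
		MaxPoolTxs: maxPoolTxs,
		MinFeeUSD:  minFeeUSD,
	}
	if err := historyDB.SetNodeConfig(&nodeCfg); err != nil {
		return tracerr.Wrap(err)
	}
	consts := historydb.Constants{
		SCConsts:      scConsts,
		ChainID:       chainID,
		HermezAddress: hermezAddr,
	}
	if err := historyDB.SetConstants(&consts); err != nil {
		return tracerr.Wrap(err)
	}
	return nil
}
```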
// APIServer is a server that only runs the API
type APIServer struct {
nodeAPI *NodeAPI
mode Mode
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
}
// NewAPIServer creates a new APIServer
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
apiConnCon := dbUtils.NewAPICnnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
0,
apiConnCon,
)
}
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = cfg.Coordinator.API.Coordinator
}
nodeAPI, err := NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
l2DB,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &APIServer{
nodeAPI: nodeAPI,
mode: mode,
ctx: ctx,
cancel: cancel,
}, nil
}
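
NewAPIServer above opens separate read and write Postgres connections so the API can be pointed at a read replica: an empty HostRead falls back to the write connection, while configuring the same host for both is rejected rather than silently duplicated. A compact sketch of just that decision; initSQLDB stands in for dbUtils.InitSQLDB and takes only a host argument for brevity:

```go
package node

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

// openReadWritePair mirrors the read/write split in NewAPIServer above.
func openReadWritePair(hostWrite, hostRead string,
	initSQLDB func(host string) (*sqlx.DB, error)) (dbWrite, dbRead *sqlx.DB, err error) {
	if dbWrite, err = initSQLDB(hostWrite); err != nil {
		return nil, nil, fmt.Errorf("init write DB: %w", err)
	}
	switch {
	case hostRead == "":
		dbRead = dbWrite // no replica configured: reads go to the write DB
	case hostRead == hostWrite:
		return nil, nil, fmt.Errorf("HostRead and HostWrite must be different")
	default:
		if dbRead, err = initSQLDB(hostRead); err != nil {
			return nil, nil, fmt.Errorf("init read DB: %w", err)
		}
	}
	return dbWrite, dbRead, nil
}
```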
// Start the APIServer
func (s *APIServer) Start() {
log.Infow("Starting api server...", "mode", s.mode)
log.Info("Starting NodeAPI...")
s.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
s.wg.Done()
}()
if err := s.nodeAPI.Run(s.ctx); err != nil {
if s.ctx.Err() != nil {
return
}
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Stop the APIServer
func (s *APIServer) Stop() {
log.Infow("Stopping NodeAPI...")
s.cancel()
s.wg.Wait()
}
// NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint
api *api.API
@@ -465,10 +588,7 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine,
hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB,
config *api.Config,
forgeDelay time.Duration,
) (*NodeAPI, error) {
engine := gin.Default()
engine.NoRoute(handleNoRoute)
@@ -478,10 +598,6 @@ func NewNodeAPI(
engine,
hdb,
l2db,
config,
&api.NodeConfig{
ForgeDelay: forgeDelay.Seconds(),
},
)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -493,8 +609,8 @@ func NewNodeAPI(
}, nil
}
// Run starts the http server of the NodeAPI. To stop it, pass a context
// with cancellation.
// Run starts the http server of the NodeAPI. To stop it, pass a context with
// cancelation.
func (a *NodeAPI) Run(ctx context.Context) error {
server := &http.Server{
Handler: a.engine,
@@ -526,65 +642,56 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil
}
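
NodeAPI.Run (and DebugAPI.Run further down) stop their HTTP server through context cancellation rather than an explicit Stop method. A minimal sketch of that pattern using only net/http — the address and shutdown timeout are illustrative:

```go
package main

import (
	"context"
	"net/http"
	"time"
)

func runServer(ctx context.Context, handler http.Handler, addr string) error {
	server := &http.Server{Addr: addr, Handler: handler}
	errCh := make(chan error, 1)
	go func() {
		// ListenAndServe always returns a non-nil error; http.ErrServerClosed
		// only means Shutdown was called, so it is not reported as a failure.
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			errCh <- err
		}
	}()
	select {
	case err := <-errCh:
		return err
	case <-ctx.Done():
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		return server.Shutdown(shutdownCtx)
	}
}
```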
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats,
vars synchronizer.SCVariablesPtr, batches []common.BatchData) {
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr,
batches []common.BatchData) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats,
Vars: vars,
Vars: *vars,
Batches: batches,
})
}
if n.nodeAPI != nil {
if vars.Rollup != nil {
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.stateAPIUpdater.SetSCVars(vars)
if stats.Synced() {
if err := n.stateAPIUpdater.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
}
if vars.Auction != nil {
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
}
if vars.WDelayer != nil {
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
}
if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
} else {
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
}
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars synchronizer.SCVariablesPtr) {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: vars,
})
}
if n.nodeAPI != nil {
vars := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
n.nodeAPI.api.UpdateNetworkInfoBlock(
} else {
n.stateAPIUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariables) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: *vars.AsPtr(),
})
}
n.stateAPIUpdater.SetSCVars(vars.AsPtr())
n.stateAPIUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around.
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block,
time.Duration, error) {
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
blockData, discarded, err := n.sync.Sync(ctx, lastBlock)
stats := n.sync.Stats()
if err != nil {
@@ -594,16 +701,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars()
n.handleReorg(ctx, stats, vars)
if err := n.handleReorg(ctx, stats, vars); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return nil, time.Duration(0), nil
} else if blockData != nil {
// case: new block
vars := synchronizer.SCVariablesPtr{
vars := common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars,
}
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return &blockData.Block, time.Duration(0), nil
} else {
// case: no block
@@ -622,7 +733,9 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized
stats := n.sync.Stats()
vars := n.sync.SCVars()
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
log.Fatalw("Node.handleNewBlock", "err", err)
}
n.wg.Add(1)
go func() {
@@ -709,18 +822,25 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1)
go func() {
// Do an initial update on startup
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for {
select {
case <-n.ctx.Done():
log.Info("API.UpdateMetrics loop done")
log.Info("ApiStateUpdater.UpdateMetrics loop done")
n.wg.Done()
return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
}
}
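
The UpdateMetrics loop above and the UpdateRecommendedFee loop just below share the same shape: do an initial update on start-up, then repeat on a fixed interval until the node's context is cancelled, calling Store only after a successful update. A generic sketch of that loop (slightly simplified: the initial run here also skips Store on error); the function parameters stand in for the StateAPIUpdater methods:

```go
package node

import (
	"context"
	"time"

	"github.com/hermeznetwork/hermez-node/log"
)

// runPeriodically runs update/store once at start-up and then on every tick
// until ctx is cancelled; store is only called after a successful update.
func runPeriodically(ctx context.Context, interval time.Duration,
	update, store func() error) {
	task := func() {
		if err := update(); err != nil {
			log.Errorw("update", "err", err)
			return
		}
		if err := store(); err != nil {
			log.Errorw("store", "err", err)
		}
	}
	task() // initial run on start-up
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(interval):
			task()
		}
	}
}
```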
@@ -729,18 +849,25 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1)
go func() {
// Do an initial update on startup
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err)
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for {
select {
case <-n.ctx.Done():
log.Info("API.UpdateRecommendedFee loop done")
log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
n.wg.Done()
return
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err)
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
}
}

View File

@@ -4,12 +4,9 @@ import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/dghubble/sling"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
@@ -26,16 +23,12 @@ type APIType string
const (
// APITypeBitFinexV2 is the http API used by bitfinex V2
APITypeBitFinexV2 APIType = "bitfinexV2"
// APITypeCoingeckoV3 is the http API used by copingecko V3
APITypeCoingeckoV3 APIType = "coingeckoV3"
)
func (t *APIType) valid() bool {
switch *t {
case APITypeBitFinexV2:
return true
case APITypeCoingeckoV3:
return true
default:
return false
}
@@ -43,23 +36,23 @@ func (t *APIType) valid() bool {
// PriceUpdater definition
type PriceUpdater struct {
db *historydb.HistoryDB
apiURL string
apiType APIType
tokens []historydb.TokenSymbolAndAddr
db *historydb.HistoryDB
apiURL string
apiType APIType
tokenSymbols []string
}
// NewPriceUpdater is the constructor for the updater
func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater,
error) {
func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater, error) {
tokenSymbols := []string{}
if !apiType.valid() {
return nil, tracerr.Wrap(fmt.Errorf("Invalid apiType: %v", apiType))
}
return &PriceUpdater{
db: db,
apiURL: apiURL,
apiType: apiType,
tokens: []historydb.TokenSymbolAndAddr{},
db: db,
apiURL: apiURL,
apiType: apiType,
tokenSymbols: tokenSymbols,
}, nil
}
@@ -80,39 +73,7 @@ func getTokenPriceBitfinex(ctx context.Context, client *sling.Sling,
return state[6], nil
}
func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling,
tokenAddr ethCommon.Address) (float64, error) {
responseObject := make(map[string]map[string]float64)
var url string
var id string
if tokenAddr == common.EmptyAddr { // Special case for Ether
url = "simple/price?ids=ethereum&vs_currencies=usd"
id = "ethereum"
} else { // Common case (ERC20)
id = strings.ToLower(tokenAddr.String())
url = "simple/token_price/ethereum?contract_addresses=" +
id + "&vs_currencies=usd"
}
req, err := client.New().Get(url).Request()
if err != nil {
return 0, tracerr.Wrap(err)
}
res, err := client.Do(req.WithContext(ctx), &responseObject, nil)
if err != nil {
return 0, tracerr.Wrap(err)
}
if res.StatusCode != http.StatusOK {
return 0, tracerr.Wrap(fmt.Errorf("http response is not is %v", res.StatusCode))
}
price := responseObject[id]["usd"]
if price <= 0 {
return 0, tracerr.Wrap(fmt.Errorf("price not found for %v", id))
}
return price, nil
}
// UpdatePrices is triggered by the Coordinator, and internally will update the
// token prices in the db
// UpdatePrices is triggered by the Coordinator, and internally will update the token prices in the db
func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
tr := &http.Transport{
MaxIdleConns: defaultMaxIdleConns,
@@ -122,35 +83,33 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
httpClient := &http.Client{Transport: tr}
client := sling.New().Base(p.apiURL).Client(httpClient)
for _, token := range p.tokens {
for _, tokenSymbol := range p.tokenSymbols {
var tokenPrice float64
var err error
switch p.apiType {
case APITypeBitFinexV2:
tokenPrice, err = getTokenPriceBitfinex(ctx, client, token.Symbol)
case APITypeCoingeckoV3:
tokenPrice, err = getTokenPriceCoingecko(ctx, client, token.Addr)
tokenPrice, err = getTokenPriceBitfinex(ctx, client, tokenSymbol)
}
if ctx.Err() != nil {
return
}
if err != nil {
log.Warnw("token price not updated (get error)",
"err", err, "token", token.Symbol, "apiType", p.apiType)
"err", err, "token", tokenSymbol, "apiType", p.apiType)
}
if err = p.db.UpdateTokenValue(token.Symbol, tokenPrice); err != nil {
if err = p.db.UpdateTokenValue(tokenSymbol, tokenPrice); err != nil {
log.Errorw("token price not updated (db error)",
"err", err, "token", token.Symbol, "apiType", p.apiType)
"err", err, "token", tokenSymbol, "apiType", p.apiType)
}
}
}
// UpdateTokenList get the registered token symbols from HistoryDB
func (p *PriceUpdater) UpdateTokenList() error {
tokens, err := p.db.GetTokenSymbolsAndAddrs()
tokenSymbols, err := p.db.GetTokenSymbols()
if err != nil {
return tracerr.Wrap(err)
}
p.tokens = tokens
p.tokenSymbols = tokenSymbols
return nil
}
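
getTokenPriceBitfinex, referenced above, keeps the sling-based flow that the removed Coingecko helper also used: build a request off a base URL, decode the JSON response, and pull out the price. Bitfinex V2 ticker responses are flat JSON arrays with the last traded price at index 6 (hence `state[6]` earlier in the diff). A hedged, self-contained sketch — the exact ticker path is my assumption, not taken from the diff:

```go
package priceupdater

import (
	"context"
	"fmt"
	"net/http"

	"github.com/dghubble/sling"
)

// tokenPriceUSD fetches the last traded price of <symbol>/USD from a
// Bitfinex V2-style API (base URL e.g. "https://api-pub.bitfinex.com/v2/").
func tokenPriceUSD(ctx context.Context, apiURL, symbol string) (float64, error) {
	client := sling.New().Base(apiURL).Client(http.DefaultClient)
	req, err := client.New().Get("ticker/t" + symbol + "USD").Request()
	if err != nil {
		return 0, err
	}
	var state []float64 // the ticker response is a flat array of numbers
	res, err := client.Do(req.WithContext(ctx), &state, nil)
	if err != nil {
		return 0, err
	}
	if res.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("unexpected http status %v", res.StatusCode)
	}
	if len(state) < 7 {
		return 0, fmt.Errorf("unexpected ticker response length %d", len(state))
	}
	return state[6], nil // index 6 is LAST_PRICE
}
```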

View File

@@ -2,6 +2,7 @@ package priceupdater
import (
"context"
"math/big"
"os"
"testing"
@@ -14,45 +15,29 @@ import (
"github.com/stretchr/testify/require"
)
var historyDB *historydb.HistoryDB
func TestMain(m *testing.M) {
func TestPriceUpdater(t *testing.T) {
// Init DB
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
if err != nil {
panic(err)
}
historyDB = historydb.NewHistoryDB(db, db, nil)
assert.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, db, nil)
// Clean DB
test.WipeDB(historyDB.DB())
// Populate DB
// Gen blocks and add them to DB
blocks := test.GenBlocks(1, 2)
err = historyDB.AddBlocks(blocks)
if err != nil {
panic(err)
}
assert.NoError(t, historyDB.AddBlocks(blocks))
// Gen tokens and add them to DB
tokens := []common.Token{}
tokens = append(tokens, common.Token{
TokenID: 1,
EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f"),
EthAddr: ethCommon.BigToAddress(big.NewInt(2)),
Name: "DAI",
Symbol: "DAI",
Decimals: 18,
})
err = historyDB.AddTokens(tokens)
if err != nil {
panic(err)
}
result := m.Run()
os.Exit(result)
}
func TestPriceUpdaterBitfinex(t *testing.T) {
assert.NoError(t, historyDB.AddTokens(tokens))
// Init price updater
pu, err := NewPriceUpdater("https://api-pub.bitfinex.com/v2/", APITypeBitFinexV2, historyDB)
require.NoError(t, err)
@@ -60,29 +45,13 @@ func TestPriceUpdaterBitfinex(t *testing.T) {
assert.NoError(t, pu.UpdateTokenList())
// Update prices
pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
func TestPriceUpdaterCoingecko(t *testing.T) {
// Init price updater
pu, err := NewPriceUpdater("https://api.coingecko.com/api/v3/", APITypeCoingeckoV3, historyDB)
require.NoError(t, err)
// Update token list
assert.NoError(t, pu.UpdateTokenList())
// Update prices
pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
func assertTokenHasPriceAndClean(t *testing.T) {
// Check that prices have been updated
fetchedTokens, err := historyDB.GetTokensTest()
require.NoError(t, err)
// TokenID 0 (ETH) is always on the DB
assert.Equal(t, 2, len(fetchedTokens))
for _, token := range fetchedTokens {
require.NotNil(t, token.USD)
require.NotNil(t, token.USDUpdate)
assert.Greater(t, *token.USD, 0.0)
assert.NotNil(t, token.USD)
assert.NotNil(t, token.USDUpdate)
}
}

View File

@@ -235,7 +235,7 @@ func (p *ProofServerClient) CalculateProof(ctx context.Context, zkInputs *common
return tracerr.Wrap(p.apiInput(ctx, zkInputs))
}
// GetProof retrieves the Proof and Public Data (public inputs) from the
// GetProof retreives the Proof and Public Data (public inputs) from the
// ServerProof, blocking until the proof is ready.
func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
if err := p.WaitReady(ctx); err != nil {
@@ -256,8 +256,7 @@ func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, e
}
return &proof, pubInputs, nil
}
return nil, nil, tracerr.Wrap(fmt.Errorf("status != %v, status = %v", StatusCodeSuccess,
status.Status))
return nil, nil, tracerr.Wrap(fmt.Errorf("status != %v, status = %v", StatusCodeSuccess, status.Status))
}
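
GetProof above blocks by first calling WaitReady, i.e. it polls the proof server until the proof is available or the context is cancelled. A generic sketch of that polling loop; checkReady and the interval are stand-ins, not the actual prover client API:

```go
package prover

import (
	"context"
	"time"
)

// pollUntilReady repeatedly invokes checkReady until it reports true,
// returning early on error or when the context is cancelled.
func pollUntilReady(ctx context.Context, interval time.Duration,
	checkReady func(context.Context) (bool, error)) error {
	for {
		ready, err := checkReady(ctx)
		if err != nil {
			return err
		}
		if ready {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
}
```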
// Cancel cancels any current proof computation
@@ -298,7 +297,7 @@ func (p *MockClient) CalculateProof(ctx context.Context, zkInputs *common.ZKInpu
return nil
}
// GetProof retrieves the Proof from the ServerProof
// GetProof retreives the Proof from the ServerProof
func (p *MockClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
// Simulate a delay
select {

View File

@@ -11,7 +11,6 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
@@ -32,7 +31,7 @@ var (
ErrUnknownBlock = fmt.Errorf("unknown block")
)
// Stats of the synchronizer
// Stats of the syncrhonizer
type Stats struct {
Eth struct {
RefreshPeriod time.Duration
@@ -184,26 +183,26 @@ type StartBlockNums struct {
}
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup common.RollupVariables `validate:"required"`
Auction common.AuctionVariables `validate:"required"`
WDelayer common.WDelayerVariables `validate:"required"`
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *common.RollupVariables `validate:"required"`
Auction *common.AuctionVariables `validate:"required"`
WDelayer *common.WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup common.RollupConstants
Auction common.AuctionConstants
WDelayer common.WDelayerConstants
}
// type SCVariables struct {
// Rollup common.RollupVariables `validate:"required"`
// Auction common.AuctionVariables `validate:"required"`
// WDelayer common.WDelayerVariables `validate:"required"`
// }
//
// // SCVariablesPtr joins all the smart contract variables as pointers in a single
// // struct
// type SCVariablesPtr struct {
// Rollup *common.RollupVariables `validate:"required"`
// Auction *common.AuctionVariables `validate:"required"`
// WDelayer *common.WDelayerVariables `validate:"required"`
// }
//
// // SCConsts joins all the smart contract constants in a single struct
// type SCConsts struct {
// Rollup common.RollupConstants
// Auction common.AuctionConstants
// WDelayer common.WDelayerConstants
// }
// Config is the Synchronizer configuration
type Config struct {
@@ -214,21 +213,20 @@ type Config struct {
// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient eth.ClientInterface
consts SCConsts
consts common.SCConsts
historyDB *historydb.HistoryDB
l2DB *l2db.L2DB
stateDB *statedb.StateDB
cfg Config
initVars SCVariables
initVars common.SCVariables
startBlockNum int64
vars SCVariables
vars common.SCVariables
stats *StatsHolder
resetStateFailed bool
}
// NewSynchronizer creates a new Synchronizer
func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB, stateDB *statedb.StateDB, cfg Config) (*Synchronizer, error) {
stateDB *statedb.StateDB, cfg Config) (*Synchronizer, error) {
auctionConstants, err := ethClient.AuctionConstants()
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.AuctionConstants(): %w",
@@ -244,7 +242,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err))
}
consts := SCConsts{
consts := common.SCConsts{
Rollup: *rollupConstants,
Auction: *auctionConstants,
WDelayer: *wDelayerConstants,
@@ -273,7 +271,6 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
ethClient: ethClient,
consts: consts,
historyDB: historyDB,
l2DB: l2DB,
stateDB: stateDB,
cfg: cfg,
initVars: *initVars,
@@ -310,11 +307,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
}
// SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() SCVariablesPtr {
return SCVariablesPtr{
Rollup: s.vars.Rollup.Copy(),
Auction: s.vars.Auction.Copy(),
WDelayer: s.vars.WDelayer.Copy(),
func (s *Synchronizer) SCVars() *common.SCVariables {
return &common.SCVariables{
Rollup: *s.vars.Rollup.Copy(),
Auction: *s.vars.Auction.Copy(),
WDelayer: *s.vars.WDelayer.Copy(),
}
}
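
SCVars now returns dereferenced copies built with the variables' Copy() methods instead of handing out pointers into the synchronizer's own state, so callers (the node's sync loop, the API state updater) cannot race with the next Sync iteration. A tiny illustration of why Copy() must also duplicate pointer fields — the struct here is a stand-in, not one of the real variable types:

```go
package main

import "math/big"

// Vars is an illustrative stand-in for the smart-contract variable structs.
type Vars struct {
	FeeAddToken *big.Int
}

// Copy returns a value copy with pointer fields duplicated, so the caller
// cannot mutate (or observe mutation of) the synchronizer's internal state.
func (v *Vars) Copy() *Vars {
	cp := *v
	if v.FeeAddToken != nil {
		cp.FeeAddToken = new(big.Int).Set(v.FeeAddToken)
	}
	return &cp
}
```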
@@ -350,7 +347,7 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
}
// updateCurrentSlot updates the slot with information of the current slot.
// The information about which coordinator is allowed to forge is only updated
// The information abouth which coordinator is allowed to forge is only updated
// when we are Synced.
// hasBatch is true when the last synced block contained at least one batch.
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
@@ -403,7 +400,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
}
// updateNextSlot updates the slot with information of the next slot.
// The information about which coordinator is allowed to forge is only updated
// The information abouth which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
// We want the next block because the current one is already mined
@@ -506,9 +503,9 @@ func (s *Synchronizer) resetIntermediateState() error {
return nil
}
// Sync attempts to synchronize an ethereum block starting from lastSavedBlock.
// Sync attems to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB.
// If a block is synced, it will be returned and also stored in the DB. If a
// If a block is synched, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations
@@ -561,7 +558,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
"ethLastBlock", s.stats.Eth.LastBlock,
)
// Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
// Check that the obtianed ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
if lastSavedBlock != nil {
if lastSavedBlock.Hash != ethBlock.ParentHash {
// Reorg detected
@@ -581,7 +578,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
// If there was an error during sync, reset to the last block
// in the historyDB because the historyDB is written last in
// the Sync method and is the source of consistency. This
// allows resetting the stateDB in the case a batch was
// allows reseting the stateDB in the case a batch was
// processed but the historyDB block was not committed due to an
// error.
if err != nil {
@@ -660,7 +657,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
if len(rollupData.Batches) > 0 {
hasBatch = true
}
if err = s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
return nil, nil, tracerr.Wrap(err)
}
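
The reorg hunk just below is driven by the parent-hash check earlier in Sync: if the parent hash of the freshly fetched Ethereum block does not match the hash of the last block stored in HistoryDB, the previously synced chain is no longer canonical and the synchronizer walks back until hashes line up again. The detection step, as a one-function sketch using the common.Block fields shown above:

```go
package synchronizer

import "github.com/hermeznetwork/hermez-node/common"

// reorgDetected mirrors the check in Synchronizer.Sync: lastSaved is the last
// block persisted in HistoryDB, fetched is the block just read from the
// Ethereum node. A mismatch means the synced chain is no longer canonical.
func reorgDetected(lastSaved, fetched *common.Block) bool {
	return lastSaved != nil && fetched.ParentHash != lastSaved.Hash
}
```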
@@ -727,7 +724,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
}
func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
@@ -743,7 +740,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables()
return &SCVariables{
return &common.SCVariables{
Rollup: *rollupVars,
Auction: *auctionVars,
WDelayer: *wDelayerVars,
@@ -821,7 +818,7 @@ func (s *Synchronizer) resetState(block *common.Block) error {
return nil
}
// rollupSync retrieves all the Rollup Smart Contract Data that happened at
// rollupSync retreives all the Rollup Smart Contract Data that happened at
// ethBlock.blockNum with ethBlock.Hash.
func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, error) {
blockNum := ethBlock.Num
@@ -898,9 +895,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
position = len(l1UserTxs)
}
l1TxsAuth := make([]common.AccountCreationAuth,
0, len(forgeBatchArgs.L1CoordinatorTxsAuths))
// Get L1 Coordinator Txs
for i := range forgeBatchArgs.L1CoordinatorTxs {
l1CoordinatorTx := forgeBatchArgs.L1CoordinatorTxs[i]
@@ -916,30 +910,9 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx)
position++
// Create a slice of account creation auth to be
// inserted later if not exists
if l1CoordinatorTx.FromEthAddr != common.RollupConstEthAddressInternalOnly {
l1CoordinatorTxAuth := forgeBatchArgs.L1CoordinatorTxsAuths[i]
l1TxsAuth = append(l1TxsAuth, common.AccountCreationAuth{
EthAddr: l1CoordinatorTx.FromEthAddr,
BJJ: l1CoordinatorTx.FromBJJ,
Signature: l1CoordinatorTxAuth,
})
}
// fmt.Println("DGB l1coordtx")
}
// Insert the slice of account creation auth
// only if the node run as a coordinator
if s.l2DB != nil && len(l1TxsAuth) > 0 {
err = s.l2DB.AddManyAccountCreationAuth(l1TxsAuth)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Insert all the txs forged in this batch (l1UserTxs,
// L1CoordinatorTxs, PoolL2Txs) into stateDB so that they are
// processed.
@@ -952,8 +925,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
}
// Transform L2 txs to PoolL2Txs
// NOTE: This is a big ugly, find a better way
poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData)
poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData) // NOTE: This is a big ugly, find a better way
if int(forgeBatchArgs.VerifierIdx) >= len(s.consts.Rollup.Verifiers) {
return nil, tracerr.Wrap(fmt.Errorf("forgeBatchArgs.VerifierIdx (%v) >= "+

View File

@@ -15,7 +15,6 @@ import (
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/test"
@@ -43,8 +42,7 @@ func accountsCmp(accounts []common.Account) func(i, j int) bool {
// Check Sync output and HistoryDB state against expected values generated by
// til
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block,
syncBlock *common.BlockData) {
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBlock *common.BlockData) {
// Check Blocks
dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err)
@@ -304,14 +302,13 @@ func TestMain(m *testing.M) {
os.Exit(exitVal)
}
func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db.L2DB) {
func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
// Int State DB
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
deleteme = append(deleteme, dir)
stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
require.NoError(t, err)
// Init History DB
@@ -322,10 +319,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db
// Clear DB
test.WipeDB(historyDB.DB())
// Init L2 DB
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
return stateDB, historyDB, l2DB
return stateDB, historyDB
}
func newBigInt(s string) *big.Int {
@@ -341,7 +335,7 @@ func TestSyncGeneral(t *testing.T) {
// Setup
//
stateDB, historyDB, l2DB := newTestModules(t)
stateDB, historyDB := newTestModules(t)
// Init eth client
var timer timer
@@ -351,7 +345,7 @@ func TestSyncGeneral(t *testing.T) {
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer
s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second,
})
require.NoError(t, err)
@@ -378,9 +372,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
vars := s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err)
@@ -450,9 +444,9 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11432094872416618651837327395264042968926668786266585816625577088890451620254")
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("16914212635847451457076355431350059348585556180740555407203882688922702410093")
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
// blocks 1 (blockNum=3)
i = 1
require.Equal(t, 3, int(blocks[i].Block.Num))
@@ -461,9 +455,9 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13535760140937349829640752733057594576151546047374619177689224612061148090678")
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("19413739476363469870744893742469056615496274423228302914851564791727474664804")
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
// Generate extra required data
ethAddTokens(blocks, client)
@@ -520,11 +514,9 @@ func TestSyncGeneral(t *testing.T) {
// Block 4
// Generate 2 withdraws manually
_, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256,
big.NewInt(100), []*big.Int{}, true)
_, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256, big.NewInt(100), []*big.Int{}, true)
require.NoError(t, err)
_, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258,
big.NewInt(50), []*big.Int{}, false)
_, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258, big.NewInt(50), []*big.Int{}, false)
require.NoError(t, err)
client.CtlMineBlock()
@@ -541,9 +533,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbExits, err := s.historyDB.GetAllExits()
require.NoError(t, err)
@@ -642,9 +634,9 @@ func TestSyncGeneral(t *testing.T) {
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("14095767774967159269372103336737817266053275274769794195030162905513860477094")
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("2095674348545184674850951945506660952512376416769035169971006930847780339914")
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
for i := 0; i < 4; i++ {
client.CtlRollback()
@@ -673,9 +665,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced())
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
// At this point, the DB only has data up to block 1
dbBlock, err := s.historyDB.GetLastBlock()
@@ -712,9 +704,9 @@ func TestSyncGeneral(t *testing.T) {
}
vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
}
dbBlock, err = s.historyDB.GetLastBlock()
@@ -731,7 +723,7 @@ func TestSyncGeneral(t *testing.T) {
}
func TestSyncForgerCommitment(t *testing.T) {
stateDB, historyDB, l2DB := newTestModules(t)
stateDB, historyDB := newTestModules(t)
// Init eth client
var timer timer
@@ -744,7 +736,7 @@ func TestSyncForgerCommitment(t *testing.T) {
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer
s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second,
})
require.NoError(t, err)
@@ -827,7 +819,7 @@ func TestSyncForgerCommitment(t *testing.T) {
// Store ForgerComitmnent observed at every block by the live synchronizer
syncCommitment := map[int64]bool{}
// Store ForgerComitmnent observed at every block by a synchronizer that is restarted
// Store ForgerComitmnent observed at every block by a syncrhonizer that is restarted
syncRestartedCommitment := map[int64]bool{}
for _, block := range blocks {
// Add block data to the smart contracts
@@ -844,7 +836,7 @@ func TestSyncForgerCommitment(t *testing.T) {
require.True(t, stats.Synced())
syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
s2, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
s2, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second,
})
require.NoError(t, err)

View File

@@ -101,8 +101,8 @@ func (a *DebugAPI) handleSyncStats(c *gin.Context) {
c.JSON(http.StatusOK, stats)
}
// Run starts the http server of the DebugAPI. To stop it, pass a context
// with cancellation (see `debugapi_test.go` for an example).
// Run starts the http server of the DebugAPI. To stop it, pass a context with
// cancelation (see `debugapi_test.go` for an example).
func (a *DebugAPI) Run(ctx context.Context) error {
api := gin.Default()
api.NoRoute(handleNoRoute)

View File

@@ -44,14 +44,13 @@ func TestDebugAPI(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
require.Nil(t, err)
err = sdb.MakeCheckpoint() // Make a checkpoint to increment the batchNum
require.Nil(t, err)
addr := "localhost:12345"
// We won't test the sync/stats endpoint, so we can se the synchronizer to nil
// We won't test the sync/stats endpoint, so we can se the Syncrhonizer to nil
debugAPI := NewDebugAPI(addr, sdb, nil)
ctx, cancel := context.WithCancel(context.Background())
@@ -85,7 +84,7 @@ func TestDebugAPI(t *testing.T) {
require.Nil(t, err)
// Testing against a hardcoded value obtained by running the test and
// printing the value previously.
assert.Equal(t, "5705124827515775272209811244650636195377535115082365089650934878384850534213",
assert.Equal(t, "21765339739823365993496282904432398015268846626944509989242908567129545640185",
mtroot.String())
var accountAPI common.Account

View File

@@ -48,8 +48,7 @@ func (w *WDelayerBlock) addTransaction(tx *types.Transaction) *types.Transaction
return tx
}
func (w *WDelayerBlock) deposit(txHash ethCommon.Hash, owner, token ethCommon.Address,
amount *big.Int) {
func (w *WDelayerBlock) deposit(txHash ethCommon.Hash, owner, token ethCommon.Address, amount *big.Int) {
w.Events.Deposit = append(w.Events.Deposit, eth.WDelayerEventDeposit{
Owner: owner,
Token: token,
@@ -183,8 +182,7 @@ func (a *AuctionBlock) canForge(forger ethCommon.Address, blockNum int64) (bool,
slotToForge := a.getSlotNumber(blockNum)
// Get the relativeBlock to check if the slotDeadline has been exceeded
relativeBlock := blockNum - (a.Constants.GenesisBlockNum + (slotToForge *
int64(a.Constants.BlocksPerSlot)))
relativeBlock := blockNum - (a.Constants.GenesisBlockNum + (slotToForge * int64(a.Constants.BlocksPerSlot)))
// If the closedMinBid is 0 it means that we have to take as minBid the
// one that is set for this slot set, otherwise the one that has been
@@ -283,6 +281,10 @@ type ClientSetup struct {
// and 1 will be premined.
//nolint:gomnd
func NewClientSetupExample() *ClientSetup {
// rfield, ok := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
// if !ok {
// panic("bad rfield")
// }
initialMinimalBidding, ok := new(big.Int).SetString("10000000000000000000", 10) // 10 * (1e18)
if !ok {
panic("bad initialMinimalBidding")
@@ -624,8 +626,7 @@ func (c *Client) EthPendingNonceAt(ctx context.Context, account ethCommon.Addres
// EthNonceAt returns the account nonce of the given account. The block number can
// be nil, in which case the nonce is taken from the latest known block.
func (c *Client) EthNonceAt(ctx context.Context, account ethCommon.Address,
blockNumber *big.Int) (uint64, error) {
func (c *Client) EthNonceAt(ctx context.Context, account ethCommon.Address, blockNumber *big.Int) (uint64, error) {
// NOTE: For now Client doesn't simulate nonces
return 0, nil
}
@@ -644,8 +645,7 @@ func (c *Client) EthKeyStore() *ethKeystore.KeyStore {
// EthCall runs the transaction as a call (without paying) in the local node at
// blockNum.
func (c *Client) EthCall(ctx context.Context, tx *types.Transaction,
blockNum *big.Int) ([]byte, error) {
func (c *Client) EthCall(ctx context.Context, tx *types.Transaction, blockNum *big.Int) ([]byte, error) {
return nil, tracerr.Wrap(common.ErrTODO)
}
@@ -662,8 +662,7 @@ func (c *Client) EthLastBlock() (int64, error) {
}
// EthTransactionReceipt returns the transaction receipt of the given txHash
func (c *Client) EthTransactionReceipt(ctx context.Context,
txHash ethCommon.Hash) (*types.Receipt, error) {
func (c *Client) EthTransactionReceipt(ctx context.Context, txHash ethCommon.Hash) (*types.Receipt, error) {
c.rw.RLock()
defer c.rw.RUnlock()
@@ -779,9 +778,7 @@ var errTODO = fmt.Errorf("TODO: Not implemented yet")
// }
// RollupL1UserTxERC20Permit is the interface to call the smart contract function
func (c *Client) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64,
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64,
deadline *big.Int) (tx *types.Transaction, err error) {
func (c *Client) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64, deadline *big.Int) (tx *types.Transaction, err error) {
log.Error("TODO")
return nil, tracerr.Wrap(errTODO)
}
@@ -848,9 +845,7 @@ func (c *Client) RollupL1UserTxERC20ETH(
}
// RollupL1UserTxERC777 is the interface to call the smart contract function
// func (c *Client) RollupL1UserTxERC777(fromBJJ *babyjub.PublicKey, fromIdx int64,
// depositAmount *big.Int, amount *big.Int, tokenID uint32,
// toIdx int64) (*types.Transaction, error) {
// func (c *Client) RollupL1UserTxERC777(fromBJJ *babyjub.PublicKey, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (*types.Transaction, error) {
// log.Error("TODO")
// return nil, errTODO
// }
@@ -872,17 +867,13 @@ func (c *Client) RollupLastForgedBatch() (int64, error) {
}
// RollupWithdrawCircuit is the interface to call the smart contract function
func (c *Client) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int,
tokenID uint32, numExitRoot, idx int64, amount *big.Int,
instantWithdraw bool) (*types.Transaction, error) {
func (c *Client) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error) {
log.Error("TODO")
return nil, tracerr.Wrap(errTODO)
}
// RollupWithdrawMerkleProof is the interface to call the smart contract function
func (c *Client) RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp,
tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int,
instantWithdraw bool) (tx *types.Transaction, err error) {
func (c *Client) RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp, tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -954,8 +945,7 @@ func (c *Client) newTransaction(name string, value interface{}) *types.Transacti
}
// RollupForgeBatch is the interface to call the smart contract function
func (c *Client) RollupForgeBatch(args *eth.RollupForgeBatchArgs,
auth *bind.TransactOpts) (tx *types.Transaction, err error) {
func (c *Client) RollupForgeBatch(args *eth.RollupForgeBatchArgs, auth *bind.TransactOpts) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -983,8 +973,7 @@ func (c *Client) RollupForgeBatch(args *eth.RollupForgeBatchArgs,
// TODO: If successful, store the tx in a successful array.
// TODO: If failed, store the tx in a failed array.
// TODO: Add method to move the tx to another block, reapply it there, and possibly go from
// successful to failed.
// TODO: Add method to move the tx to another block, reapply it there, and possibly go from successful to failed.
return c.addBatch(args)
}
@@ -1029,8 +1018,7 @@ func (c *Client) addBatch(args *eth.RollupForgeBatchArgs) (*types.Transaction, e
// RollupAddTokenSimple is a wrapper around RollupAddToken that automatically
// sets `deadline`.
func (c *Client) RollupAddTokenSimple(tokenAddress ethCommon.Address,
feeAddToken *big.Int) (tx *types.Transaction, err error) {
func (c *Client) RollupAddTokenSimple(tokenAddress ethCommon.Address, feeAddToken *big.Int) (tx *types.Transaction, err error) {
return c.RollupAddToken(tokenAddress, feeAddToken, big.NewInt(9999)) //nolint:gomnd
}
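A minimal usage sketch of this wrapper (illustrative only: the helper name, token address and fee value are hypothetical; c is a test Client from this package, and the fee must equal the rollup's configured FeeAddToken or RollupAddToken returns an error):

func registerTestToken(c *Client) error {
	// Forwards to RollupAddToken with the fixed deadline of 9999 set by the wrapper.
	tokenAddr := ethCommon.HexToAddress("0x0000000000000000000000000000000000000001") // hypothetical
	if _, err := c.RollupAddTokenSimple(tokenAddr, big.NewInt(10)); err != nil {
		return tracerr.Wrap(err) // token already registered, or fee mismatch
	}
	return nil
}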
@@ -1051,16 +1039,13 @@ func (c *Client) RollupAddToken(tokenAddress ethCommon.Address, feeAddToken *big
return nil, tracerr.Wrap(fmt.Errorf("Token %v already registered", tokenAddress))
}
if feeAddToken.Cmp(r.Vars.FeeAddToken) != 0 {
return nil,
tracerr.Wrap(fmt.Errorf("Expected fee: %v but got: %v",
r.Vars.FeeAddToken, feeAddToken))
return nil, tracerr.Wrap(fmt.Errorf("Expected fee: %v but got: %v", r.Vars.FeeAddToken, feeAddToken))
}
r.State.TokenMap[tokenAddress] = true
r.State.TokenList = append(r.State.TokenList, tokenAddress)
r.Events.AddToken = append(r.Events.AddToken, eth.RollupEventAddToken{
TokenAddress: tokenAddress,
TokenID: uint32(len(r.State.TokenList) - 1)})
r.Events.AddToken = append(r.Events.AddToken, eth.RollupEventAddToken{TokenAddress: tokenAddress,
TokenID: uint32(len(r.State.TokenList) - 1)})
return r.addTransaction(c.newTransaction("addtoken", tokenAddress)), nil
}
@@ -1074,8 +1059,7 @@ func (c *Client) RollupGetCurrentTokens() (*big.Int, error) {
}
// RollupUpdateForgeL1L2BatchTimeout is the interface to call the smart contract function
func (c *Client) RollupUpdateForgeL1L2BatchTimeout(newForgeL1Timeout int64) (tx *types.Transaction,
err error) {
func (c *Client) RollupUpdateForgeL1L2BatchTimeout(newForgeL1Timeout int64) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1094,8 +1078,7 @@ func (c *Client) RollupUpdateForgeL1L2BatchTimeout(newForgeL1Timeout int64) (tx
}
// RollupUpdateFeeAddToken is the interface to call the smart contract function
func (c *Client) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction,
err error) {
func (c *Client) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1109,8 +1092,7 @@ func (c *Client) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Tra
}
// RollupUpdateTokensHEZ is the interface to call the smart contract function
// func (c *Client) RollupUpdateTokensHEZ(newTokenHEZ ethCommon.Address) (tx *types.Transaction,
// err error) {
// func (c *Client) RollupUpdateTokensHEZ(newTokenHEZ ethCommon.Address) (tx *types.Transaction, err error) {
// c.rw.Lock()
// defer c.rw.Unlock()
// cpy := c.nextBlock().copy()
@@ -1121,8 +1103,7 @@ func (c *Client) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Tra
// }
// RollupUpdateGovernance is the interface to call the smart contract function
// func (c *Client) RollupUpdateGovernance() (*types.Transaction, error) {
// // TODO (Not defined in Hermez.sol)
// func (c *Client) RollupUpdateGovernance() (*types.Transaction, error) { // TODO (Not defined in Hermez.sol)
// return nil, errTODO
// }
@@ -1161,10 +1142,8 @@ func (c *Client) RollupEventInit() (*eth.RollupEventInitialize, int64, error) {
}, 1, nil
}
// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the Rollup Smart Contract
// in the given transaction
func (c *Client) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
l1UserTxsLen uint16) (*eth.RollupForgeBatchArgs, *ethCommon.Address, error) {
// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the Rollup Smart Contract in the given transaction
func (c *Client) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*eth.RollupForgeBatchArgs, *ethCommon.Address, error) {
c.rw.RLock()
defer c.rw.RUnlock()
@@ -1203,8 +1182,7 @@ func (c *Client) AuctionGetSlotDeadline() (uint8, error) {
}
// AuctionSetOpenAuctionSlots is the interface to call the smart contract function
func (c *Client) AuctionSetOpenAuctionSlots(newOpenAuctionSlots uint16) (tx *types.Transaction,
err error) {
func (c *Client) AuctionSetOpenAuctionSlots(newOpenAuctionSlots uint16) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1232,8 +1210,7 @@ func (c *Client) AuctionGetOpenAuctionSlots() (uint16, error) {
}
// AuctionSetClosedAuctionSlots is the interface to call the smart contract function
func (c *Client) AuctionSetClosedAuctionSlots(newClosedAuctionSlots uint16) (tx *types.Transaction,
err error) {
func (c *Client) AuctionSetClosedAuctionSlots(newClosedAuctionSlots uint16) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1279,8 +1256,7 @@ func (c *Client) AuctionGetOutbidding() (uint16, error) {
}
// AuctionSetAllocationRatio is the interface to call the smart contract function
func (c *Client) AuctionSetAllocationRatio(newAllocationRatio [3]uint16) (tx *types.Transaction,
err error) {
func (c *Client) AuctionSetAllocationRatio(newAllocationRatio [3]uint16) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1303,8 +1279,7 @@ func (c *Client) AuctionGetAllocationRatio() ([3]uint16, error) {
}
// AuctionSetDonationAddress is the interface to call the smart contract function
func (c *Client) AuctionSetDonationAddress(
newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
func (c *Client) AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1327,8 +1302,7 @@ func (c *Client) AuctionGetDonationAddress() (*ethCommon.Address, error) {
}
// AuctionSetBootCoordinator is the interface to call the smart contract function
func (c *Client) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address,
newBootCoordinatorURL string) (tx *types.Transaction, err error) {
func (c *Client) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, newBootCoordinatorURL string) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1353,8 +1327,7 @@ func (c *Client) AuctionGetBootCoordinator() (*ethCommon.Address, error) {
}
// AuctionChangeDefaultSlotSetBid is the interface to call the smart contract function
func (c *Client) AuctionChangeDefaultSlotSetBid(slotSet int64,
newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
func (c *Client) AuctionChangeDefaultSlotSetBid(slotSet int64, newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1368,8 +1341,7 @@ func (c *Client) AuctionChangeDefaultSlotSetBid(slotSet int64,
}
// AuctionSetCoordinator is the interface to call the smart contract function
func (c *Client) AuctionSetCoordinator(forger ethCommon.Address,
URL string) (tx *types.Transaction, err error) {
func (c *Client) AuctionSetCoordinator(forger ethCommon.Address, URL string) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1398,8 +1370,7 @@ func (c *Client) AuctionSetCoordinator(forger ethCommon.Address,
ForgerAddress ethCommon.Address
URL string
}
return a.addTransaction(c.newTransaction("registercoordinator", data{*c.addr, forger, URL})),
nil
return a.addTransaction(c.newTransaction("registercoordinator", data{*c.addr, forger, URL})), nil
}
// AuctionIsRegisteredCoordinator is the interface to call the smart contract function
@@ -1412,8 +1383,7 @@ func (c *Client) AuctionIsRegisteredCoordinator(forgerAddress ethCommon.Address)
}
// AuctionUpdateCoordinatorInfo is the interface to call the smart contract function
func (c *Client) AuctionUpdateCoordinatorInfo(forgerAddress ethCommon.Address,
newWithdrawAddress ethCommon.Address, newURL string) (tx *types.Transaction, err error) {
func (c *Client) AuctionUpdateCoordinatorInfo(forgerAddress ethCommon.Address, newWithdrawAddress ethCommon.Address, newURL string) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1473,14 +1443,12 @@ func (c *Client) AuctionGetSlotSet(slot int64) (*big.Int, error) {
}
// AuctionTokensReceived is the interface to call the smart contract function
// func (c *Client) AuctionTokensReceived(operator, from, to ethCommon.Address, amount *big.Int,
// userData, operatorData []byte) error {
// func (c *Client) AuctionTokensReceived(operator, from, to ethCommon.Address, amount *big.Int, userData, operatorData []byte) error {
// return errTODO
// }
// AuctionBidSimple is a wrapper around AuctionBid that automatically sets `amount` and `deadline`.
func (c *Client) AuctionBidSimple(slot int64, bidAmount *big.Int) (tx *types.Transaction,
err error) {
func (c *Client) AuctionBidSimple(slot int64, bidAmount *big.Int) (tx *types.Transaction, err error) {
return c.AuctionBid(bidAmount, slot, bidAmount, big.NewInt(99999)) //nolint:gomnd
}
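A similar sketch for the auction wrapper (the slot and bid amount are hypothetical; the wrapper passes bidAmount as both amount and bidAmount, with a deadline of 99999):

func bidOnSlot(c *Client, slot int64) error {
	// Fails with errBidClosed / errBidNotOpen when the slot is outside the
	// currently open bidding window (see AuctionBid below).
	if _, err := c.AuctionBidSimple(slot, big.NewInt(100)); err != nil {
		return tracerr.Wrap(err)
	}
	return nil
}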
@@ -1503,8 +1471,7 @@ func (c *Client) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int,
return nil, tracerr.Wrap(errBidClosed)
}
if slot >
a.getCurrentSlotNumber()+int64(a.Vars.ClosedAuctionSlots)+int64(a.Vars.OpenAuctionSlots) {
if slot > a.getCurrentSlotNumber()+int64(a.Vars.ClosedAuctionSlots)+int64(a.Vars.OpenAuctionSlots) {
return nil, tracerr.Wrap(errBidNotOpen)
}
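For illustration with hypothetical values: with ClosedAuctionSlots = 2, OpenAuctionSlots = 4320 and a current slot number of 100, the check above rejects with errBidNotOpen any bid on a slot greater than 100 + 2 + 4320 = 4422, while slots still inside the closed window near the current slot are rejected with errBidClosed by the check just above this fragment.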
@@ -1541,8 +1508,8 @@ func (c *Client) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int,
// AuctionMultiBid is the interface to call the smart contract function. This
// implementation behaves as if any address has infinite tokens.
func (c *Client) AuctionMultiBid(amount *big.Int, startingSlot int64, endingSlot int64,
slotSet [6]bool, maxBid, closedMinBid, deadline *big.Int) (tx *types.Transaction, err error) {
func (c *Client) AuctionMultiBid(amount *big.Int, startingSlot int64, endingSlot int64, slotSet [6]bool,
maxBid, closedMinBid, deadline *big.Int) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1656,8 +1623,7 @@ func (c *Client) WDelayerGetHermezGovernanceAddress() (*ethCommon.Address, error
}
// WDelayerTransferGovernance is the interface to call the smart contract function
func (c *Client) WDelayerTransferGovernance(newAddress ethCommon.Address) (tx *types.Transaction,
err error) {
func (c *Client) WDelayerTransferGovernance(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1694,8 +1660,7 @@ func (c *Client) WDelayerGetEmergencyCouncil() (*ethCommon.Address, error) {
}
// WDelayerTransferEmergencyCouncil is the interface to call the smart contract function
func (c *Client) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) (
tx *types.Transaction, err error) {
func (c *Client) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1764,8 +1729,7 @@ func (c *Client) WDelayerEnableEmergencyMode() (tx *types.Transaction, err error
}
// WDelayerChangeWithdrawalDelay is the interface to call the smart contract function
func (c *Client) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) (tx *types.Transaction,
err error) {
func (c *Client) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1793,8 +1757,7 @@ func (c *Client) WDelayerDepositInfo(owner, token ethCommon.Address) (eth.Deposi
}
// WDelayerDeposit is the interface to call the smart contract function
func (c *Client) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (
tx *types.Transaction, err error) {
func (c *Client) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1808,8 +1771,7 @@ func (c *Client) WDelayerDeposit(onwer, token ethCommon.Address, amount *big.Int
}
// WDelayerWithdrawal is the interface to call the smart contract function
func (c *Client) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction,
err error) {
func (c *Client) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1823,8 +1785,7 @@ func (c *Client) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.T
}
// WDelayerEscapeHatchWithdrawal is the interface to call the smart contract function
func (c *Client) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (
tx *types.Transaction, err error) {
func (c *Client) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
c.rw.Lock()
defer c.rw.Unlock()
cpy := c.nextBlock().copy()
@@ -1882,31 +1843,25 @@ func (c *Client) CtlAddBlocks(blocks []common.BlockData) (err error) {
rollup := nextBlock.Rollup
auction := nextBlock.Auction
for _, token := range block.Rollup.AddedTokens {
if _, err := c.RollupAddTokenSimple(token.EthAddr,
rollup.Vars.FeeAddToken); err != nil {
if _, err := c.RollupAddTokenSimple(token.EthAddr, rollup.Vars.FeeAddToken); err != nil {
return tracerr.Wrap(err)
}
}
for _, tx := range block.Rollup.L1UserTxs {
c.CtlSetAddr(tx.FromEthAddr)
if _, err := c.RollupL1UserTxERC20ETH(tx.FromBJJ, int64(tx.FromIdx),
tx.DepositAmount, tx.Amount, uint32(tx.TokenID),
int64(tx.ToIdx)); err != nil {
if _, err := c.RollupL1UserTxERC20ETH(tx.FromBJJ, int64(tx.FromIdx), tx.DepositAmount, tx.Amount,
uint32(tx.TokenID), int64(tx.ToIdx)); err != nil {
return tracerr.Wrap(err)
}
}
c.CtlSetAddr(auction.Vars.BootCoordinator)
for _, batch := range block.Rollup.Batches {
auths := make([][]byte, len(batch.L1CoordinatorTxs))
for i := range auths {
auths[i] = make([]byte, 65)
}
if _, err := c.RollupForgeBatch(&eth.RollupForgeBatchArgs{
NewLastIdx: batch.Batch.LastIdx,
NewStRoot: batch.Batch.StateRoot,
NewExitRoot: batch.Batch.ExitRoot,
L1CoordinatorTxs: batch.L1CoordinatorTxs,
L1CoordinatorTxsAuths: auths,
L1CoordinatorTxsAuths: auths,
L2TxsData: batch.L2Txs,
FeeIdxCoordinator: batch.Batch.FeeIdxsCoordinator,
// Circuit selector

View File

@@ -49,9 +49,7 @@ func TestClientEth(t *testing.T) {
require.Nil(t, err)
assert.Equal(t, int64(0), block.Num)
assert.Equal(t, time.Unix(0, 0), block.Timestamp)
assert.Equal(t,
"0x0000000000000000000000000000000000000000000000000000000000000000",
block.Hash.Hex())
assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000000", block.Hash.Hex())
// Mine some empty blocks

View File

@@ -40,8 +40,7 @@ var EthToken common.Token = common.Token{
// WARNING: the generators in this file don't necessarily follow the protocol
// they are intended to check that the parsers between struct <==> DB are correct
// GenBlocks generates block from, to block numbers. WARNING: This is meant for DB/API testing, and
// may not be fully consistent with the protocol.
// GenBlocks generates block from, to block numbers. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenBlocks(from, to int64) []common.Block {
var blocks []common.Block
for i := from; i < to; i++ {
@@ -55,10 +54,8 @@ func GenBlocks(from, to int64) []common.Block {
return blocks
}
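Note that the loop bound makes the second argument exclusive, so a hypothetical call like the one below produces to-from test blocks:

	blocks := GenBlocks(1, 6) // five test blocks, for block numbers 1 through 5 per the doc comment above
	_ = blocks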
// GenTokens generates tokens. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
func GenTokens(nTokens int, blocks []common.Block) (tokensToAddInDB []common.Token,
ethToken common.Token) {
// GenTokens generates tokens. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenTokens(nTokens int, blocks []common.Block) (tokensToAddInDB []common.Token, ethToken common.Token) {
tokensToAddInDB = []common.Token{}
for i := 1; i < nTokens; i++ {
token := common.Token{
@@ -81,8 +78,7 @@ func GenTokens(nTokens int, blocks []common.Block) (tokensToAddInDB []common.Tok
}
}
// GenBatches generates batches. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
// GenBatches generates batches. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
batches := []common.Batch{}
collectedFees := make(map[common.TokenID]*big.Int)
@@ -112,10 +108,8 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
return batches
}
// GenAccounts generates accounts. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
func GenAccounts(totalAccounts, userAccounts int, tokens []common.Token,
userAddr *ethCommon.Address, userBjj *babyjub.PublicKey, batches []common.Batch) []common.Account {
// GenAccounts generates accounts. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenAccounts(totalAccounts, userAccounts int, tokens []common.Token, userAddr *ethCommon.Address, userBjj *babyjub.PublicKey, batches []common.Batch) []common.Account {
if totalAccounts < userAccounts {
panic("totalAccounts must be greater than userAccounts")
}
@@ -143,8 +137,7 @@ func GenAccounts(totalAccounts, userAccounts int, tokens []common.Token,
return accs
}
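A composition sketch of these generators as a test might wire them together (the helper name and counts are hypothetical; userAddr and userBjj are assumed to be a test user's Ethereum address and BabyJubJub public key):

func genTestFixtures(userAddr *ethCommon.Address, userBjj *babyjub.PublicKey) []common.Account {
	blocks := GenBlocks(1, 6)
	tokens, _ := GenTokens(5, blocks)
	batches := GenBatches(10, blocks)
	// 20 accounts in total, 4 of them owned by the test user; GenAccounts
	// panics if totalAccounts < userAccounts (see the check above).
	return GenAccounts(20, 4, tokens, userAddr, userBjj, batches)
}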
// GenL1Txs generates L1 txs. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
// GenL1Txs generates L1 txs. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenL1Txs(
fromIdx int,
totalTxs, nUserTxs int,
@@ -270,8 +263,7 @@ func setFromToAndAppend(
}
}
// GenL2Txs generates L2 txs. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
// GenL2Txs generates L2 txs. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenL2Txs(
fromIdx int,
totalTxs, nUserTxs int,
@@ -290,9 +282,7 @@ func GenL2Txs(
amount := big.NewInt(int64(i + 1))
fee := common.FeeSelector(i % 256) //nolint:gomnd
tx := common.L2Tx{
// only for testing purposes
TxID: common.TxID([common.TxIDLen]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i)}),
TxID: common.TxID([common.TxIDLen]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i)}), // only for testing purposes
BatchNum: batches[i%len(batches)].BatchNum,
Position: i - fromIdx,
Amount: amount,
@@ -347,8 +337,7 @@ func GenL2Txs(
return userTxs, othersTxs
}
// GenCoordinators generates coordinators. WARNING: This is meant for DB/API testing, and may not be
// fully consistent with the protocol.
// GenCoordinators generates coordinators. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenCoordinators(nCoords int, blocks []common.Block) []common.Coordinator {
coords := []common.Coordinator{}
for i := 0; i < nCoords; i++ {
@@ -362,8 +351,7 @@ func GenCoordinators(nCoords int, blocks []common.Block) []common.Coordinator {
return coords
}
// GenBids generates bids. WARNING: This is meant for DB/API testing, and may not be fully
// consistent with the protocol.
// GenBids generates bids. WARNING: This is meant for DB/API testing, and may not be fully consistent with the protocol.
func GenBids(nBids int, blocks []common.Block, coords []common.Coordinator) []common.Bid {
bids := []common.Bid{}
for i := 0; i < nBids*2; i = i + 2 { //nolint:gomnd
@@ -385,8 +373,7 @@ func GenBids(nBids int, blocks []common.Block, coords []common.Coordinator) []co
// GenExitTree generates an exitTree (as an array of Exits)
//nolint:gomnd
func GenExitTree(n int, batches []common.Batch, accounts []common.Account,
blocks []common.Block) []common.ExitInfo {
func GenExitTree(n int, batches []common.Batch, accounts []common.Account, blocks []common.Block) []common.ExitInfo {
exitTree := make([]common.ExitInfo, n)
for i := 0; i < n; i++ {
exitTree[i] = common.ExitInfo{
@@ -425,8 +412,7 @@ func GenExitTree(n int, batches []common.Batch, accounts []common.Account,
return exitTree
}
func randomAccount(seed int, userAccount bool, userAddr *ethCommon.Address,
accs []common.Account) (*common.Account, error) {
func randomAccount(seed int, userAccount bool, userAddr *ethCommon.Address, accs []common.Account) (*common.Account, error) {
i := seed % len(accs)
firstI := i
for {

View File

@@ -67,8 +67,7 @@ func GenPoolTxs(n int, tokens []common.Token) []*common.PoolL2Tx {
}
// GenAuths generates account creation authorizations
func GenAuths(nAuths int, chainID uint16,
hermezContractAddr ethCommon.Address) []*common.AccountCreationAuth {
func GenAuths(nAuths int, chainID uint16, hermezContractAddr ethCommon.Address) []*common.AccountCreationAuth {
auths := []*common.AccountCreationAuth{}
for i := 0; i < nAuths; i++ {
// Generate keys

View File

@@ -68,7 +68,6 @@ func (s *Mock) handleCancel(c *gin.Context) {
c.JSON(http.StatusOK, "OK")
}
//nolint:lll
/* Status example from the real server proof:
Status:

View File

@@ -310,8 +310,7 @@ func (p *parser) parseLine(setType setType) (*Instruction, error) {
} else if lit == "PoolL2" {
return &Instruction{Typ: "PoolL2"}, setTypeLine
} else {
return c,
tracerr.Wrap(fmt.Errorf("Invalid set type: '%s'. Valid set types: 'Blockchain', 'PoolL2'", lit))
return c, tracerr.Wrap(fmt.Errorf("Invalid set type: '%s'. Valid set types: 'Blockchain', 'PoolL2'", lit))
}
} else if lit == "AddToken" {
if err := p.expectChar(c, "("); err != nil {
@@ -392,9 +391,7 @@ func (p *parser) parseLine(setType setType) (*Instruction, error) {
return c, tracerr.Wrap(fmt.Errorf("Unexpected PoolL2 tx type: %s", lit))
}
} else {
return c,
tracerr.Wrap(fmt.Errorf("Invalid set type: '%s'. Valid set types: 'Blockchain', 'PoolL2'",
setType))
return c, tracerr.Wrap(fmt.Errorf("Invalid set type: '%s'. Valid set types: 'Blockchain', 'PoolL2'", setType))
}
if err := p.expectChar(c, "("); err != nil {
@@ -525,18 +522,14 @@ func (p *parser) parse() (*parsedSet, error) {
}
if tracerr.Unwrap(err) == setTypeLine {
if ps.typ != "" {
return ps,
tracerr.Wrap(fmt.Errorf("Line %d: Instruction of 'Type: %s' when "+
"there is already a previous instruction 'Type: %s' defined",
i, instruction.Typ, ps.typ))
return ps, tracerr.Wrap(fmt.Errorf("Line %d: Instruction of 'Type: %s' when there is already a previous instruction 'Type: %s' defined", i, instruction.Typ, ps.typ))
}
if instruction.Typ == "PoolL2" {
ps.typ = SetTypePoolL2
} else if instruction.Typ == "Blockchain" {
ps.typ = SetTypeBlockchain
} else {
log.Fatalf("Line %d: Invalid set type: '%s'. Valid set types: "+
"'Blockchain', 'PoolL2'", i, instruction.Typ)
log.Fatalf("Line %d: Invalid set type: '%s'. Valid set types: 'Blockchain', 'PoolL2'", i, instruction.Typ)
}
continue
}
@@ -559,9 +552,7 @@ func (p *parser) parse() (*parsedSet, error) {
}
ps.instructions = append(ps.instructions, *instruction)
users[instruction.From] = true
if instruction.Typ == common.TxTypeTransfer ||
instruction.Typ == common.TxTypeTransferToEthAddr ||
instruction.Typ == common.TxTypeTransferToBJJ { // type: Transfer
if instruction.Typ == common.TxTypeTransfer || instruction.Typ == common.TxTypeTransferToEthAddr || instruction.Typ == common.TxTypeTransferToBJJ { // type: Transfer
users[instruction.To] = true
}
}
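For reference, a minimal set accepted by this parser, assembled from fragments used in the tests below (illustrative only; the strings import is assumed, and newParser/parse usage mirrors the tests):

	const exampleSet = `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(1) A: 10
CreateAccountDeposit(1) B: 5
> batchL1
Transfer(1) A-B: 6 (1)
Exit(1) A: 3 (1)
> batch
> block
`
	parser := newParser(strings.NewReader(exampleSet))
	if _, err := parser.parse(); err != nil {
		panic(err) // sketch only
	}

The Type line fixes the set type, AddToken must register token IDs sequentially starting at 1, deposits create the accounts that later transfers reference, and the '>' lines ('> batchL1', '> batch', '> block') close batches and blocks.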

View File

@@ -72,19 +72,12 @@ func TestParseBlockchainTxs(t *testing.T) {
assert.Equal(t, TxTypeCreateAccountDepositCoordinator, instructions.instructions[7].Typ)
assert.Equal(t, TypeNewBatch, instructions.instructions[11].Typ)
assert.Equal(t, "Deposit(1)User0:20", instructions.instructions[16].raw())
assert.Equal(t,
"Type: DepositTransfer, From: A, To: B, DepositAmount: 15, Amount: 10, Fee: 0, TokenID: 1\n",
instructions.instructions[13].String())
assert.Equal(t,
"Type: Transfer, From: User1, To: User0, Amount: 15, Fee: 1, TokenID: 3\n",
instructions.instructions[19].String())
assert.Equal(t, "Type: DepositTransfer, From: A, To: B, DepositAmount: 15, Amount: 10, Fee: 0, TokenID: 1\n", instructions.instructions[13].String())
assert.Equal(t, "Type: Transfer, From: User1, To: User0, Amount: 15, Fee: 1, TokenID: 3\n", instructions.instructions[19].String())
assert.Equal(t, "Transfer(2)A-B:15(1)", instructions.instructions[15].raw())
assert.Equal(t,
"Type: Transfer, From: A, To: B, Amount: 15, Fee: 1, TokenID: 2\n",
instructions.instructions[15].String())
assert.Equal(t, "Type: Transfer, From: A, To: B, Amount: 15, Fee: 1, TokenID: 2\n", instructions.instructions[15].String())
assert.Equal(t, "Exit(1)A:5", instructions.instructions[24].raw())
assert.Equal(t, "Type: Exit, From: A, Amount: 5, TokenID: 1\n",
instructions.instructions[24].String())
assert.Equal(t, "Type: Exit, From: A, Amount: 5, TokenID: 1\n", instructions.instructions[24].String())
}
func TestParsePoolTxs(t *testing.T) {
@@ -165,9 +158,7 @@ func TestParseErrors(t *testing.T) {
`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 2: Transfer(1)A-B:10(256)\n, err: Fee 256 can not be bigger than 255",
err.Error())
assert.Equal(t, "Line 2: Transfer(1)A-B:10(256)\n, err: Fee 256 can not be bigger than 255", err.Error())
// check that the PoolTransfer & Transfer are only accepted in the
// correct case (PoolTxs/BlockchainTxs)
@@ -184,9 +175,7 @@ func TestParseErrors(t *testing.T) {
`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 2: PoolTransfer, err: Unexpected Blockchain tx type: PoolTransfer",
err.Error())
assert.Equal(t, "Line 2: PoolTransfer, err: Unexpected Blockchain tx type: PoolTransfer", err.Error())
s = `
Type: Blockchain
@@ -194,9 +183,7 @@ func TestParseErrors(t *testing.T) {
`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 2: >, err: Unexpected '> btch', expected '> batch' or '> block'",
err.Error())
assert.Equal(t, "Line 2: >, err: Unexpected '> btch', expected '> batch' or '> block'", err.Error())
// check definition of set Type
s = `PoolTransfer(1) A-B: 10 (1)`
@@ -206,23 +193,17 @@ func TestParseErrors(t *testing.T) {
s = `Type: PoolL1`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 1: Type:, err: Invalid set type: 'PoolL1'. Valid set types: 'Blockchain', 'PoolL2'",
err.Error())
assert.Equal(t, "Line 1: Type:, err: Invalid set type: 'PoolL1'. Valid set types: 'Blockchain', 'PoolL2'", err.Error())
s = `Type: PoolL1
Type: Blockchain`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 1: Type:, err: Invalid set type: 'PoolL1'. Valid set types: 'Blockchain', 'PoolL2'",
err.Error())
assert.Equal(t, "Line 1: Type:, err: Invalid set type: 'PoolL1'. Valid set types: 'Blockchain', 'PoolL2'", err.Error())
s = `Type: PoolL2
Type: Blockchain`
parser = newParser(strings.NewReader(s))
_, err = parser.parse()
assert.Equal(t,
"Line 2: Instruction of 'Type: Blockchain' when there is already a previous "+
"instruction 'Type: PoolL2' defined", err.Error())
assert.Equal(t, "Line 2: Instruction of 'Type: Blockchain' when there is already a previous instruction 'Type: PoolL2' defined", err.Error())
s = `Type: Blockchain
AddToken(1)

View File

@@ -161,9 +161,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) {
return nil, tracerr.Wrap(err)
}
if parsedSet.typ != SetTypeBlockchain {
return nil,
tracerr.Wrap(fmt.Errorf("Expected set type: %s, found: %s",
SetTypeBlockchain, parsedSet.typ))
return nil, tracerr.Wrap(fmt.Errorf("Expected set type: %s, found: %s", SetTypeBlockchain, parsedSet.typ))
}
tc.instructions = parsedSet.instructions
@@ -211,9 +209,7 @@ func (tc *Context) generateBlocks() ([]common.BlockData, error) {
TokenID: inst.TokenID,
Amount: big.NewInt(0),
DepositAmount: big.NewInt(0),
// as TxTypeCreateAccountDepositCoordinator is
// not valid outside Til package
Type: common.TxTypeCreateAccountDeposit,
Type: common.TxTypeCreateAccountDeposit, // as TxTypeCreateAccountDepositCoordinator is not valid outside Til package
}
testTx := L1Tx{
lineNum: inst.LineNum,
@@ -222,8 +218,7 @@ func (tc *Context) generateBlocks() ([]common.BlockData, error) {
}
tc.currBatchTest.l1CoordinatorTxs = append(tc.currBatchTest.l1CoordinatorTxs, testTx)
case common.TxTypeCreateAccountDeposit, common.TxTypeCreateAccountDepositTransfer:
// tx source: L1UserTx
case common.TxTypeCreateAccountDeposit, common.TxTypeCreateAccountDepositTransfer: // tx source: L1UserTx
if err := tc.checkIfTokenIsRegistered(inst); err != nil {
log.Error(err)
return nil, tracerr.Wrap(fmt.Errorf("Line %d: %s", inst.LineNum, err.Error()))
@@ -286,8 +281,7 @@ func (tc *Context) generateBlocks() ([]common.BlockData, error) {
Type: common.TxTypeTransfer,
EthBlockNum: tc.blockNum,
}
// when converted to PoolL2Tx BatchNum parameter is lost
tx.BatchNum = common.BatchNum(tc.currBatchNum)
tx.BatchNum = common.BatchNum(tc.currBatchNum) // when converted to PoolL2Tx BatchNum parameter is lost
testTx := L2Tx{
lineNum: inst.LineNum,
fromIdxName: inst.From,
@@ -328,8 +322,7 @@ func (tc *Context) generateBlocks() ([]common.BlockData, error) {
Type: common.TxTypeExit,
EthBlockNum: tc.blockNum,
}
// when converted to PoolL2Tx BatchNum parameter is lost
tx.BatchNum = common.BatchNum(tc.currBatchNum)
tx.BatchNum = common.BatchNum(tc.currBatchNum) // when converted to PoolL2Tx BatchNum parameter is lost
testTx := L2Tx{
lineNum: inst.LineNum,
fromIdxName: inst.From,
@@ -402,10 +395,7 @@ func (tc *Context) generateBlocks() ([]common.BlockData, error) {
EthBlockNum: tc.blockNum,
}
if inst.TokenID != tc.LastRegisteredTokenID+1 {
return nil,
tracerr.Wrap(fmt.Errorf("Line %d: AddToken TokenID should be "+
"sequential, expected TokenID: %d, defined TokenID: %d",
inst.LineNum, tc.LastRegisteredTokenID+1, inst.TokenID))
return nil, tracerr.Wrap(fmt.Errorf("Line %d: AddToken TokenID should be sequential, expected TokenID: %d, defined TokenID: %d", inst.LineNum, tc.LastRegisteredTokenID+1, inst.TokenID))
}
tc.LastRegisteredTokenID++
tc.currBlock.Rollup.AddedTokens = append(tc.currBlock.Rollup.AddedTokens, newToken)
@@ -423,13 +413,9 @@ func (tc *Context) calculateIdxForL1Txs(isCoordinatorTxs bool, txs []L1Tx) error
// for each batch.L1CoordinatorTxs of the Queues[ToForgeNum], calculate the Idx
for i := 0; i < len(txs); i++ {
tx := txs[i]
if tx.L1Tx.Type == common.TxTypeCreateAccountDeposit ||
tx.L1Tx.Type == common.TxTypeCreateAccountDepositTransfer {
if tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID] != nil {
// if account already exists, return error
return tracerr.Wrap(fmt.Errorf("Can not create same account twice "+
"(same User (%s) & same TokenID (%d)) (this is a design property of Til)",
tx.fromIdxName, tx.L1Tx.TokenID))
if tx.L1Tx.Type == common.TxTypeCreateAccountDeposit || tx.L1Tx.Type == common.TxTypeCreateAccountDepositTransfer {
if tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID] != nil { // if account already exists, return error
return tracerr.Wrap(fmt.Errorf("Can not create same account twice (same User (%s) & same TokenID (%d)) (this is a design property of Til)", tx.fromIdxName, tx.L1Tx.TokenID))
}
tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID] = &Account{
Idx: common.Idx(tc.idx),
@@ -437,8 +423,7 @@ func (tc *Context) calculateIdxForL1Txs(isCoordinatorTxs bool, txs []L1Tx) error
Nonce: common.Nonce(0),
BatchNum: tc.currBatchNum,
}
tc.l1CreatedAccounts[idxTokenIDToString(tx.fromIdxName, tx.L1Tx.TokenID)] =
tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID]
tc.l1CreatedAccounts[idxTokenIDToString(tx.fromIdxName, tx.L1Tx.TokenID)] = tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID]
tc.accountsByIdx[tc.idx] = tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID]
tc.UsersByIdx[tc.idx] = tc.Users[tx.fromIdxName]
tc.idx++
@@ -457,15 +442,11 @@ func (tc *Context) setIdxs() error {
testTx := &tc.currBatchTest.l2Txs[i]
if tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID] == nil {
return tracerr.Wrap(fmt.Errorf("Line %d: %s from User %s for TokenID %d "+
"while account not created yet",
testTx.lineNum, testTx.L2Tx.Type, testTx.fromIdxName, testTx.tokenID))
return tracerr.Wrap(fmt.Errorf("Line %d: %s from User %s for TokenID %d while account not created yet", testTx.lineNum, testTx.L2Tx.Type, testTx.fromIdxName, testTx.tokenID))
}
if testTx.L2Tx.Type == common.TxTypeTransfer {
if _, ok := tc.l1CreatedAccounts[idxTokenIDToString(testTx.toIdxName, testTx.tokenID)]; !ok {
return tracerr.Wrap(fmt.Errorf("Line %d: Can not create Transfer for a non "+
"existing account. Batch %d, ToIdx name: %s, TokenID: %d",
testTx.lineNum, tc.currBatchNum, testTx.toIdxName, testTx.tokenID))
return tracerr.Wrap(fmt.Errorf("Line %d: Can not create Transfer for a non existing account. Batch %d, ToIdx name: %s, TokenID: %d", testTx.lineNum, tc.currBatchNum, testTx.toIdxName, testTx.tokenID))
}
}
tc.Users[testTx.fromIdxName].Accounts[testTx.tokenID].Nonce++
@@ -517,8 +498,7 @@ func (tc *Context) addToL1UserQueue(tx L1Tx) error {
tx.L1Tx.Position = len(tc.Queues[tc.openToForge])
// When an L1UserTx is generated, all idxs must be available (except when idx == 0 or idx == 1)
if tx.L1Tx.Type != common.TxTypeCreateAccountDeposit &&
tx.L1Tx.Type != common.TxTypeCreateAccountDepositTransfer {
if tx.L1Tx.Type != common.TxTypeCreateAccountDeposit && tx.L1Tx.Type != common.TxTypeCreateAccountDepositTransfer {
tx.L1Tx.FromIdx = tc.Users[tx.fromIdxName].Accounts[tx.L1Tx.TokenID].Idx
}
tx.L1Tx.FromEthAddr = tc.Users[tx.fromIdxName].Addr
@@ -550,16 +530,14 @@ func (tc *Context) addToL1UserQueue(tx L1Tx) error {
func (tc *Context) checkIfAccountExists(tf string, inst Instruction) error {
if tc.Users[tf].Accounts[inst.TokenID] == nil {
return tracerr.Wrap(fmt.Errorf("%s at User: %s, for TokenID: %d, while account not created yet",
inst.Typ, tf, inst.TokenID))
return tracerr.Wrap(fmt.Errorf("%s at User: %s, for TokenID: %d, while account not created yet", inst.Typ, tf, inst.TokenID))
}
return nil
}
func (tc *Context) checkIfTokenIsRegistered(inst Instruction) error {
if inst.TokenID > tc.LastRegisteredTokenID {
return tracerr.Wrap(fmt.Errorf("Can not process %s: TokenID %d not registered, "+
"last registered TokenID: %d", inst.Typ, inst.TokenID, tc.LastRegisteredTokenID))
return tracerr.Wrap(fmt.Errorf("Can not process %s: TokenID %d not registered, last registered TokenID: %d", inst.Typ, inst.TokenID, tc.LastRegisteredTokenID))
}
return nil
}
@@ -573,8 +551,7 @@ func (tc *Context) GeneratePoolL2Txs(set string) ([]common.PoolL2Tx, error) {
return nil, tracerr.Wrap(err)
}
if parsedSet.typ != SetTypePoolL2 {
return nil, tracerr.Wrap(fmt.Errorf("Expected set type: %s, found: %s",
SetTypePoolL2, parsedSet.typ))
return nil, tracerr.Wrap(fmt.Errorf("Expected set type: %s, found: %s", SetTypePoolL2, parsedSet.typ))
}
tc.instructions = parsedSet.instructions
@@ -691,9 +668,7 @@ func (tc *Context) generatePoolL2Txs() ([]common.PoolL2Tx, error) {
tx.Signature = sig.Compress()
txs = append(txs, tx)
default:
return nil,
tracerr.Wrap(fmt.Errorf("Line %d: instruction type unrecognized: %s",
inst.LineNum, inst.Typ))
return nil, tracerr.Wrap(fmt.Errorf("Line %d: instruction type unrecognized: %s", inst.LineNum, inst.Typ))
}
}
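A sketch of generating pool transactions on top of an existing Context (illustrative only: tc is assumed to be a *Context that has already run a Blockchain set creating accounts for A and B on token 1, since pool transfers reference previously created accounts):

	poolSet := `
Type: PoolL2
PoolTransfer(1) A-B: 10 (1)
`
	poolTxs, err := tc.GeneratePoolL2Txs(poolSet)
	if err != nil {
		panic(err) // sketch only
	}
	_ = poolTxs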

View File

@@ -119,66 +119,49 @@ func TestGenerateBlocks(t *testing.T) {
// Check expected values generated by each line
// #0: Deposit(1) A: 10
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[0], common.TxTypeCreateAccountDeposit, 1,
"A", "", big.NewInt(10), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[0], common.TxTypeCreateAccountDeposit, 1, "A", "", big.NewInt(10), nil)
// #1: Deposit(2) A: 20
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[1], common.TxTypeCreateAccountDeposit, 2,
"A", "", big.NewInt(20), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[1], common.TxTypeCreateAccountDeposit, 2, "A", "", big.NewInt(20), nil)
// // #2: Deposit(1) A: 20
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[2], common.TxTypeCreateAccountDeposit, 1,
"B", "", big.NewInt(5), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[2], common.TxTypeCreateAccountDeposit, 1, "B", "", big.NewInt(5), nil)
// // #3: CreateAccountDeposit(1) C: 5
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[3], common.TxTypeCreateAccountDeposit, 1,
"C", "", big.NewInt(5), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[3], common.TxTypeCreateAccountDeposit, 1, "C", "", big.NewInt(5), nil)
// // #4: CreateAccountDeposit(1) D: 5
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[4], common.TxTypeCreateAccountDeposit, 1,
"D", "", big.NewInt(5), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[4], common.TxTypeCreateAccountDeposit, 1, "D", "", big.NewInt(5), nil)
// #5: Transfer(1) A-B: 6 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[0], common.TxTypeTransfer, 1, "A",
"B", big.NewInt(6), common.BatchNum(3))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[0], common.TxTypeTransfer, 1, "A", "B", big.NewInt(6), common.BatchNum(3))
// #6: Transfer(1) B-D: 3 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[1], common.TxTypeTransfer, 1, "B",
"D", big.NewInt(3), common.BatchNum(3))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[1], common.TxTypeTransfer, 1, "B", "D", big.NewInt(3), common.BatchNum(3))
// #7: Transfer(1) A-D: 1 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[2], common.TxTypeTransfer, 1, "A",
"D", big.NewInt(1), common.BatchNum(3))
// change of Batch #8: CreateAccountDepositTransfer(1) F-A: 15, 10 (3)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[5],
common.TxTypeCreateAccountDepositTransfer, 1, "F", "A", big.NewInt(15), big.NewInt(10))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[2].L2Txs[2], common.TxTypeTransfer, 1, "A", "D", big.NewInt(1), common.BatchNum(3))
// change of Batch
// #8: CreateAccountDepositTransfer(1) F-A: 15, 10 (3)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[5], common.TxTypeCreateAccountDepositTransfer, 1, "F", "A", big.NewInt(15), big.NewInt(10))
// #9: DepositTransfer(1) A-B: 15, 10 (1)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[6], common.TxTypeDepositTransfer, 1, "A",
"B", big.NewInt(15), big.NewInt(10))
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[6], common.TxTypeDepositTransfer, 1, "A", "B", big.NewInt(15), big.NewInt(10))
// #11: Transfer(1) C-A : 3 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[3].L2Txs[0], common.TxTypeTransfer, 1, "C",
"A", big.NewInt(3), common.BatchNum(4))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[3].L2Txs[0], common.TxTypeTransfer, 1, "C", "A", big.NewInt(3), common.BatchNum(4))
// #12: Transfer(2) A-B: 15 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[3].L2Txs[1], common.TxTypeTransfer, 2, "A",
"B", big.NewInt(15), common.BatchNum(4))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[3].L2Txs[1], common.TxTypeTransfer, 2, "A", "B", big.NewInt(15), common.BatchNum(4))
// #13: Deposit(1) User0: 20
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[7], common.TxTypeCreateAccountDeposit, 1,
"User0", "", big.NewInt(20), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[7], common.TxTypeCreateAccountDeposit, 1, "User0", "", big.NewInt(20), nil)
// // #14: Deposit(3) User1: 20
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[8], common.TxTypeCreateAccountDeposit, 3,
"User1", "", big.NewInt(20), nil)
tc.checkL1TxParams(t, blocks[0].Rollup.L1UserTxs[8], common.TxTypeCreateAccountDeposit, 3, "User1", "", big.NewInt(20), nil)
// #15: Transfer(1) User0-User1: 15 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[0], common.TxTypeTransfer, 1,
"User0", "User1", big.NewInt(15), common.BatchNum(5))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[0], common.TxTypeTransfer, 1, "User0", "User1", big.NewInt(15), common.BatchNum(5))
// #16: Transfer(3) User1-User0: 15 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[1], common.TxTypeTransfer, 3,
"User1", "User0", big.NewInt(15), common.BatchNum(5))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[1], common.TxTypeTransfer, 3, "User1", "User0", big.NewInt(15), common.BatchNum(5))
// #17: Transfer(1) A-C: 1 (1)
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[2], common.TxTypeTransfer, 1, "A",
"C", big.NewInt(1), common.BatchNum(5))
// change of Batch #18: Transfer(1) User1-User0: 1 (1)
tc.checkL2TxParams(t, blocks[1].Rollup.Batches[0].L2Txs[0], common.TxTypeTransfer, 1,
"User1", "User0", big.NewInt(1), common.BatchNum(6))
// change of Block (implies also a change of batch) #19: Transfer(1) A-B: 1 (1)
tc.checkL2TxParams(t, blocks[1].Rollup.Batches[0].L2Txs[1], common.TxTypeTransfer, 1, "A",
"B", big.NewInt(1), common.BatchNum(6))
tc.checkL2TxParams(t, blocks[0].Rollup.Batches[4].L2Txs[2], common.TxTypeTransfer, 1, "A", "C", big.NewInt(1), common.BatchNum(5))
// change of Batch
// #18: Transfer(1) User1-User0: 1 (1)
tc.checkL2TxParams(t, blocks[1].Rollup.Batches[0].L2Txs[0], common.TxTypeTransfer, 1, "User1", "User0", big.NewInt(1), common.BatchNum(6))
// change of Block (implies also a change of batch)
// #19: Transfer(1) A-B: 1 (1)
tc.checkL2TxParams(t, blocks[1].Rollup.Batches[0].L2Txs[1], common.TxTypeTransfer, 1, "A", "B", big.NewInt(1), common.BatchNum(6))
}
func (tc *Context) checkL1TxParams(t *testing.T, tx common.L1Tx, typ common.TxType,
tokenID common.TokenID, from, to string, depositAmount, amount *big.Int) {
func (tc *Context) checkL1TxParams(t *testing.T, tx common.L1Tx, typ common.TxType, tokenID common.TokenID, from, to string, depositAmount, amount *big.Int) {
assert.Equal(t, typ, tx.Type)
if tx.FromIdx != common.Idx(0) {
assert.Equal(t, tc.Users[from].Accounts[tokenID].Idx, tx.FromIdx)
@@ -196,8 +179,7 @@ func (tc *Context) checkL1TxParams(t *testing.T, tx common.L1Tx, typ common.TxTy
}
}
func (tc *Context) checkL2TxParams(t *testing.T, tx common.L2Tx, typ common.TxType,
tokenID common.TokenID, from, to string, amount *big.Int, batchNum common.BatchNum) {
func (tc *Context) checkL2TxParams(t *testing.T, tx common.L2Tx, typ common.TxType, tokenID common.TokenID, from, to string, amount *big.Int, batchNum common.BatchNum) {
assert.Equal(t, typ, tx.Type)
assert.Equal(t, tc.Users[from].Accounts[tokenID].Idx, tx.FromIdx)
if tx.Type != common.TxTypeExit {
@@ -385,9 +367,7 @@ func TestGenerateErrors(t *testing.T) {
`
tc := NewContext(0, common.RollupConstMaxL1UserTx)
_, err := tc.GenerateBlocks(set)
assert.Equal(t,
"Line 2: Can not process CreateAccountDeposit: TokenID 1 not registered, "+
"last registered TokenID: 0", err.Error())
assert.Equal(t, "Line 2: Can not process CreateAccountDeposit: TokenID 1 not registered, last registered TokenID: 0", err.Error())
// ensure AddToken sequentiality and not using 0
set = `
@@ -404,8 +384,7 @@ func TestGenerateErrors(t *testing.T) {
`
tc = NewContext(0, common.RollupConstMaxL1UserTx)
_, err = tc.GenerateBlocks(set)
require.Equal(t, "Line 2: AddToken TokenID should be sequential, expected TokenID: "+
"1, defined TokenID: 2", err.Error())
require.Equal(t, "Line 2: AddToken TokenID should be sequential, expected TokenID: 1, defined TokenID: 2", err.Error())
set = `
Type: Blockchain
@@ -416,8 +395,7 @@ func TestGenerateErrors(t *testing.T) {
`
tc = NewContext(0, common.RollupConstMaxL1UserTx)
_, err = tc.GenerateBlocks(set)
require.Equal(t, "Line 5: AddToken TokenID should be sequential, expected TokenID: "+
"4, defined TokenID: 5", err.Error())
require.Equal(t, "Line 5: AddToken TokenID should be sequential, expected TokenID: 4, defined TokenID: 5", err.Error())
// check transactions when account is not created yet
set = `
@@ -431,8 +409,7 @@ func TestGenerateErrors(t *testing.T) {
`
tc = NewContext(0, common.RollupConstMaxL1UserTx)
_, err = tc.GenerateBlocks(set)
require.Equal(t, "Line 5: CreateAccountDeposit(1)BTransfer(1) A-B: 6 (1)\n, err: "+
"Expected ':', found 'Transfer'", err.Error())
require.Equal(t, "Line 5: CreateAccountDeposit(1)BTransfer(1) A-B: 6 (1)\n, err: Expected ':', found 'Transfer'", err.Error())
set = `
Type: Blockchain
AddToken(1)
@@ -457,8 +434,7 @@ func TestGenerateErrors(t *testing.T) {
CreateAccountCoordinator(1) B
> batchL1
Transfer(1) A-B: 6 (1)
Transfer(1) A-B: 6 (1) // on purpose this is moving more money than
// what it has in the account, Til should not fail
Transfer(1) A-B: 6 (1) // on purpose this is moving more money than what it has in the account, Til should not fail
Transfer(1) B-A: 6 (1)
Exit(1) A: 3 (1)
> batch
@@ -582,8 +558,7 @@ func TestGenerateFromInstructions(t *testing.T) {
CreateAccountCoordinator(1) B
> batchL1
Transfer(1) A-B: 6 (1)
Transfer(1) A-B: 6 (1) // on purpose this is moving more money than
// what it has in the account, Til should not fail
Transfer(1) A-B: 6 (1) // on purpose this is moving more money than what it has in the account, Til should not fail
Transfer(1) B-A: 6 (1)
Exit(1) A: 3 (1)
> batch

View File

@@ -181,8 +181,7 @@ Transfer(1) H-O: 5 (1)
Transfer(1) I-H: 5 (1)
Exit(1) A: 5 (1)
// create CoordinatorTx CreateAccount for D, TokenId 2, used at SetPool0 for
// 'PoolTransfer(2) B-D: 3 (1)'
// create CoordinatorTx CreateAccount for D, TokenId 2, used at SetPool0 for 'PoolTransfer(2) B-D: 3 (1)'
CreateAccountCoordinator(2) D
> batchL1

View File

@@ -25,14 +25,8 @@ import (
func GenerateJsUsers(t *testing.T) []til.User {
// same values as in the js test
// skJsHex is equivalent to the 0000...000i js private key in commonjs
skJsHex := []string{"7eb258e61862aae75c6c1d1f7efae5006ffc9e4d5596a6ff95f3df4ea209ea7f",
"c005700f76f4b4cec710805c21595688648524df0a9d467afae537b7a7118819",
"b373d14c67fb2a517bf4ac831c93341eec8e1b38dbc14e7d725b292a7cf84707",
"2064b68d04a7aaae0ac3b36bf6f1850b380f1423be94a506c531940bd4a48b76"}
addrHex := []string{"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
"0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
"0x6813eb9362372eef6200f3b1dbc3f819671cba69",
"0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718"}
skJsHex := []string{"7eb258e61862aae75c6c1d1f7efae5006ffc9e4d5596a6ff95f3df4ea209ea7f", "c005700f76f4b4cec710805c21595688648524df0a9d467afae537b7a7118819", "b373d14c67fb2a517bf4ac831c93341eec8e1b38dbc14e7d725b292a7cf84707", "2064b68d04a7aaae0ac3b36bf6f1850b380f1423be94a506c531940bd4a48b76"}
addrHex := []string{"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf", "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf", "0x6813eb9362372eef6200f3b1dbc3f819671cba69", "0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718"}
var users []til.User
for i := 0; i < len(skJsHex); i++ {
skJs, err := hex.DecodeString(skJsHex[i])
@@ -47,14 +41,10 @@ func GenerateJsUsers(t *testing.T) []til.User {
}
users = append(users, user)
}
assert.Equal(t, "d746824f7d0ac5044a573f51b278acb56d823bec39551d1d7bf7378b68a1b021",
users[0].BJJ.Public().String())
assert.Equal(t, "4d05c307400c65795f02db96b1b81c60386fd53e947d9d3f749f3d99b1853909",
users[1].BJJ.Public().String())
assert.Equal(t, "38ffa002724562eb2a952a2503e206248962406cf16392ff32759b6f2a41fe11",
users[2].BJJ.Public().String())
assert.Equal(t, "c719e6401190be7fa7fbfcd3448fe2755233c01575341a3b09edadf5454f760b",
users[3].BJJ.Public().String())
assert.Equal(t, "d746824f7d0ac5044a573f51b278acb56d823bec39551d1d7bf7378b68a1b021", users[0].BJJ.Public().String())
assert.Equal(t, "4d05c307400c65795f02db96b1b81c60386fd53e947d9d3f749f3d99b1853909", users[1].BJJ.Public().String())
assert.Equal(t, "38ffa002724562eb2a952a2503e206248962406cf16392ff32759b6f2a41fe11", users[2].BJJ.Public().String())
assert.Equal(t, "c719e6401190be7fa7fbfcd3448fe2755233c01575341a3b09edadf5454f760b", users[3].BJJ.Public().String())
return users
}
@@ -68,9 +58,7 @@ func signL2Tx(t *testing.T, chainID uint16, user til.User, l2Tx common.PoolL2Tx)
}
// GenerateTxsZKInputsHash0 generates the transactions for the TestZKInputsHash0
func GenerateTxsZKInputsHash0(t *testing.T, chainID uint16) (users []til.User,
coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx,
l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputsHash0(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -105,9 +93,7 @@ func GenerateTxsZKInputsHash0(t *testing.T, chainID uint16) (users []til.User,
}
// GenerateTxsZKInputsHash1 generates the transactions for the TestZKInputsHash1
func GenerateTxsZKInputsHash1(t *testing.T, chainID uint16) (users []til.User,
coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx,
l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputsHash1(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
l1UserTxs = []common.L1Tx{
@@ -152,9 +138,7 @@ func GenerateTxsZKInputsHash1(t *testing.T, chainID uint16) (users []til.User,
}
// GenerateTxsZKInputs0 generates the transactions for the TestZKInputs0
func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User,
coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx,
l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -191,8 +175,7 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User,
}
// GenerateTxsZKInputs1 generates the transactions for the TestZKInputs1
func GenerateTxsZKInputs1(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx,
l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs1(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -239,8 +222,7 @@ func GenerateTxsZKInputs1(t *testing.T, chainID uint16) (users []til.User, coord
}
// GenerateTxsZKInputs2 generates the transactions for the TestZKInputs2
func GenerateTxsZKInputs2(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx,
l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs2(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -319,8 +301,7 @@ func GenerateTxsZKInputs2(t *testing.T, chainID uint16) (users []til.User, coord
}
// GenerateTxsZKInputs3 generates the transactions for the TestZKInputs3
func GenerateTxsZKInputs3(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx,
l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs3(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -399,8 +380,7 @@ func GenerateTxsZKInputs3(t *testing.T, chainID uint16) (users []til.User, coord
}
// GenerateTxsZKInputs4 generates the transactions for the TestZKInputs4
func GenerateTxsZKInputs4(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx,
l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs4(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)
@@ -489,8 +469,7 @@ func GenerateTxsZKInputs4(t *testing.T, chainID uint16) (users []til.User, coord
}
// GenerateTxsZKInputs5 generates the transactions for the TestZKInputs5
func GenerateTxsZKInputs5(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx,
l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
func GenerateTxsZKInputs5(t *testing.T, chainID uint16) (users []til.User, coordIdxs []common.Idx, l1UserTxs []common.L1Tx, l1CoordTxs []common.L1Tx, l2Txs []common.PoolL2Tx) {
// same values as in the js test
users = GenerateJsUsers(t)

Some files were not shown because too many files have changed in this diff.