Compare commits


2 Commits

Author      SHA1          Message                                 Date
Eduard S    3a7e18d113    Make proposal                           2021-02-10 13:26:14 +01:00
Arnau B     4661fb196a    Add semaphore for API queries to SQL    2021-02-10 13:10:19 +01:00
121 changed files with 2905 additions and 7135 deletions

View File

@@ -4,10 +4,6 @@ Go implementation of the Hermez node.
 ## Developing
-### Go version
-The `hermez-node` has been tested with go version 1.14
 ### Unit testing
 Running the unit tests requires a connection to a PostgreSQL database. You can
@@ -15,7 +11,7 @@ start PostgreSQL with docker easily this way (where `yourpasswordhere` should
 be your password):
 ```
-POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
+POSTGRES_PASS=yourpasswordhere sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
 ```
 Afterwards, run the tests with the password as env var:
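For reference, the tests pick up that password through the `POSTGRES_PASS` environment variable; a minimal sketch of how the suite opens the test database, mirroring the `db.InitSQLDB` call that appears in api_test.go further down in this diff:

```go
// Sketch: open the test database with the password from the environment,
// as the API test suite in this diff does.
pass := os.Getenv("POSTGRES_PASS")
database, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
if err != nil {
	panic(err)
}
defer database.Close()
```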

View File

@@ -4,7 +4,10 @@ import (
 	"net/http"
 	"github.com/gin-gonic/gin"
+	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/db/statedb"
+	"github.com/hermeznetwork/tracerr"
 )
 func (a *API) getAccount(c *gin.Context) {
@@ -20,6 +23,16 @@ func (a *API) getAccount(c *gin.Context) {
 		return
 	}
+	// Get balance from stateDB
+	account, err := a.s.LastGetAccount(*idx)
+	if err != nil {
+		retSQLErr(err, c)
+		return
+	}
+	apiAccount.Balance = apitypes.NewBigIntStr(account.Balance)
+	apiAccount.Nonce = account.Nonce
 	c.JSON(http.StatusOK, apiAccount)
 }
@@ -44,7 +57,27 @@ func (a *API) getAccounts(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Get balances from stateDB
+	if err := a.s.LastRead(func(sdb *statedb.Last) error {
+		for x, apiAccount := range apiAccounts {
+			idx, err := stringToIdx(string(apiAccount.Idx), "Account Idx")
+			if err != nil {
+				return tracerr.Wrap(err)
+			}
+			account, err := sdb.GetAccount(*idx)
+			if err != nil {
+				return tracerr.Wrap(err)
+			}
+			apiAccounts[x].Balance = apitypes.NewBigIntStr(account.Balance)
+			apiAccounts[x].Nonce = account.Nonce
+		}
+		return nil
+	}); err != nil {
+		retSQLErr(err, c)
+		return
+	}
+	// Build succesfull response
 	type accountResponse struct {
 		Accounts     []historydb.AccountAPI `json:"accounts"`
 		PendingItems uint64                 `json:"pendingItems"`
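The important change here is that every balance in a response is read inside one `LastRead` closure, so all of them come from the same StateDB checkpoint instead of from HistoryDB account updates. A condensed sketch of that pattern; the helper function and its name are illustrative, while `LastRead`, `statedb.Last` and `GetAccount` are the calls used above:

```go
// Sketch: fetch balances for a set of account indexes from one consistent
// checkpoint of the StateDB. Illustrative helper, not part of the proposal.
func readBalances(s *statedb.StateDB, idxs []common.Idx) (map[common.Idx]*big.Int, error) {
	balances := make(map[common.Idx]*big.Int, len(idxs))
	if err := s.LastRead(func(sdb *statedb.Last) error {
		for _, idx := range idxs {
			account, err := sdb.GetAccount(idx)
			if err != nil {
				return tracerr.Wrap(err)
			}
			balances[idx] = account.Balance
		}
		return nil
	}); err != nil {
		return nil, tracerr.Wrap(err)
	}
	return balances, nil
}
```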

View File

@@ -47,7 +47,7 @@ func (a *API) getAccountCreationAuth(c *gin.Context) {
 		retSQLErr(err, c)
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	c.JSON(http.StatusOK, auth)
 }

View File

@@ -9,6 +9,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
+	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/tracerr"
 )
@@ -21,7 +22,6 @@ const (
 // Status define status of the network
 type Status struct {
 	sync.RWMutex
-	NodeConfig NodeConfig                   `json:"nodeConfig"`
 	Network    Network                      `json:"network"`
 	Metrics    historydb.Metrics            `json:"metrics"`
 	Rollup     historydb.RollupVariablesAPI `json:"rollup"`
@@ -34,6 +34,7 @@ type Status struct {
 type API struct {
 	h  *historydb.HistoryDB
 	cg *configAPI
+	s  *statedb.StateDB
 	l2 *l2db.L2DB
 	status  Status
 	chainID uint16
@@ -45,9 +46,9 @@ func NewAPI(
 	coordinatorEndpoints, explorerEndpoints bool,
 	server *gin.Engine,
 	hdb *historydb.HistoryDB,
+	sdb *statedb.StateDB,
 	l2db *l2db.L2DB,
 	config *Config,
-	nodeConfig *NodeConfig,
 ) (*API, error) {
 	// Check input
 	// TODO: is stateDB only needed for explorer endpoints or for both?
@@ -65,10 +66,9 @@ func NewAPI(
 			AuctionConstants:  config.AuctionConstants,
 			WDelayerConstants: config.WDelayerConstants,
 		},
-		l2: l2db,
-		status: Status{
-			NodeConfig: *nodeConfig,
-		},
+		s:      sdb,
+		l2:     l2db,
+		status: Status{},
 		chainID:       config.ChainID,
 		hermezAddress: config.HermezAddress,
 	}
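With this signature, callers hand the API the synchronizer `StateDB` instead of a `NodeConfig`. A minimal sketch of wiring it up; the `hdb`, `sdb`, `l2DB` and `cfg` handles are assumed to be built as in api_test.go below:

```go
// Sketch: constructing the API with the StateDB argument replacing NodeConfig.
engine := gin.Default()
a, err := NewAPI(
	true, // coordinator endpoints
	true, // explorer endpoints
	engine,
	hdb,  // *historydb.HistoryDB
	sdb,  // *statedb.StateDB
	l2DB, // *l2db.L2DB
	cfg,  // *Config
)
if err != nil {
	panic(err)
}
_ = a
```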

View File

@@ -8,7 +8,6 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"net"
"net/http" "net/http"
"os" "os"
"strconv" "strconv"
@@ -23,6 +22,7 @@ import (
"github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
@@ -39,8 +39,8 @@ type Pendinger interface {
New() Pendinger New() Pendinger
} }
const apiAddr = ":4010" const apiPort = ":4010"
const apiURL = "http://localhost" + apiAddr + "/" const apiURL = "http://localhost" + apiPort + "/"
var SetBlockchain = ` var SetBlockchain = `
Type: Blockchain Type: Blockchain
@@ -201,8 +201,8 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
apiConnCon := db.NewAPIConnectionController(1, time.Second) apiConnCon := db.NewAPICnnectionController(1, time.Second)
hdb := historydb.NewHistoryDB(database, database, apiConnCon) hdb := historydb.NewHistoryDB(database, apiConnCon)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -216,8 +216,12 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
}() }()
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
if err != nil {
panic(err)
}
// L2DB // L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants) // Config (smart contract constants)
chainID := uint16(0) chainID := uint16(0)
@@ -235,24 +239,17 @@ func TestMain(m *testing.M) {
true, true,
apiGin, apiGin,
hdb, hdb,
sdb,
l2DB, l2DB,
&_config, &_config,
&NodeConfig{
ForgeDelay: 180,
},
) )
if err != nil { if err != nil {
panic(err) panic(err)
} }
// Start server // Start server
listener, err := net.Listen("tcp", apiAddr) //nolint:gosec server := &http.Server{Addr: apiPort, Handler: apiGin}
if err != nil {
panic(err)
}
server := &http.Server{Handler: apiGin}
go func() { go func() {
if err := server.Serve(listener); err != nil && if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
tracerr.Unwrap(err) != http.ErrServerClosed {
panic(err) panic(err)
} }
}() }()
@@ -260,7 +257,7 @@ func TestMain(m *testing.M) {
// Reset DB // Reset DB
test.WipeDB(api.h.DB()) test.WipeDB(api.h.DB())
// Generate blockchain data with til // Genratre blockchain data with til
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{ tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"), BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
@@ -353,6 +350,19 @@ func TestMain(m *testing.M) {
} }
} }
// lastBlockNum2 := blocksData[len(blocksData)-1].Block.EthBlockNum
// Add accounts to StateDB
for i := 0; i < len(commonAccounts); i++ {
if _, err := api.s.CreateAccount(commonAccounts[i].Idx, &commonAccounts[i]); err != nil {
panic(err)
}
}
// Make a checkpoint to make the accounts available in Last
if err := api.s.MakeCheckpoint(); err != nil {
panic(err)
}
// Generate Coordinators and add them to HistoryDB // Generate Coordinators and add them to HistoryDB
const nCoords = 10 const nCoords = 10
commonCoords := test.GenCoordinators(nCoords, commonBlocks) commonCoords := test.GenCoordinators(nCoords, commonBlocks)
@@ -512,48 +522,20 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000), WithdrawalDelay: uint64(3000),
} }
// Generate test data, as expected to be received/sent from/to the API // Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords) testCoords := genTestCoordinators(commonCoords)
testBids := genTestBids(commonBlocks, testCoords, bids) testBids := genTestBids(commonBlocks, testCoords, bids)
testExits := genTestExits(commonExitTree, testTokens, commonAccounts) testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks) testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks)
testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs) testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs)
poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts) poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts)
// Add balance and nonce to historyDB
accounts := genTestAccounts(commonAccounts, testTokens)
accUpdates := []common.AccountUpdate{}
for i := 0; i < len(accounts); i++ {
balance := new(big.Int)
balance.SetString(string(*accounts[i].Balance), 10)
idx, err := stringToIdx(string(accounts[i].Idx), "foo")
if err != nil {
panic(err)
}
accUpdates = append(accUpdates, common.AccountUpdate{
EthBlockNum: 0,
BatchNum: 1,
Idx: *idx,
Nonce: 0,
Balance: balance,
})
accUpdates = append(accUpdates, common.AccountUpdate{
EthBlockNum: 0,
BatchNum: 1,
Idx: *idx,
Nonce: accounts[i].Nonce,
Balance: balance,
})
}
if err := api.h.AddAccountUpdates(accUpdates); err != nil {
panic(err)
}
tc = testCommon{ tc = testCommon{
blocks: commonBlocks, blocks: commonBlocks,
tokens: testTokens, tokens: testTokens,
batches: testBatches, batches: testBatches,
fullBatches: testFullBatches, fullBatches: testFullBatches,
coordinators: testCoords, coordinators: testCoords,
accounts: accounts, accounts: genTestAccounts(commonAccounts, testTokens),
txs: testTxs, txs: testTxs,
exits: testExits, exits: testExits,
poolTxsToSend: poolTxsToSend, poolTxsToSend: poolTxsToSend,
@@ -599,11 +581,11 @@ func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond) apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO) hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
require.NoError(t, err) require.NoError(t, err)
// L2DB // L2DB
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO) l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
// API // API
apiGinTO := gin.Default() apiGinTO := gin.Default()
@@ -618,12 +600,9 @@ func TestTimeout(t *testing.T) {
<-finishWait <-finishWait
}) })
// Start server // Start server
serverTO := &http.Server{Handler: apiGinTO} serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
listener, err := net.Listen("tcp", ":4444") //nolint:gosec
require.NoError(t, err)
go func() { go func() {
if err := serverTO.Serve(listener); err != nil && if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
tracerr.Unwrap(err) != http.ErrServerClosed {
require.NoError(t, err) require.NoError(t, err)
} }
}() }()
@@ -633,11 +612,9 @@ func TestTimeout(t *testing.T) {
true, true,
apiGinTO, apiGinTO,
hdbTO, hdbTO,
nil,
l2DBTO, l2DBTO,
&_config, &_config,
&NodeConfig{
ForgeDelay: 180,
},
) )
require.NoError(t, err) require.NoError(t, err)

View File

@@ -52,7 +52,7 @@ func (a *API) getBatches(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type batchesResponse struct {
 		Batches      []historydb.BatchAPI `json:"batches"`
 		PendingItems uint64               `json:"pendingItems"`

View File

@@ -34,7 +34,7 @@ func (a *API) getBids(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type bidsResponse struct {
 		Bids         []historydb.BidAPI `json:"bids"`
 		PendingItems uint64             `json:"pendingItems"`

View File

@@ -32,7 +32,7 @@ func (a *API) getCoordinators(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type coordinatorsResponse struct {
 		Coordinators []historydb.CoordinatorAPI `json:"coordinators"`
 		PendingItems uint64                     `json:"pendingItems"`

View File

@@ -43,7 +43,7 @@ func (a *API) getExits(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type exitsResponse struct {
 		Exits        []historydb.ExitAPI `json:"exits"`
 		PendingItems uint64              `json:"pendingItems"`
@@ -72,6 +72,6 @@ func (a *API) getExit(c *gin.Context) {
 		retSQLErr(err, c)
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	c.JSON(http.StatusOK, exit)
 }

View File

@@ -10,11 +10,10 @@ import (
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/lib/pq"
-	"github.com/russross/meddler"
 )
 const (
-	// maxLimit is the max permitted items to be returned in paginated responses
+	// maxLimit is the max permited items to be returned in paginated responses
 	maxLimit uint = 2049
 	// dfltOrder indicates how paginated endpoints are ordered if not specified
@@ -40,40 +39,31 @@ const (
 )
 var (
-	// ErrNilBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
-	ErrNilBidderAddr = errors.New("biderAddr can not be nil")
+	// ErrNillBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
+	ErrNillBidderAddr = errors.New("biderAddr can not be nil")
 )
 func retSQLErr(err error, c *gin.Context) {
 	log.Warnw("HTTP API SQL request error", "err", err)
 	errMsg := tracerr.Unwrap(err).Error()
-	retDupKey := func(errCode pq.ErrorCode) {
-		// https://www.postgresql.org/docs/current/errcodes-appendix.html
-		if errCode == "23505" {
-			c.JSON(http.StatusInternalServerError, errorMsg{
-				Message: errDuplicatedKey,
-			})
-		} else {
-			c.JSON(http.StatusInternalServerError, errorMsg{
-				Message: errMsg,
-			})
-		}
-	}
 	if errMsg == errCtxTimeout {
 		c.JSON(http.StatusServiceUnavailable, errorMsg{
 			Message: errSQLTimeout,
 		})
 	} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
-		retDupKey(sqlErr.Code)
-	} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok {
-		retDupKey(sqlErr.(*pq.Error).Code)
+		// https://www.postgresql.org/docs/current/errcodes-appendix.html
+		if sqlErr.Code == "23505" {
+			c.JSON(http.StatusInternalServerError, errorMsg{
+				Message: errDuplicatedKey,
+			})
+		}
 	} else if tracerr.Unwrap(err) == sql.ErrNoRows {
 		c.JSON(http.StatusNotFound, errorMsg{
-			Message: errMsg,
+			Message: err.Error(),
 		})
 	} else {
 		c.JSON(http.StatusInternalServerError, errorMsg{
-			Message: errMsg,
+			Message: err.Error(),
 		})
 	}
 }

View File

@@ -50,19 +50,19 @@ func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err
return fromItem, order, limit, nil return fromItem, order, limit, nil
} }
// nolint reason: res may be not overwritten // nolint reason: res may be not overwriten
func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009 func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009
str := c.Query(name) str := c.Query(name)
return stringToUint(str, name, dflt, min, max) return stringToUint(str, name, dflt, min, max)
} }
// nolint reason: res may be not overwritten // nolint reason: res may be not overwriten
func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009 func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009
str := c.Query(name) str := c.Query(name)
return stringToInt64(str, name, dflt, min, max) return stringToInt64(str, name, dflt, min, max)
} }
// nolint reason: res may be not overwritten // nolint reason: res may be not overwriten
func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009 func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009
str := c.Query(name) str := c.Query(name)
if str == "" { if str == "" {
@@ -295,13 +295,13 @@ func parseParamIdx(c paramer) (*common.Idx, error) {
return stringToIdx(idxStr, name) return stringToIdx(idxStr, name)
} }
// nolint reason: res may be not overwritten // nolint reason: res may be not overwriten
func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009 func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009
str := c.Param(name) str := c.Param(name)
return stringToUint(str, name, dflt, min, max) return stringToUint(str, name, dflt, min, max)
} }
// nolint reason: res may be not overwritten // nolint reason: res may be not overwriten
func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009 func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009
str := c.Param(name) str := c.Param(name)
return stringToInt64(str, name, dflt, min, max) return stringToInt64(str, name, dflt, min, max)

View File

@@ -11,7 +11,7 @@ import (
 	"github.com/hermeznetwork/tracerr"
 )
-// SlotAPI is a representation of a slot information
+// SlotAPI is a repesentation of a slot information
 type SlotAPI struct {
 	ItemID  uint64 `json:"itemId"`
 	SlotNum int64  `json:"slotNum"`
@@ -316,7 +316,7 @@ func (a *API) getSlots(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type slotsResponse struct {
 		Slots        []SlotAPI `json:"slots"`
 		PendingItems uint64    `json:"pendingItems"`

View File

@@ -3,7 +3,6 @@ package api
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"math"
"math/big" "math/big"
"net/http" "net/http"
"time" "time"
@@ -24,12 +23,6 @@ type Network struct {
NextForgers []NextForger `json:"nextForgers"` NextForgers []NextForger `json:"nextForgers"`
} }
// NodeConfig is the configuration of the node that is exposed via API
type NodeConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// NextForger is a representation of the information of a coordinator and the period will forge // NextForger is a representation of the information of a coordinator and the period will forge
type NextForger struct { type NextForger struct {
Coordinator historydb.CoordinatorAPI `json:"coordinator"` Coordinator historydb.CoordinatorAPI `json:"coordinator"`
@@ -304,17 +297,10 @@ func (a *API) UpdateRecommendedFee() error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
var minFeeUSD float64
if a.l2 != nil {
minFeeUSD = a.l2.MinFeeUSD()
}
a.status.Lock() a.status.Lock()
a.status.RecommendedFee.ExistingAccount = a.status.RecommendedFee.ExistingAccount = feeExistingAccount
math.Max(feeExistingAccount, minFeeUSD) a.status.RecommendedFee.CreatesAccount = createAccountExtraFeePercentage * feeExistingAccount
a.status.RecommendedFee.CreatesAccount = a.status.RecommendedFee.CreatesAccountAndRegister = createAccountInternalExtraFeePercentage * feeExistingAccount
math.Max(createAccountExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.RecommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*feeExistingAccount, minFeeUSD)
a.status.Unlock() a.status.Unlock()
return nil return nil
} }

View File

@@ -131,7 +131,7 @@ func TestUpdateNetworkInfo(t *testing.T) {
func TestUpdateMetrics(t *testing.T) { func TestUpdateMetrics(t *testing.T) {
// Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated // Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
assert.NoError(t, err) assert.NoError(t, err)
@@ -149,11 +149,7 @@ func TestUpdateMetrics(t *testing.T) {
func TestUpdateRecommendedFee(t *testing.T) { func TestUpdateRecommendedFee(t *testing.T) {
err := api.UpdateRecommendedFee() err := api.UpdateRecommendedFee()
assert.NoError(t, err) assert.NoError(t, err)
var minFeeUSD float64 assert.Greater(t, api.status.RecommendedFee.ExistingAccount, float64(0))
if api.l2 != nil {
minFeeUSD = api.l2.MinFeeUSD()
}
assert.Greater(t, api.status.RecommendedFee.ExistingAccount, minFeeUSD)
assert.Equal(t, api.status.RecommendedFee.CreatesAccount, assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage) api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister, assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
@@ -162,7 +158,7 @@ func TestUpdateRecommendedFee(t *testing.T) {
func TestGetState(t *testing.T) { func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
api.SetRollupVariables(tc.rollupVars) api.SetRollupVariables(tc.rollupVars)
api.SetWDelayerVariables(tc.wdelayerVars) api.SetWDelayerVariables(tc.wdelayerVars)

View File

@@ -1329,6 +1329,13 @@ components:
type: string type: string
description: Moment in which the transaction was added to the pool. description: Moment in which the transaction was added to the pool.
format: date-time format: date-time
batchNum:
type: integer
description: Identifier of a batch. Every new forged batch increases by one the batchNum, starting at 0.
minimum: 0
maximum: 4294967295
nullable: true
example: null
requestFromAccountIndex: requestFromAccountIndex:
type: string type: string
description: >- description: >-
@@ -1383,6 +1390,7 @@ components:
$ref: '#/components/schemas/Token' $ref: '#/components/schemas/Token'
example: example:
amount: '100000000000000' amount: '100000000000000'
batchNum:
fee: 0 fee: 0
fromAccountIndex: hez:SCC:256 fromAccountIndex: hez:SCC:256
fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn
@@ -1430,6 +1438,7 @@ components:
- info - info
- signature - signature
- timestamp - timestamp
- batchNum
- requestFromAccountIndex - requestFromAccountIndex
- requestToAccountIndex - requestToAccountIndex
- requestToHezEthereumAddress - requestToHezEthereumAddress
@@ -2569,21 +2578,6 @@ components:
description: List of next coordinators to forge. description: List of next coordinators to forge.
items: items:
$ref: '#/components/schemas/NextForger' $ref: '#/components/schemas/NextForger'
NodeConfig:
type: object
description: Configuration of the coordinator node. Note that this is specific for each coordinator.
properties:
forgeDelay:
type: number
description: |
Delay in seconds after which a batch is forged if the slot is
already committed. If set to 0s, the coordinator will continuously
forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value.
example: 193.4
additionalProperties: false
required:
- forgeDelay
State: State:
type: object type: object
description: Gobal variables of the network description: Gobal variables of the network
@@ -2600,8 +2594,6 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer' $ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee: recommendedFee:
$ref: '#/components/schemas/RecommendedFee' $ref: '#/components/schemas/RecommendedFee'
nodeConfig:
$ref: '#/components/schemas/NodeConfig'
additionalProperties: false additionalProperties: false
required: required:
- network - network
@@ -2610,7 +2602,6 @@ components:
- auction - auction
- withdrawalDelayer - withdrawalDelayer
- recommendedFee - recommendedFee
- nodeConfig
StateNetwork: StateNetwork:
type: object type: object
description: Gobal statistics of the network description: Gobal statistics of the network
@@ -2821,10 +2812,6 @@ components:
type: number type: number
description: Average fee percentage paid for L2 transactions in the last 24 hours. description: Average fee percentage paid for L2 transactions in the last 24 hours.
example: 1.54 example: 1.54
estimatedTimeToForgeL1:
type: number
description: Estimated time needed to forge a L1 transaction, from the time it's added on the smart contract, until it's actualy forged. In seconds.
example: 193.4
additionalProperties: false additionalProperties: false
required: required:
- transactionsPerBatch - transactionsPerBatch
@@ -2833,7 +2820,6 @@ components:
- totalAccounts - totalAccounts
- totalBJJs - totalBJJs
- avgTransactionFee - avgTransactionFee
- estimatedTimeToForgeL1
PendingItems: PendingItems:
type: integer type: integer
description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent. description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent.
@@ -2930,7 +2916,7 @@ components:
example: 101 example: 101
l1UserTotalBytes: l1UserTotalBytes:
type: integer type: integer
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx). description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
example: 72 example: 72
maxL1UserTx: maxL1UserTx:
type: integer type: integer

View File

@@ -53,7 +53,7 @@ func (a *API) getTokens(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type tokensResponse struct {
 		Tokens       []historydb.TokenWithUSD `json:"tokens"`
 		PendingItems uint64                   `json:"pendingItems"`

View File

@@ -42,7 +42,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	type txsResponse struct {
 		Txs          []historydb.TxAPI `json:"transactions"`
 		PendingItems uint64            `json:"pendingItems"`
@@ -66,6 +66,6 @@ func (a *API) getHistoryTx(c *gin.Context) {
 		retSQLErr(err, c)
 		return
 	}
-	// Build successful response
+	// Build succesfull response
 	c.JSON(http.StatusOK, tx)
 }

View File

@@ -455,7 +455,7 @@ func TestGetHistoryTx(t *testing.T) {
 	// 400, due invalid TxID
 	err := doBadReq("GET", endpoint+"0x001", nil, 400)
 	assert.NoError(t, err)
-	// 404, due nonexistent TxID in DB
+	// 404, due inexistent TxID in DB
 	err = doBadReq("GET", endpoint+"0x00eb5e95e1ce5e9f6c4ed402d415e8d0bdd7664769cfd2064d28da04a2c76be432", nil, 404)
 	assert.NoError(t, err)
 }

View File

@@ -2,7 +2,6 @@ package api
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"net/http" "net/http"
@@ -28,7 +27,6 @@ func (a *API) postPoolTx(c *gin.Context) {
retBadReq(err, c) retBadReq(err, c)
return return
} }
writeTx.ClientIP = c.ClientIP()
// Insert to DB // Insert to DB
if err := a.l2.AddTxAPI(writeTx); err != nil { if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c) retSQLErr(err, c)
@@ -51,7 +49,7 @@ func (a *API) getPoolTx(c *gin.Context) {
retSQLErr(err, c) retSQLErr(err, c)
return return
} }
// Build successful response // Build succesfull response
c.JSON(http.StatusOK, tx) c.JSON(http.StatusOK, tx)
} }
@@ -171,21 +169,16 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Get public key
account, err := a.s.LastGetAccount(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate feeAmount // Validate feeAmount
_, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee) _, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Get public key
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate TokenID
if poolTx.TokenID != account.TokenID {
return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
poolTx.TokenID, account.TokenID))
}
// Check signature // Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) { if !poolTx.VerifySignature(a.chainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature")) return tracerr.Wrap(errors.New("wrong signature"))

View File

@@ -2,20 +2,14 @@ package api
import ( import (
"bytes" "bytes"
"crypto/ecdsa"
"encoding/binary"
"encoding/hex"
"encoding/json" "encoding/json"
"math/big"
"testing" "testing"
"time" "time"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// testPoolTxReceive is a struct to be used to assert the response // testPoolTxReceive is a struct to be used to assert the response
@@ -176,9 +170,9 @@ func TestPoolTxs(t *testing.T) {
fetchedTxID := common.TxID{} fetchedTxID := common.TxID{}
for _, tx := range tc.poolTxsToSend { for _, tx := range tc.poolTxsToSend {
jsonTxBytes, err := json.Marshal(tx) jsonTxBytes, err := json.Marshal(tx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"POST", "POST",
endpoint, endpoint,
@@ -193,42 +187,42 @@ func TestPoolTxs(t *testing.T) {
badTx.Amount = "99950000000000000" badTx.Amount = "99950000000000000"
badTx.Fee = 255 badTx.Fee = 255
jsonTxBytes, err := json.Marshal(badTx) jsonTxBytes, err := json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong signature // Wrong signature
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
badTx.FromIdx = "hez:foo:1000" badTx.FromIdx = "hez:foo:1000"
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong to // Wrong to
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
badTx.ToEthAddr = &ethAddr badTx.ToEthAddr = &ethAddr
badTx.ToIdx = nil badTx.ToIdx = nil
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong rq // Wrong rq
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
rqFromIdx := "hez:foo:30" rqFromIdx := "hez:foo:30"
badTx.RqFromIdx = &rqFromIdx badTx.RqFromIdx = &rqFromIdx
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// GET // GET
endpoint += "/" endpoint += "/"
for _, tx := range tc.poolTxsToReceive { for _, tx := range tc.poolTxsToReceive {
fetchedTx := testPoolTxReceive{} fetchedTx := testPoolTxReceive{}
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"GET", "GET",
endpoint+tx.TxID.String(), endpoint+tx.TxID.String(),
@@ -239,10 +233,10 @@ func TestPoolTxs(t *testing.T) {
} }
// 400, due invalid TxID // 400, due invalid TxID
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400) err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
require.NoError(t, err) assert.NoError(t, err)
// 404, due nonexistent TxID in DB // 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404) err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
require.NoError(t, err) assert.NoError(t, err)
} }
func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) { func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {
@@ -262,73 +256,3 @@ func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {
} }
assert.Equal(t, expected, actual) assert.Equal(t, expected, actual)
} }
// TestAllTosNull test that the API doesn't accept txs with all the TOs set to null (to eth, to bjj, to idx)
func TestAllTosNull(t *testing.T) {
// Generate account:
// Ethereum private key
var key ecdsa.PrivateKey
key.D = big.NewInt(int64(4444)) // only for testing
key.PublicKey.X, key.PublicKey.Y = ethCrypto.S256().ScalarBaseMult(key.D.Bytes())
key.Curve = ethCrypto.S256()
addr := ethCrypto.PubkeyToAddress(key.PublicKey)
// BJJ private key
var sk babyjub.PrivateKey
var iBytes [8]byte
binary.LittleEndian.PutUint64(iBytes[:], 4444)
copy(sk[:], iBytes[:]) // only for testing
account := common.Account{
Idx: 4444,
TokenID: 0,
BatchNum: 1,
BJJ: sk.Public().Compress(),
EthAddr: addr,
Nonce: 0,
Balance: big.NewInt(1000000),
}
// Add account to history DB (required to verify signature)
err := api.h.AddAccounts([]common.Account{account})
assert.NoError(t, err)
// Genrate tx with all tos set to nil (to eth, to bjj, to idx)
tx := common.PoolL2Tx{
FromIdx: account.Idx,
TokenID: account.TokenID,
Amount: big.NewInt(1000),
Fee: 200,
Nonce: 0,
}
// Set idx and type manually, and check that the function doesn't allow it
_, err = common.NewPoolL2Tx(&tx)
assert.Error(t, err)
tx.Type = common.TxTypeTransfer
var txID common.TxID
txIDRaw, err := hex.DecodeString("02e66e24f7f25272906647c8fd1d7fe8acf3cf3e9b38ffc9f94bbb5090dc275073")
assert.NoError(t, err)
copy(txID[:], txIDRaw)
tx.TxID = txID
// Sign tx
toSign, err := tx.HashToSign(0)
assert.NoError(t, err)
sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()
// Transform common.PoolL2Tx ==> testPoolTxSend
txToSend := testPoolTxSend{
TxID: tx.TxID,
Type: tx.Type,
TokenID: tx.TokenID,
FromIdx: idxToHez(tx.FromIdx, "ETH"),
Amount: tx.Amount.String(),
Fee: tx.Fee,
Nonce: tx.Nonce,
Signature: tx.Signature,
}
// Send tx to the API
jsonTxBytes, err := json.Marshal(txToSend)
require.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", apiURL+"transactions-pool", jsonTxReader, 400)
require.NoError(t, err)
// Clean historyDB: the added account shouldn't be there for other tests
_, err = api.h.DB().DB.Exec("delete from account where idx = 4444")
assert.NoError(t, err)
}

View File

@@ -91,7 +91,7 @@ func (c *CollectedFees) UnmarshalJSON(text []byte) error {
 	return nil
 }
-// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
+// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez fotmat (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
 // It assumes that Ethereum Address are inserted/fetched to/from the DB using the default Scan/Value interface
 type HezEthAddr string
@@ -143,7 +143,7 @@ func (s *StrHezEthAddr) UnmarshalText(text []byte) error {
 	return nil
 }
-// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez format (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
+// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez fotmat (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
 // It assumes that *babyjub.PublicKeyComp are inserted/fetched to/from the DB using the default Scan/Value interface
 type HezBJJ string
@@ -216,7 +216,7 @@ func (b HezBJJ) Value() (driver.Value, error) {
 // StrHezBJJ is used to unmarshal HezBJJ directly into an alias of babyjub.PublicKeyComp
 type StrHezBJJ babyjub.PublicKeyComp
-// UnmarshalText unmarshalls a StrHezBJJ
+// UnmarshalText unmarshals a StrHezBJJ
 func (s *StrHezBJJ) UnmarshalText(text []byte) error {
 	bjj, err := hezStrToBJJ(string(text))
 	if err != nil {
@@ -226,8 +226,8 @@ func (s *StrHezBJJ) UnmarshalText(text []byte) error {
 	return nil
 }
-// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez format (hez:tokenSymbol:idx) to sql DBs.
-// Note that this can only be used to insert to DB since there is no way to automatically read from the DB since it needs the tokenSymbol
+// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez fotmat (hez:tokenSymbol:idx) to sql DBs.
+// Note that this can only be used to insert to DB since there is no way to automaticaly read from the DB since it needs the tokenSymbol
 type HezIdx string
 // StrHezIdx is used to unmarshal HezIdx directly into an alias of common.Idx

View File

@@ -2,7 +2,6 @@ package batchbuilder
import ( import (
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
@@ -28,16 +27,9 @@ type ConfigBatch struct {
// NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset // NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
// method // method
func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
nLevels uint64) (*BatchBuilder, error) { localStateDB, err := statedb.NewLocalStateDB(dbpath, 128, synchronizerStateDB,
localStateDB, err := statedb.NewLocalStateDB( statedb.TypeBatchBuilder, int(nLevels))
statedb.Config{
Path: dbpath,
Keep: kvdb.DefaultKeep,
Type: statedb.TypeBatchBuilder,
NLevels: int(nLevels),
},
synchronizerStateDB)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -55,7 +47,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy. // it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer)) return bb.localStateDB.Reset(batchNum, fromSynchronizer)
} }
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
@@ -65,10 +57,7 @@ func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBa
tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig) tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs) ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
if err != nil { return ptOut.ZKInputs, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
return ptOut.ZKInputs, nil
} }
// LocalStateDB returns the underlying LocalStateDB // LocalStateDB returns the underlying LocalStateDB

View File

@@ -15,8 +15,7 @@ func TestBatchBuilder(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir)) defer assert.Nil(t, os.RemoveAll(dir))
synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, synchDB, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 0)
Type: statedb.TypeBatchBuilder, NLevels: 0})
assert.Nil(t, err) assert.Nil(t, err)
bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB") bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")

cli/node/.gitignore vendored
View File

@@ -1,3 +1,2 @@
 cfg.example.secret.toml
 cfg.toml
-node

View File

@@ -2,10 +2,6 @@
 This is the main cli for the node
-## Go version
-The `hermez-node` has been tested with go version 1.14
 ## Usage
 ```
@@ -69,64 +65,29 @@ when running the coordinator in sync mode
 - The node requires a PostgreSQL database. The parameters of the server and
   database must be set in the `PostgreSQL` section.
-## Building
-*All commands assume you are at the `cli/node` directory.*
-Building the node requires using the packr utility to bundle the database
-migrations inside the resulting binary. Install the packr utility with:
-```
-cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
-```
-Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
-not be found.
-Now build the node executable:
-```
-cd ../../db && packr2 && cd -
-go build .
-cd ../../db && packr2 clean && cd -
-```
-The executable is `node`.
 ## Usage Examples
-The following commands assume you have built the node previously. You can also
-run the following examples by replacing `./node` with `go run .` and executing
-them in the `cli/node` directory to build from source and run at the same time.
 Run the node in mode synchronizer:
 ```
-./node --mode sync --cfg cfg.buidler.toml run
+go run . --mode sync --cfg cfg.buidler.toml run
 ```
 Run the node in mode coordinator:
 ```
-./node --mode coord --cfg cfg.buidler.toml run
+go run . --mode coord --cfg cfg.buidler.toml run
 ```
 Import an ethereum private key into the keystore:
 ```
-./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
+go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
 ```
 Generate a new BabyJubJub key pair:
 ```
-./node --mode coord --cfg cfg.buidler.toml genbjj
+go run . --mode coord --cfg cfg.buidler.toml genbjj
 ```
-Wipe the entier SQL database (this will destroy all synchronized and pool
-data):
+Wipe the entier SQL database (this will destroy all synchronized and pool data):
 ```
-./node --mode coord --cfg cfg.buidler.toml wipesql
-```
-Discard all synchronized blocks and associated state up to a given block
-number. This command is useful in case the synchronizer reaches an invalid
-state and you want to roll back a few blocks and try again (maybe with some
-fixes in the code).
-```
-./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
+go run . --mode coord --cfg cfg.buidler.toml wipesql
 ```

View File

@@ -10,29 +10,21 @@ SQLConnectionTimeout = "2s"
Interval = "10s" Interval = "10s"
URL = "https://api-pub.bitfinex.com/v2/" URL = "https://api-pub.bitfinex.com/v2/"
Type = "bitfinexV2" Type = "bitfinexV2"
# URL = "https://api.coingecko.com/api/v3/"
# Type = "coingeckoV3"
[Debug] [Debug]
APIAddress = "localhost:12345" APIAddress = "localhost:12345"
MeddlerLogs = true MeddlerLogs = true
GinDebugMode = true
[StateDB] [StateDB]
Path = "/tmp/iden3-test/hermez/statedb" Path = "/tmp/iden3-test/hermez/statedb"
Keep = 256 Keep = 256
[PostgreSQL] [PostgreSQL]
PortWrite = 5432 Port = 5432
HostWrite = "localhost" Host = "localhost"
UserWrite = "hermez" User = "hermez"
PasswordWrite = "yourpasswordhere" Password = "yourpasswordhere"
NameWrite = "hermez" Name = "hermez"
# PortRead = 5432
# HostRead = "localhost"
# UserRead = "hermez"
# PasswordRead = "yourpasswordhere"
# NameRead = "hermez"
[Web3] [Web3]
URL = "http://localhost:8545" URL = "http://localhost:8545"
@@ -49,22 +41,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token" TokenHEZName = "Hermez Network Token"
[Coordinator] [Coordinator]
ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator # ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3" # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563" # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
MinimumForgeAddressBalance = "0"
ConfirmBlocks = 10 ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6 L1BatchTimeoutPerc = 0.999
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s" ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms" ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s" SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
PurgeByExtDelInterval = "1m"
[Coordinator.FeeAccount] [Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E" Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -75,7 +60,6 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
[Coordinator.L2DB] [Coordinator.L2DB]
SafetyPeriod = 10 SafetyPeriod = 10
MaxTxs = 512 MaxTxs = 512
MinFeeUSD = 0.0
TTL = "24h" TTL = "24h"
PurgeBatchDelay = 10 PurgeBatchDelay = 10
InvalidateBatchDelay = 20 InvalidateBatchDelay = 20
@@ -96,28 +80,21 @@ MaxTx = 512
NLevels = 32 NLevels = 32
[Coordinator.EthClient] [Coordinator.EthClient]
ReceiptTimeout = "60s"
ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms" CheckLoopInterval = "500ms"
Attempts = 4 Attempts = 4
AttemptsDelay = "500ms" AttemptsDelay = "500ms"
TxResendTimeout = "2m" CallGasLimit = 300000
NoReuseNonce = false GasPriceDiv = 100
MaxGasPrice = "5000000000"
GasPriceIncPerc = 10
[Coordinator.EthClient.Keystore] [Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore" Path = "/tmp/iden3-test/hermez/ethkeystore"
Password = "yourpasswordhere" Password = "yourpasswordhere"
[Coordinator.EthClient.ForgeBatchGasCost]
Fixed = 600000
L1UserTx = 15000
L1CoordTx = 8000
L2Tx = 250
[Coordinator.API] [Coordinator.API]
Coordinator = true Coordinator = true
[Coordinator.Debug] [Coordinator.Debug]
BatchPath = "/tmp/iden3-test/hermez/batchesdebug" BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
LightScrypt = true LightScrypt = true
# RollupVerifierIndex = 0

View File

@@ -5,22 +5,16 @@ import (
"fmt" "fmt"
"os" "os"
"os/signal" "os/signal"
"path"
"strings" "strings"
ethKeystore "github.com/ethereum/go-ethereum/accounts/keystore" ethKeystore "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config" "github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node" "github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@@ -29,7 +23,6 @@ const (
flagMode = "mode" flagMode = "mode"
flagSK = "privatekey" flagSK = "privatekey"
flagYes = "yes" flagYes = "yes"
flagBlock = "block"
modeSync = "sync" modeSync = "sync"
modeCoord = "coord" modeCoord = "coord"
) )
@@ -75,86 +68,6 @@ func cmdImportKey(c *cli.Context) error {
return nil return nil
} }
func resetStateDBs(cfg *Config, batchNum common.BatchNum) error {
log.Infof("Reset Synchronizer StateDB to batchNum %v...", batchNum)
// Manually make a checkpoint from batchNum to current to force current
// to be a valid checkpoint. This is useful because in case of a
// crash, current can be corrupted and the first thing that
// `kvdb.NewKVDB` does is read the current checkpoint, which wouldn't
// succeed in case of corruption.
dbPath := cfg.node.StateDB.Path
source := path.Join(dbPath, fmt.Sprintf("%s%d", kvdb.PathBatchNum, batchNum))
current := path.Join(dbPath, kvdb.PathCurrent)
last := path.Join(dbPath, kvdb.PathLast)
if err := os.RemoveAll(last); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
if batchNum == 0 {
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
} else {
if err := kvdb.PebbleMakeCheckpoint(source, current); err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.PebbleMakeCheckpoint: %w", err))
}
}
db, err := kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.NewKVDB: %w", err))
}
if err := db.Reset(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
if cfg.mode == node.ModeCoordinator {
log.Infof("Wipe Coordinator StateDBs...")
// We wipe the Coordinator StateDBs entirely (by deleting
// current and resetting to batchNum 0) because the Coordinator
// StateDBs are always reset from Synchronizer when the
// coordinator pipeline starts.
dbPath := cfg.node.Coordinator.TxSelector.Path
current := path.Join(dbPath, kvdb.PathCurrent)
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
db, err := kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("kvdb.NewKVDB: %w", err))
}
if err := db.Reset(0); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
dbPath = cfg.node.Coordinator.BatchBuilder.Path
current = path.Join(dbPath, kvdb.PathCurrent)
if err := os.RemoveAll(current); err != nil {
return tracerr.Wrap(fmt.Errorf("os.RemoveAll: %w", err))
}
db, err = kvdb.NewKVDB(kvdb.Config{
Path: dbPath,
NoGapsCheck: true,
NoLast: true,
})
if err != nil {
return tracerr.Wrap(fmt.Errorf("statedb.NewKVDB: %w", err))
}
if err := db.Reset(0); err != nil {
return tracerr.Wrap(fmt.Errorf("db.Reset: %w", err))
}
}
return nil
}
func cmdWipeSQL(c *cli.Context) error { func cmdWipeSQL(c *cli.Context) error {
_cfg, err := parseCli(c) _cfg, err := parseCli(c)
if err != nil { if err != nil {
@@ -163,8 +76,7 @@ func cmdWipeSQL(c *cli.Context) error {
cfg := _cfg.node cfg := _cfg.node
yes := c.Bool(flagYes) yes := c.Bool(flagYes)
if !yes { if !yes {
fmt.Print("*WARNING* Are you sure you want to delete " + fmt.Print("*WARNING* Are you sure you want to delete the SQL DB? [y/N]: ")
"the SQL DB and StateDBs? [y/N]: ")
var input string var input string
if _, err := fmt.Scanln(&input); err != nil { if _, err := fmt.Scanln(&input); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -175,23 +87,18 @@ func cmdWipeSQL(c *cli.Context) error {
} }
} }
db, err := dbUtils.ConnectSQLDB( db, err := dbUtils.ConnectSQLDB(
cfg.PostgreSQL.PortWrite, cfg.PostgreSQL.Port,
cfg.PostgreSQL.HostWrite, cfg.PostgreSQL.Host,
cfg.PostgreSQL.UserWrite, cfg.PostgreSQL.User,
cfg.PostgreSQL.PasswordWrite, cfg.PostgreSQL.Password,
cfg.PostgreSQL.NameWrite, cfg.PostgreSQL.Name,
) )
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
log.Info("Wiping SQL DB...") log.Info("Wiping SQL DB...")
if err := dbUtils.MigrationsDown(db.DB); err != nil { if err := dbUtils.MigrationsDown(db.DB); err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.MigrationsDown: %w", err)) return tracerr.Wrap(err)
}
log.Info("Wiping StateDBs...")
if err := resetStateDBs(_cfg, 0); err != nil {
return tracerr.Wrap(fmt.Errorf("resetStateDBs: %w", err))
} }
return nil return nil
} }
@@ -232,72 +139,6 @@ func cmdRun(c *cli.Context) error {
return nil return nil
} }
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
dbRead, dbWrite,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
log.Info("Resetting StateDBs...")
if err := resetStateDBs(_cfg, batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("resetStateDBs: %w", err))
}
return nil
}
// Config is the configuration of the hermez node execution // Config is the configuration of the hermez node execution
type Config struct { type Config struct {
mode node.Mode mode node.Mode
@@ -382,7 +223,7 @@ func main() {
{ {
Name: "wipesql", Name: "wipesql",
Aliases: []string{}, Aliases: []string{},
Usage: "Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, " + Usage: "Wipe the SQL DB (HistoryDB and L2DB), " +
"leaving the DB in a clean state", "leaving the DB in a clean state",
Action: cmdWipeSQL, Action: cmdWipeSQL,
Flags: []cli.Flag{ Flags: []cli.Flag{
@@ -398,18 +239,6 @@ func main() {
Usage: "Run the hermez-node in the indicated mode", Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun, Action: cmdRun,
}, },
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
} }
err := app.Run(os.Args) err := app.Run(os.Args)
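For context on the command wiring shown in this hunk, here is a minimal, self-contained sketch (not taken from either side of the diff) of how a urfave/cli/v2 command with an Int64 flag is registered; the `discard` name and `flagBlock` constant mirror the removed code above, everything else is illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

const flagBlock = "block"

func main() {
	app := &cli.App{
		Name: "example",
		Commands: []*cli.Command{
			{
				Name:  "discard",
				Usage: "Discard blocks up to a specified block number",
				Flags: []cli.Flag{
					&cli.Int64Flag{
						Name:  flagBlock,
						Usage: "last block number to keep",
					},
				},
				Action: func(c *cli.Context) error {
					// c.Int64 reads the flag value parsed by urfave/cli.
					fmt.Println("would discard up to block", c.Int64(flagBlock))
					return nil
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```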

View File

@@ -72,8 +72,7 @@ func (idx Idx) BigInt() *big.Int {
// IdxFromBytes returns Idx from a byte array // IdxFromBytes returns Idx from a byte array
func IdxFromBytes(b []byte) (Idx, error) { func IdxFromBytes(b []byte) (Idx, error) {
if len(b) != IdxBytesLen { if len(b) != IdxBytesLen {
return 0, tracerr.Wrap(fmt.Errorf("can not parse Idx, bytes len %d, expected %d", return 0, tracerr.Wrap(fmt.Errorf("can not parse Idx, bytes len %d, expected %d", len(b), IdxBytesLen))
len(b), IdxBytesLen))
} }
var idxBytes [8]byte var idxBytes [8]byte
copy(idxBytes[2:], b[:]) copy(idxBytes[2:], b[:])
@@ -195,8 +194,7 @@ func (a *Account) BigInts() ([NLeafElems]*big.Int, error) {
return e, nil return e, nil
} }
// HashValue returns the value of the Account, which is the Poseidon hash of its // HashValue returns the value of the Account, which is the Poseidon hash of its *big.Int representation
// *big.Int representation
func (a *Account) HashValue() (*big.Int, error) { func (a *Account) HashValue() (*big.Int, error) {
bi, err := a.BigInts() bi, err := a.BigInts()
if err != nil { if err != nil {
@@ -265,13 +263,3 @@ type IdxNonce struct {
Idx Idx `db:"idx"` Idx Idx `db:"idx"`
Nonce Nonce `db:"nonce"` Nonce Nonce `db:"nonce"`
} }
// AccountUpdate represents an account balance and/or nonce update after a
// processed batch
type AccountUpdate struct {
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
Idx Idx `meddler:"idx"`
Nonce Nonce `meddler:"nonce"`
Balance *big.Int `meddler:"balance,bigint"`
}

View File

@@ -76,8 +76,7 @@ func TestNonceParser(t *testing.T) {
func TestAccount(t *testing.T) { func TestAccount(t *testing.T) {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -116,8 +115,7 @@ func TestAccountLoop(t *testing.T) {
// check that for different deterministic BabyJubJub keys & random Address there is no problem // check that for different deterministic BabyJubJub keys & random Address there is no problem
for i := 0; i < 256; i++ { for i := 0; i < 256; i++ {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -201,8 +199,7 @@ func bigFromStr(h string, u int) *big.Int {
func TestAccountHashValue(t *testing.T) { func TestAccountHashValue(t *testing.T) {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -215,16 +212,13 @@ func TestAccountHashValue(t *testing.T) {
} }
v, err := account.HashValue() v, err := account.HashValue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, assert.Equal(t, "16297758255249203915951182296472515138555043617458222397753168518282206850764", v.String())
"447675324273474410516096114710387312413478475468606444107594732044698919451",
v.String())
} }
func TestAccountHashValueTestVectors(t *testing.T) { func TestAccountHashValueTestVectors(t *testing.T) {
// values from js test vectors // values from js test vectors
ay := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1)) ay := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", (hex.EncodeToString(ay.Bytes())))
(hex.EncodeToString(ay.Bytes())))
bjjPoint, err := babyjub.PointFromSignAndY(true, ay) bjjPoint, err := babyjub.PointFromSignAndY(true, ay)
require.NoError(t, err) require.NoError(t, err)
bjj := babyjub.PublicKey(*bjjPoint) bjj := babyjub.PublicKey(*bjjPoint)
@@ -242,22 +236,16 @@ func TestAccountHashValueTestVectors(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "9444732965739290427391", e[0].String()) assert.Equal(t, "9444732965739290427391", e[0].String())
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", e[1].String()) assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", e[1].String())
assert.Equal(t, assert.Equal(t, "14474011154664524427946373126085988481658748083205070504932198000989141204991", e[2].String())
"14474011154664524427946373126085988481658748083205070504932198000989141204991",
e[2].String())
assert.Equal(t, "1461501637330902918203684832716283019655932542975", e[3].String()) assert.Equal(t, "1461501637330902918203684832716283019655932542975", e[3].String())
h, err := poseidon.Hash(e[:]) h, err := poseidon.Hash(e[:])
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, assert.Equal(t, "4550823210217540218403400309533329186487982452461145263910122718498735057257", h.String())
"13265203488631320682117942952393454767418777767637549409684833552016769103047",
h.String())
v, err := account.HashValue() v, err := account.HashValue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, assert.Equal(t, "4550823210217540218403400309533329186487982452461145263910122718498735057257", v.String())
"13265203488631320682117942952393454767418777767637549409684833552016769103047",
v.String())
// second account // second account
ay = big.NewInt(0) ay = big.NewInt(0)
@@ -273,9 +261,7 @@ func TestAccountHashValueTestVectors(t *testing.T) {
} }
v, err = account.HashValue() v, err = account.HashValue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, assert.Equal(t, "7750253361301235345986002241352365187241910378619330147114280396816709365657", v.String())
"2351654555892372227640888372176282444150254868378439619268573230312091195718",
v.String())
// third account // third account
ay = bigFromStr("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", 16) ay = bigFromStr("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", 16)
@@ -293,15 +279,11 @@ func TestAccountHashValueTestVectors(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "554050781187", e[0].String()) assert.Equal(t, "554050781187", e[0].String())
assert.Equal(t, "42000000000000000000", e[1].String()) assert.Equal(t, "42000000000000000000", e[1].String())
assert.Equal(t, assert.Equal(t, "15238403086306505038849621710779816852318505119327426213168494964113886299863", e[2].String())
"15238403086306505038849621710779816852318505119327426213168494964113886299863",
e[2].String())
assert.Equal(t, "935037732739828347587684875151694054123613453305", e[3].String()) assert.Equal(t, "935037732739828347587684875151694054123613453305", e[3].String())
v, err = account.HashValue() v, err = account.HashValue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, assert.Equal(t, "10565754214047872850889045989683221123564392137456000481397520902594455245517", v.String())
"15036148928138382129196903417666258171042923749783835283230591475172197254845",
v.String())
} }
func TestAccountErrNotInFF(t *testing.T) { func TestAccountErrNotInFF(t *testing.T) {
@@ -330,8 +312,7 @@ func TestAccountErrNotInFF(t *testing.T) {
func TestAccountErrNumOverflowNonce(t *testing.T) { func TestAccountErrNumOverflowNonce(t *testing.T) {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -358,8 +339,7 @@ func TestAccountErrNumOverflowNonce(t *testing.T) {
func TestAccountErrNumOverflowBalance(t *testing.T) { func TestAccountErrNumOverflowBalance(t *testing.T) {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -371,16 +351,14 @@ func TestAccountErrNumOverflowBalance(t *testing.T) {
BJJ: pk.Compress(), BJJ: pk.Compress(),
EthAddr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"), EthAddr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
} }
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", assert.Equal(t, "6277101735386680763835789423207666416102355444464034512895", account.Balance.String())
account.Balance.String())
_, err = account.Bytes() _, err = account.Bytes()
assert.NoError(t, err) assert.NoError(t, err)
// force value overflow // force value overflow
account.Balance = new(big.Int).Exp(big.NewInt(2), big.NewInt(192), nil) account.Balance = new(big.Int).Exp(big.NewInt(2), big.NewInt(192), nil)
assert.Equal(t, "6277101735386680763835789423207666416102355444464034512896", assert.Equal(t, "6277101735386680763835789423207666416102355444464034512896", account.Balance.String())
account.Balance.String())
b, err := account.Bytes() b, err := account.Bytes()
assert.NotNil(t, err) assert.NotNil(t, err)
assert.Equal(t, fmt.Errorf("%s Balance", ErrNumOverflow), tracerr.Unwrap(err)) assert.Equal(t, fmt.Errorf("%s Balance", ErrNumOverflow), tracerr.Unwrap(err))

View File

@@ -1,30 +1,21 @@
package common package common
import ( import (
"encoding/binary"
"strconv"
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
ethMath "github.com/ethereum/go-ethereum/common/math"
ethCrypto "github.com/ethereum/go-ethereum/crypto" ethCrypto "github.com/ethereum/go-ethereum/crypto"
ethSigner "github.com/ethereum/go-ethereum/signer/core"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez // AccountCreationAuthMsg is the message that is signed to authorize a Hermez
// account creation // account creation
const AccountCreationAuthMsg = "Account creation" const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollup account creation"
// EIP712Version is the used version of the EIP-712 // EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EIP712Version = "1" const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
// EIP712Provider defines the Provider for the EIP-712
const EIP712Provider = "Hermez Network"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for // AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary // account creations when necessary
@@ -35,82 +26,44 @@ type AccountCreationAuth struct {
Timestamp time.Time `meddler:"timestamp,utctime"` Timestamp time.Time `meddler:"timestamp,utctime"`
} }
// toHash returns a byte array to be hashed from the AccountCreationAuth, which
// follows the EIP-712 encoding
func (a *AccountCreationAuth) toHash(chainID uint16, func (a *AccountCreationAuth) toHash(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) { hermezContractAddr ethCommon.Address) []byte {
chainIDFormatted := ethMath.NewHexOrDecimal256(int64(chainID)) var chainIDBytes [2]byte
binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
// [EthPrefix | AccountCreationAuthMsg | compressedBJJ | chainID | hermezContractAddr]
var b []byte
b = append(b, []byte(AccountCreationAuthMsg)...)
b = append(b, SwapEndianness(a.BJJ[:])...) // for js implementation compatibility
b = append(b, chainIDBytes[:]...)
b = append(b, hermezContractAddr[:]...)
signerData := ethSigner.TypedData{ ethPrefix := EthMsgPrefix + strconv.Itoa(len(b))
Types: ethSigner.Types{ return append([]byte(ethPrefix), b...)
"EIP712Domain": []ethSigner.Type{
{Name: "name", Type: "string"},
{Name: "version", Type: "string"},
{Name: "chainId", Type: "uint256"},
{Name: "verifyingContract", Type: "address"},
},
"Authorise": []ethSigner.Type{
{Name: "Provider", Type: "string"},
{Name: "Authorisation", Type: "string"},
{Name: "BJJKey", Type: "bytes32"},
},
},
PrimaryType: "Authorise",
Domain: ethSigner.TypedDataDomain{
Name: EIP712Provider,
Version: EIP712Version,
ChainId: chainIDFormatted,
VerifyingContract: hermezContractAddr.Hex(),
},
Message: ethSigner.TypedDataMessage{
"Provider": EIP712Provider,
"Authorisation": AccountCreationAuthMsg,
"BJJKey": SwapEndianness(a.BJJ[:]),
},
}
domainSeparator, err := signerData.HashStruct("EIP712Domain", signerData.Domain.Map())
if err != nil {
return nil, tracerr.Wrap(err)
}
typedDataHash, err := signerData.HashStruct(signerData.PrimaryType, signerData.Message)
if err != nil {
return nil, tracerr.Wrap(err)
}
rawData := []byte{0x19, 0x01} // "\x19\x01"
rawData = append(rawData, domainSeparator...)
rawData = append(rawData, typedDataHash...)
return rawData, nil
} }
// HashToSign returns the hash to be signed by the Ethereum address to authorize // HashToSign returns the hash to be signed by the Etherum address to authorize
// the account creation, which follows the EIP-712 encoding // the account creation
func (a *AccountCreationAuth) HashToSign(chainID uint16, func (a *AccountCreationAuth) HashToSign(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) { hermezContractAddr ethCommon.Address) ([]byte, error) {
b, err := a.toHash(chainID, hermezContractAddr) b := a.toHash(chainID, hermezContractAddr)
if err != nil { return ethCrypto.Keccak256Hash(b).Bytes(), nil
return nil, tracerr.Wrap(err)
}
return ethCrypto.Keccak256(b), nil
} }
// Sign signs the account creation authorization message using the provided // Sign signs the account creation authorization message using the provided
// `signHash` function, and stores the signature in `a.Signature`. `signHash` // `signHash` function, and stores the signaure in `a.Signature`. `signHash`
// should do an ethereum signature using the account corresponding to // should do an ethereum signature using the account corresponding to
// `a.EthAddr`. The `signHash` function is used to make signing flexible: in // `a.EthAddr`. The `signHash` function is used to make signig flexible: in
// tests we sign directly using the private key, outside tests we sign using // tests we sign directly using the private key, outside tests we sign using
// the keystore (which never exposes the private key). Sign follows the EIP-712 // the keystore (which never exposes the private key).
// encoding.
func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error), func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
chainID uint16, hermezContractAddr ethCommon.Address) error { chainID uint16, hermezContractAddr ethCommon.Address) error {
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
if err != nil { if err != nil {
return tracerr.Wrap(err) return err
} }
sig, err := signHash(hash) sig, err := signHash(hash)
if err != nil { if err != nil {
return tracerr.Wrap(err) return err
} }
sig[64] += 27 sig[64] += 27
a.Signature = sig a.Signature = sig
@@ -119,8 +72,7 @@ func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
} }
// VerifySignature ensures that the Signature is done with the EthAddr, for the // VerifySignature ensures that the Signature is done with the EthAddr, for the
// chainID and hermezContractAddress passed by parameter. VerifySignature // chainID and hermezContractAddress passed by parameter
// follows the EIP-712 encoding.
func (a *AccountCreationAuth) VerifySignature(chainID uint16, func (a *AccountCreationAuth) VerifySignature(chainID uint16,
hermezContractAddr ethCommon.Address) bool { hermezContractAddr ethCommon.Address) bool {
// Calculate hash to be signed // Calculate hash to be signed

View File

@@ -13,8 +13,7 @@ import (
func TestAccountCreationAuthSignVerify(t *testing.T) { func TestAccountCreationAuthSignVerify(t *testing.T) {
// Ethereum key // Ethereum key
ethSk, err := ethSk, err := ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err) require.NoError(t, err)
ethAddr := ethCrypto.PubkeyToAddress(ethSk.PublicKey) ethAddr := ethCrypto.PubkeyToAddress(ethSk.PublicKey)
@@ -40,7 +39,7 @@ func TestAccountCreationAuthSignVerify(t *testing.T) {
// Hash and sign manually and compare the generated signature // Hash and sign manually and compare the generated signature
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "9414667457e658dd31949b82996b75c65a055512244c3bbfd22ff56add02ba65", assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
hex.EncodeToString(hash)) hex.EncodeToString(hash))
sig, err := ethCrypto.Sign(hash, ethSk) sig, err := ethCrypto.Sign(hash, ethSk)
require.NoError(t, err) require.NoError(t, err)
@@ -70,38 +69,35 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
sigExpected string sigExpected string
} }
var tvs []testVector var tvs []testVector
//nolint:lll
tv0 := testVector{ tv0 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000001", ethSk: "0000000000000000000000000000000000000000000000000000000000000001",
expectedAddress: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf", expectedAddress: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf",
pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7",
chainID: uint16(4), chainID: uint16(4),
hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf", hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
toHashExpected: "190189658bba487e11c7da602676ee32bc90b77d3f32a305b147e4f3c3b35f19672e5d84ccc38d0ab245c469b719549d837113465c2abf9972c49403ca6fd10ed3dc", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700047e5f4552091a69125d5dfcb7b8c2659029395bdf",
hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2", hashExpected: "39afea52d843a4de905b6b5ebb0ee8c678141f711d96d9b429c4aec10ef9911f",
sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b", sigExpected: "73d10d6ecf06ee8a5f60ac90f06b78bef9c650f414ba3ac73e176dc32e896159147457e9c86f0b4bd60fdaf2c0b2aec890a7df993d69a4805e242a6b845ebf231c",
} }
//nolint:lll
tv1 := testVector{ tv1 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000002", ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
expectedAddress: "0x2B5AD5c4795c026514f8317c7a215E218DcCD6cF", expectedAddress: "0x2B5AD5c4795c026514f8317c7a215E218DcCD6cF",
pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d", pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d",
chainID: uint16(0), chainID: uint16(0),
hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf", hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
toHashExpected: "1901dafbc253dedf90d6421dc6e25d5d9efc6985133cb2a8d363d0a081a0e3eddddc65f603a88de36aaeabd3b4cf586538c7f3fd50c94780530a3707c8c14ad9fd11", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00002b5ad5c4795c026514f8317c7a215e218dccd6cf",
hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959", hashExpected: "89a3895993a4736232212e59566294feb3da227af44375daf3307dcad5451d5d",
sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c", sigExpected: "bb4156156c705494ad5f99030342c64657e51e2994750f92125717c40bf56ad632044aa6bd00979feea92c417b552401e65fe5f531f15010d9d1c278da8be1df1b",
} }
//nolint:lll
tv2 := testVector{ tv2 := testVector{
ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122", ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
expectedAddress: "0xc783df8a850f42e7F7e57013759C285caa701eB6", expectedAddress: "0xc783df8a850f42e7F7e57013759C285caa701eB6",
pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52", pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52",
chainID: uint16(31337), // =0x7a69 chainID: uint16(31337), // =0x7a69
hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c", hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c",
toHashExpected: "190167617949b934d7e01add4009cd3d47415a26727b7d6288e5dce33fb3721d5a1a9ce511b19b694c9aaf8183f4987ed752f24884c54c003d11daa2e98c7547a79e", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b527a69f4e77e5da47ac3125140c470c71cbca77b5c638c",
hashExpected: "157b570c597e615b8356ce008ac39f43bc9b6d50080bc07d968031b9378acbbb", hashExpected: "4f6ead01278ba4597d4720e37482f585a713497cea994a95209f4c57a963b4a7",
sigExpected: "a0766181102428b5672e523dc4b905c10ddf025c10dbd0b3534ef864632a14652737610041c670b302fc7dca28edd5d6eac42b72d69ce58da8ce21287b244e381b", sigExpected: "43b5818802a137a72a190c1d8d767ca507f7a4804b1b69b5e055abf31f4f2b476c80bb1ba63260d95610f6f831420d32130e7f22fec5d76e16644ddfcedd0d441c",
} }
tvs = append(tvs, tv0) tvs = append(tvs, tv0)
tvs = append(tvs, tv1) tvs = append(tvs, tv1)
@@ -126,10 +122,10 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
BJJ: pkComp, BJJ: pkComp,
} }
toHash, err := a.toHash(chainID, hermezContractAddr) toHash := a.toHash(chainID, hermezContractAddr)
require.NoError(t, err)
assert.Equal(t, tv.toHashExpected, assert.Equal(t, tv.toHashExpected,
hex.EncodeToString(toHash)) hex.EncodeToString(toHash))
assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))
msg, err := a.HashToSign(chainID, hermezContractAddr) msg, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)

View File

@@ -13,9 +13,8 @@ const batchNumBytesLen = 8
// Batch is a struct that represents Hermez network batch // Batch is a struct that represents Hermez network batch
type Batch struct { type Batch struct {
BatchNum BatchNum `meddler:"batch_num"` BatchNum BatchNum `meddler:"batch_num"`
// Ethereum block in which the batch is forged EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
EthBlockNum int64 `meddler:"eth_block_num"`
ForgerAddr ethCommon.Address `meddler:"forger_addr"` ForgerAddr ethCommon.Address `meddler:"forger_addr"`
CollectedFees map[TokenID]*big.Int `meddler:"fees_collected,json"` CollectedFees map[TokenID]*big.Int `meddler:"fees_collected,json"`
FeeIdxsCoordinator []Idx `meddler:"fee_idxs_coordinator,json"` FeeIdxsCoordinator []Idx `meddler:"fee_idxs_coordinator,json"`
@@ -23,29 +22,9 @@ type Batch struct {
NumAccounts int `meddler:"num_accounts"` NumAccounts int `meddler:"num_accounts"`
LastIdx int64 `meddler:"last_idx"` LastIdx int64 `meddler:"last_idx"`
ExitRoot *big.Int `meddler:"exit_root,bigint"` ExitRoot *big.Int `meddler:"exit_root,bigint"`
// ForgeL1TxsNum is optional, Only when the batch forges L1 txs. Identifier that corresponds ForgeL1TxsNum *int64 `meddler:"forge_l1_txs_num"` // optional, Only when the batch forges L1 txs. Identifier that corresponds to the group of L1 txs forged in the current batch.
// to the group of L1 txs forged in the current batch. SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
ForgeL1TxsNum *int64 `meddler:"forge_l1_txs_num"` TotalFeesUSD *float64 `meddler:"total_fees_usd"`
SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
} }
// BatchNum identifies a batch // BatchNum identifies a batch
@@ -66,9 +45,7 @@ func (bn BatchNum) BigInt() *big.Int {
// BatchNumFromBytes returns BatchNum from a []byte // BatchNumFromBytes returns BatchNum from a []byte
func BatchNumFromBytes(b []byte) (BatchNum, error) { func BatchNumFromBytes(b []byte) (BatchNum, error) {
if len(b) != batchNumBytesLen { if len(b) != batchNumBytesLen {
return 0, return 0, tracerr.Wrap(fmt.Errorf("can not parse BatchNumFromBytes, bytes len %d, expected %d", len(b), batchNumBytesLen))
tracerr.Wrap(fmt.Errorf("can not parse BatchNumFromBytes, bytes len %d, expected %d",
len(b), batchNumBytesLen))
} }
batchNum := binary.BigEndian.Uint64(b[:batchNumBytesLen]) batchNum := binary.BigEndian.Uint64(b[:batchNumBytesLen])
return BatchNum(batchNum), nil return BatchNum(batchNum), nil
@@ -82,7 +59,6 @@ type BatchData struct {
L1CoordinatorTxs []L1Tx L1CoordinatorTxs []L1Tx
L2Txs []L2Tx L2Txs []L2Tx
CreatedAccounts []Account CreatedAccounts []Account
UpdatedAccounts []AccountUpdate
ExitTree []ExitInfo ExitTree []ExitInfo
Batch Batch Batch Batch
} }

View File

@@ -34,7 +34,7 @@ type Slot struct {
// BatchesLen int // BatchesLen int
BidValue *big.Int BidValue *big.Int
BootCoord bool BootCoord bool
// Bidder, Forger and URL correspond to the winner of the slot (which is // Bidder, Forer and URL correspond to the winner of the slot (which is
// not always the highest bidder). These are the values of the // not always the highest bidder). These are the values of the
// coordinator that is able to forge exclusively before the deadline. // coordinator that is able to forge exclusively before the deadline.
Bidder ethCommon.Address Bidder ethCommon.Address

View File

@@ -5,15 +5,10 @@ import (
) )
// Coordinator represents a Hermez network coordinator who wins an auction for an specific slot // Coordinator represents a Hermez network coordinator who wins an auction for an specific slot
// WARNING: this is strongly based on the previous implementation, once the new spec is done, this // WARNING: this is strongly based on the previous implementation, once the new spec is done, this may change a lot.
// may change a lot.
type Coordinator struct { type Coordinator struct {
// Bidder is the address of the bidder Bidder ethCommon.Address `meddler:"bidder_addr"` // address of the bidder
Bidder ethCommon.Address `meddler:"bidder_addr"` Forger ethCommon.Address `meddler:"forger_addr"` // address of the forger
// Forger is the address of the forger EthBlockNum int64 `meddler:"eth_block_num"` // block in which the coordinator was registered
Forger ethCommon.Address `meddler:"forger_addr"` URL string `meddler:"url"` // URL of the coordinators API
// EthBlockNum is the block in which the coordinator was registered
EthBlockNum int64 `meddler:"eth_block_num"`
// URL of the coordinators API
URL string `meddler:"url"`
} }

View File

@@ -33,8 +33,7 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum { if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot) return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
// This result will be negative return -1
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
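For intuition (illustrative numbers, not from the contracts): with GenesisBlockNum = 1000 and BlocksPerSlot = 40, block 1085 falls in slot (1085 - 1000) / 40 = 2, while a pre-genesis block such as 900 yields (900 - 1000) / 40 = -2 on the left-hand branch, whereas the right-hand version simply returns -1 for any pre-genesis block.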
// SlotBlocks returns the first and the last block numbers included in that slot // SlotBlocks returns the first and the last block numbers included in that slot
@@ -68,13 +67,11 @@ type AuctionVariables struct {
ClosedAuctionSlots uint16 `meddler:"closed_auction_slots" validate:"required"` ClosedAuctionSlots uint16 `meddler:"closed_auction_slots" validate:"required"`
// Distance (#slots) to the farthest slot to which you can bid (30 days = 4320 slots ) // Distance (#slots) to the farthest slot to which you can bid (30 days = 4320 slots )
OpenAuctionSlots uint16 `meddler:"open_auction_slots" validate:"required"` OpenAuctionSlots uint16 `meddler:"open_auction_slots" validate:"required"`
// How the HEZ tokens deposited by the slot winner are distributed (Burn: 40% - Donation: // How the HEZ tokens deposited by the slot winner are distributed (Burn: 40% - Donation: 40% - HGT: 20%)
// 40% - HGT: 20%)
AllocationRatio [3]uint16 `meddler:"allocation_ratio,json" validate:"required"` AllocationRatio [3]uint16 `meddler:"allocation_ratio,json" validate:"required"`
// Minimum outbid (percentage) over the previous one to consider it valid // Minimum outbid (percentage) over the previous one to consider it valid
Outbidding uint16 `meddler:"outbidding" validate:"required"` Outbidding uint16 `meddler:"outbidding" validate:"required"`
// Number of blocks at the end of a slot in which any coordinator can forge if the winner // Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
// has not forged one before
SlotDeadline uint8 `meddler:"slot_deadline" validate:"required"` SlotDeadline uint8 `meddler:"slot_deadline" validate:"required"`
} }

View File

@@ -20,22 +20,19 @@ const (
// RollupConstExitIDx IDX 1 is reserved for exits // RollupConstExitIDx IDX 1 is reserved for exits
RollupConstExitIDx = 1 RollupConstExitIDx = 1
// RollupConstLimitTokens Max number of tokens allowed to be registered inside the rollup // RollupConstLimitTokens Max number of tokens allowed to be registered inside the rollup
RollupConstLimitTokens = (1 << 32) //nolint:gomnd RollupConstLimitTokens = (1 << 32)
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] // RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
// compressedSignature
RollupConstL1CoordinatorTotalBytes = 101 RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 // RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] // [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
// tokenId + [6 bytes] toIdx RollupConstL1UserTotalBytes = 72
RollupConstL1UserTotalBytes = 78
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch // RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128 RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch // RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
RollupConstMaxL1Tx = 256 RollupConstMaxL1Tx = 256
// RollupConstInputSHAConstantBytes [6 bytes] lastIdx + [6 bytes] newLastIdx + [32 bytes] // RollupConstInputSHAConstantBytes [6 bytes] lastIdx + [6 bytes] newLastIdx + [32 bytes] stateRoot + [32 bytes] newStRoot + [32 bytes] newExitRoot +
// stateRoot + [32 bytes] newStRoot + [32 bytes] newExitRoot + [_MAX_L1_TX * // [_MAX_L1_TX * _L1_USER_TOTALBYTES bytes] l1TxsData + totalL2TxsDataLength + feeIdxCoordinatorLength + [2 bytes] chainID =
// _L1_USER_TOTALBYTES bytes] l1TxsData + totalL2TxsDataLength + feeIdxCoordinatorLength + // 18542 bytes + totalL2TxsDataLength + feeIdxCoordinatorLength
// [2 bytes] chainID = 18542 bytes + totalL2TxsDataLength + feeIdxCoordinatorLength
RollupConstInputSHAConstantBytes = 18546 RollupConstInputSHAConstantBytes = 18546
// RollupConstNumBuckets Number of buckets // RollupConstNumBuckets Number of buckets
RollupConstNumBuckets = 5 RollupConstNumBuckets = 5
@@ -47,18 +44,14 @@ const (
var ( var (
// RollupConstLimitDepositAmount Max deposit amount allowed (depositAmount: L1 --> L2) // RollupConstLimitDepositAmount Max deposit amount allowed (depositAmount: L1 --> L2)
RollupConstLimitDepositAmount, _ = new(big.Int).SetString( RollupConstLimitDepositAmount, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10)
"340282366920938463463374607431768211456", 10)
// RollupConstLimitL2TransferAmount Max amount allowed (amount L2 --> L2) // RollupConstLimitL2TransferAmount Max amount allowed (amount L2 --> L2)
RollupConstLimitL2TransferAmount, _ = new(big.Int).SetString( RollupConstLimitL2TransferAmount, _ = new(big.Int).SetString("6277101735386680763835789423207666416102355444464034512896", 10)
"6277101735386680763835789423207666416102355444464034512896", 10)
// RollupConstEthAddressInternalOnly This ethereum address is used internally for rollup // RollupConstEthAddressInternalOnly This ethereum address is used internally for rollup accounts that don't have ethereum address, only Babyjubjub
// accounts that don't have ethereum address, only Babyjubjub. // These non-ethereum accounts can be created by the coordinator and allow users to have a rollup
// These non-ethereum accounts can be created by the coordinator and allow users to have a // account without needing an ethereum address
// rollup account without needing an ethereum address RollupConstEthAddressInternalOnly = ethCommon.HexToAddress("0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF")
RollupConstEthAddressInternalOnly = ethCommon.HexToAddress(
"0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF")
// RollupConstRfield Modulus zkSNARK // RollupConstRfield Modulus zkSNARK
RollupConstRfield, _ = new(big.Int).SetString( RollupConstRfield, _ = new(big.Int).SetString(
"21888242871839275222246405745257275088548364400416034343698204186575808495617", 10) "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
@@ -70,32 +63,24 @@ var (
// RollupConstRecipientInterfaceHash ERC777 recipient interface hash // RollupConstRecipientInterfaceHash ERC777 recipient interface hash
RollupConstRecipientInterfaceHash = crypto.Keccak256([]byte("ERC777TokensRecipient")) RollupConstRecipientInterfaceHash = crypto.Keccak256([]byte("ERC777TokensRecipient"))
// RollupConstPerformL1UserTxSignature the signature of the function that can be called thru // RollupConstPerformL1UserTxSignature the signature of the function that can be called thru an ERC777 `send`
// an ERC777 `send` RollupConstPerformL1UserTxSignature = crypto.Keccak256([]byte("addL1Transaction(uint256,uint48,uint16,uint16,uint32,uint48)"))
RollupConstPerformL1UserTxSignature = crypto.Keccak256([]byte( // RollupConstAddTokenSignature the signature of the function that can be called thru an ERC777 `send`
"addL1Transaction(uint256,uint48,uint16,uint16,uint32,uint48)"))
// RollupConstAddTokenSignature the signature of the function that can be called thru an
// ERC777 `send`
RollupConstAddTokenSignature = crypto.Keccak256([]byte("addToken(address)")) RollupConstAddTokenSignature = crypto.Keccak256([]byte("addToken(address)"))
// RollupConstSendSignature ERC777 Signature // RollupConstSendSignature ERC777 Signature
RollupConstSendSignature = crypto.Keccak256([]byte("send(address,uint256,bytes)")) RollupConstSendSignature = crypto.Keccak256([]byte("send(address,uint256,bytes)"))
// RollupConstERC777Granularity ERC777 Signature // RollupConstERC777Granularity ERC777 Signature
RollupConstERC777Granularity = crypto.Keccak256([]byte("granularity()")) RollupConstERC777Granularity = crypto.Keccak256([]byte("granularity()"))
// RollupConstWithdrawalDelayerDeposit This constant are used to deposit tokens from ERC77 // RollupConstWithdrawalDelayerDeposit This constant are used to deposit tokens from ERC77 tokens into withdrawal delayer
// tokens into withdrawal delayer
RollupConstWithdrawalDelayerDeposit = crypto.Keccak256([]byte("deposit(address,address,uint192)")) RollupConstWithdrawalDelayerDeposit = crypto.Keccak256([]byte("deposit(address,address,uint192)"))
// ERC20 signature // ERC20 signature
// RollupConstTransferSignature This constant is used in the _safeTransfer internal method // RollupConstTransferSignature This constant is used in the _safeTransfer internal method in order to safe GAS.
// in order to safe GAS.
RollupConstTransferSignature = crypto.Keccak256([]byte("transfer(address,uint256)")) RollupConstTransferSignature = crypto.Keccak256([]byte("transfer(address,uint256)"))
// RollupConstTransferFromSignature This constant is used in the _safeTransfer internal // RollupConstTransferFromSignature This constant is used in the _safeTransfer internal method in order to safe GAS.
// method in order to safe GAS. RollupConstTransferFromSignature = crypto.Keccak256([]byte("transferFrom(address,address,uint256)"))
RollupConstTransferFromSignature = crypto.Keccak256([]byte( // RollupConstApproveSignature This constant is used in the _safeTransfer internal method in order to safe GAS.
"transferFrom(address,address,uint256)"))
// RollupConstApproveSignature This constant is used in the _safeTransfer internal method in
// order to safe GAS.
RollupConstApproveSignature = crypto.Keccak256([]byte("approve(address,uint256)")) RollupConstApproveSignature = crypto.Keccak256([]byte("approve(address,uint256)"))
// RollupConstERC20Signature ERC20 decimals signature // RollupConstERC20Signature ERC20 decimals signature
RollupConstERC20Signature = crypto.Keccak256([]byte("decimals()")) RollupConstERC20Signature = crypto.Keccak256([]byte("decimals()"))
@@ -156,7 +141,6 @@ type TokenExchange struct {
} }
// RollupVariables are the variables of the Rollup Smart Contract // RollupVariables are the variables of the Rollup Smart Contract
//nolint:lll
type RollupVariables struct { type RollupVariables struct {
EthBlockNum int64 `meddler:"eth_block_num"` EthBlockNum int64 `meddler:"eth_block_num"`
FeeAddToken *big.Int `meddler:"fee_add_token,bigint" validate:"required"` FeeAddToken *big.Int `meddler:"fee_add_token,bigint" validate:"required"`

View File

@@ -27,7 +27,6 @@ type WDelayerEscapeHatchWithdrawal struct {
} }
// WDelayerVariables are the variables of the Withdrawal Delayer Smart Contract // WDelayerVariables are the variables of the Withdrawal Delayer Smart Contract
//nolint:lll
type WDelayerVariables struct { type WDelayerVariables struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
// HermezRollupAddress ethCommon.Address `json:"hermezRollupAddress" meddler:"rollup_address"` // HermezRollupAddress ethCommon.Address `json:"hermezRollupAddress" meddler:"rollup_address"`

common/float16.go Normal file
View File

@@ -0,0 +1,131 @@
// Package common Float16 provides methods to work with Hermez custom half float
// precision, 16 bits, codification internally called Float16 has been adopted
// to encode large integers. This is done in order to save bits when L2
// transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
var (
// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
ErrRoundingLoss = errors.New("input value causes rounding loss")
)
// Float16 represents a float in a 16 bit format
type Float16 uint16
// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
func (f16 Float16) Bytes() []byte {
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(f16))
return b[:]
}
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}
// BigInt converts the Float16 to a *big.Int integer
func (f16 *Float16) BigInt() *big.Int {
fl := int64(*f16)
m := big.NewInt(fl & 0x3FF)
e := big.NewInt(fl >> 11)
e5 := (fl >> 10) & 0x01
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
res := m.Mul(m, exp)
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
res.Add(res, exp.Div(exp, big.NewInt(2)))
}
return res
}
// floorFix2Float converts a fix to a float, always rounding down
func floorFix2Float(_f *big.Int) Float16 {
zero := big.NewInt(0)
ten := big.NewInt(10)
e := int64(0)
m := big.NewInt(0)
m.Set(_f)
if m.Cmp(zero) == 0 {
return 0
}
s := big.NewInt(0).Rsh(m, 10)
for s.Cmp(zero) != 0 {
m.Div(m, ten)
s.Rsh(m, 10)
e++
}
return Float16(m.Int64() | e<<11)
}
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
// case of loss during the encoding.
func NewFloat16(f *big.Int) (Float16, error) {
fl1 := floorFix2Float(f)
fi1 := fl1.BigInt()
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
m3 := (fl1 & 0x3FF) + 1
e3 := fl1 >> 11
if m3&0x400 == 0 {
m3 = 0x66
e3++
}
fl3 := m3 + e3<<11
fi3 := fl3.BigInt()
res := fl1
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
if d.Cmp(d2) == 1 {
res = fl2
d = d2
}
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
if d.Cmp(d3) == 1 {
res = fl3
}
// Do rounding check
if res.BigInt().Cmp(f) == 0 {
return res, nil
}
return res, tracerr.Wrap(ErrRoundingLoss)
}
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
// case of loss during the encoding.
func NewFloat16Floor(f *big.Int) Float16 {
fl1 := floorFix2Float(f)
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
if fi2.Cmp(f) < 1 {
return fl2
}
return fl1
}
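As a concrete check of the Float16 codification above, here is a small self-contained sketch that walks the test vector 0x307B through the bit layout (top 5 bits exponent, bit 10 half-exponent flag, low 10 bits mantissa), mirroring what `Float16.BigInt` does:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Decode 0x307B by hand following the Float16 layout.
	fl := int64(0x307B)
	m := big.NewInt(fl & 0x3FF)   // mantissa: 123
	e := big.NewInt(fl >> 11)     // exponent: 6
	half := (fl >> 10) & 0x01     // half-exponent flag: 0
	exp := new(big.Int).Exp(big.NewInt(10), e, nil)
	res := new(big.Int).Mul(m, exp) // 123 * 10^6
	if half != 0 && e.Sign() != 0 {
		res.Add(res, new(big.Int).Div(exp, big.NewInt(2)))
	}
	fmt.Println(res) // 123000000, matching the 0x307B test vector below
}
```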

common/float16_test.go Normal file
View File

@@ -0,0 +1,132 @@
package common
import (
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
)
func TestConversions(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
0xFFFF: "10235000000000000000000000000000000",
0x0000: "0",
0x0400: "0",
0x0001: "1",
0x0401: "1",
0x0800: "0",
0x0c00: "5",
0x0801: "10",
0x0c01: "15",
}
for test := range testVector {
fix := test.BigInt()
assert.Equal(t, fix.String(), testVector[test])
bi := big.NewInt(0)
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.Equal(t, nil, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2Float(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
"87950000000000000": 0x776f,
"87949999999999999": 0x736f,
}
for test := range testVector {
bi := big.NewInt(0)
bi.SetString(test, 10)
testFloat := NewFloat16Floor(bi)
assert.Equal(t, testFloat, testVector[test])
}
}
func TestConversionLosses(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.Equal(t, nil, err)
c := b.BigInt()
assert.Equal(t, c, a)
a = big.NewInt(1024)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32767)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32768)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(65536000)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
}
func BenchmarkFloat16(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Bad big int")
}
return bigInt
}
type pair struct {
Float16 Float16
BigInt *big.Int
}
testVector := []pair{
{0x307B, newBigInt("123000000")},
{0x1DC6, newBigInt("454500")},
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
{0x0000, newBigInt("0")},
{0x0400, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1")},
{0x0800, newBigInt("0")},
{0x0c00, newBigInt("5")},
{0x0801, newBigInt("10")},
{0x0c01, newBigInt("15")},
}
b.Run("floorFix2Float()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
}
})
b.Run("NewFloat16()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float16.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
testVector[i%len(testVector)].Float16.BigInt()
}
})
}

View File

@@ -1,128 +0,0 @@
// Package common float40.go provides methods to work with Hermez custom half
// float precision, 40 bits, codification internally called Float40 has been
// adopted to encode large integers. This is done in order to save bits when L2
// transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
)
var (
// ErrFloat40Overflow is used when a given Float40 overflows the
// maximum capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
ErrFloat40E31 = errors.New("Float40 error, e > 31")
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as Float40 due not enough precission
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
thres = big.NewInt(0x08_00_00_00_00)
)
// Float40 represents a float in a 64 bit format
type Float40 uint64
// Bytes return a byte array of length 5 with the Float40 value encoded in
// BigEndian
func (f40 Float40) Bytes() ([]byte, error) {
if f40 > maxFloat40Value {
return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
}
var f40Bytes [8]byte
binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
var b [5]byte
copy(b[:], f40Bytes[3:])
return b[:], nil
}
// Float40FromBytes returns a Float40 from a byte array of 5 bytes in Bigendian
// representation.
func Float40FromBytes(b []byte) Float40 {
var f40Bytes [8]byte
copy(f40Bytes[3:], b[:])
f40 := binary.BigEndian.Uint64(f40Bytes[:])
return Float40(f40)
}
// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
// [ e | m ]
// [ 5 bits | 35 bits ]
func (f40 Float40) BigInt() (*big.Int, error) {
// take the 5 used bytes (FF * 5)
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
e := f40Bytes[0] & 0xF8 >> 3 // take first 5 bits
m := f40Uint64 & 0x07_FF_FF_FF_FF // take the others 35 bits
exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
return r, nil
}
// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
// of loss during the encoding.
func NewFloat40(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, tracerr.Wrap(ErrFloat40E31)
}
if m.Cmp(thres) >= 0 {
return 0, tracerr.Wrap(ErrFloat40NotEnoughPrecission)
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
// NewFloat40Floor encodes a *big.Int integer as a Float40, rounding down in
// case of loss during the encoding. It returns an error in case that the number
// is too big (e>31). Warning: this method should not be used inside the
// hermez-node, it's a helper for external usage to generate valid Float40
// values.
func NewFloat40Floor(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
// zero := big.NewInt(0)
ten := big.NewInt(10)
for m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, tracerr.Wrap(ErrFloat40E31)
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
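To make the removed Float40 codification concrete, a short standalone decode of one of its test vectors (2*0x800000000 + 4545 → 454500), following the `[ e | m ] = [ 5 bits | 35 bits ]`, v = m * 10^e layout documented above:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Float40 layout: value = m * 10^e, with e in the top 5 bits and m in the low 35 bits.
	f40 := uint64(2*0x800000000 + 4545)
	e := f40 >> 35              // 2
	m := f40 & 0x07_FF_FF_FF_FF // 4545
	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
	v := new(big.Int).Mul(big.NewInt(int64(m)), exp)
	fmt.Println(v) // 454500
}
```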

View File

@@ -1,146 +0,0 @@
package common
import (
"fmt"
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConversionsFloat40(t *testing.T) {
testVector := map[Float40]string{
6*0x800000000 + 123: "123000000",
2*0x800000000 + 4545: "454500",
30*0x800000000 + 10235: "10235000000000000000000000000000000",
0x000000000: "0",
0x800000000: "0",
0x0001: "1",
0x0401: "1025",
0x800000000 + 1: "10",
0xFFFFFFFFFF: "343597383670000000000000000000000000000000",
}
for test := range testVector {
fix, err := test.BigInt()
require.NoError(t, err)
assert.Equal(t, fix.String(), testVector[test])
bi, ok := new(big.Int).SetString(testVector[test], 10)
require.True(t, ok)
fl, err := NewFloat40(bi)
assert.NoError(t, err)
fx2, err := fl.BigInt()
require.NoError(t, err)
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestExpectError(t *testing.T) {
testVector := map[string]error{
"9922334455000000000000000000000000000000": nil,
"9922334455000000000000000000000000000001": ErrFloat40NotEnoughPrecission,
"9922334454999999999999999999999999999999": ErrFloat40NotEnoughPrecission,
"42949672950000000000000000000000000000000": nil,
"99223344556573838487575": ErrFloat40NotEnoughPrecission,
"992233445500000000000000000000000000000000": ErrFloat40E31,
"343597383670000000000000000000000000000000": nil,
"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383700000000000000000000000000000000": ErrFloat40E31,
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], tracerr.Unwrap(err))
}
}
func TestNewFloat40Floor(t *testing.T) {
testVector := map[string][]string{
// []string contains [NewFloat40 value, NewFloat40Floor value, recovered value];
// when the NewFloat40 value is expected to be 0, it is because an
// error is expected
"9922334455000000000000000000000000000000": {
"1040714485495", "1040714485495", "9922334455000000000000000000000000000000"},
"9922334455000000000000000000000000000001": { // Floor [2] will be same as prev line
"0", "1040714485495", "9922334455000000000000000000000000000000"},
"9922334454999999999999999999999999999999": {
"0", "1040714485494", "9922334454000000000000000000000000000000"},
"42949672950000000000000000000000000000000": {
"1069446856703", "1069446856703", "42949672950000000000000000000000000000000"},
"99223344556573838487575": {
"0", "456598933239", "99223344550000000000000"},
"992233445500000000000000000000000000000000": {
"0", "0", "0"}, // e>31, returns 0, err
"343597383670000000000000000000000000000000": {
"1099511627775", "1099511627775", "343597383670000000000000000000000000000000"},
"343597383680000000000000000000000000000000": {
"0", "0", "0"}, // e>31, returns 0, err
"1157073197879933027": {
"0", "286448638922", "1157073197800000000"},
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
f40, err := NewFloat40(bi)
if f40 == 0 {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, testVector[test][0], fmt.Sprint(uint64(f40)))
f40, err = NewFloat40Floor(bi)
if f40 == 0 {
assert.Equal(t, ErrFloat40E31, tracerr.Unwrap(err))
} else {
assert.NoError(t, err)
}
assert.Equal(t, testVector[test][1], fmt.Sprint(uint64(f40)))
bi2, err := f40.BigInt()
require.NoError(t, err)
assert.Equal(t, fmt.Sprint(testVector[test][2]), bi2.String())
}
}
func BenchmarkFloat40(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Can not convert string to *big.Int")
}
return bigInt
}
type pair struct {
Float40 Float40
BigInt *big.Int
}
testVector := []pair{
{6*0x800000000 + 123, newBigInt("123000000")},
{2*0x800000000 + 4545, newBigInt("454500")},
{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
{0x000000000, newBigInt("0")},
{0x800000000, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1025")},
{0x800000000 + 1, newBigInt("10")},
{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
}
b.Run("NewFloat40()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float40.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = testVector[i%len(testVector)].Float40.BigInt()
}
})
}
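
For readers new to the format: the vectors above imply a 40-bit encoding whose low 35 bits are a decimal mantissa and whose high 5 bits are a power-of-ten exponent, with the Floor variant silently dropping precision and only rejecting exponents above 31. A minimal sketch of both directions under that assumed layout (not the repo's implementation):

```
package main

import (
	"errors"
	"fmt"
	"math/big"
)

// decodeFloat40 returns mantissa * 10^exponent, taking the low 35 bits as the
// mantissa and the high 5 bits as the exponent (assumed layout, sketch only).
func decodeFloat40(f uint64) *big.Int {
	mantissa := new(big.Int).SetUint64(f & 0x7FFFFFFFF) // low 35 bits
	exponent := new(big.Int).SetUint64(f >> 35)         // high 5 bits
	pow := new(big.Int).Exp(big.NewInt(10), exponent, nil)
	return mantissa.Mul(mantissa, pow)
}

// newFloat40Floor divides by 10 until the mantissa fits in 35 bits, flooring
// away precision instead of returning an error; only e > 31 is rejected.
func newFloat40Floor(v *big.Int) (uint64, error) {
	m := new(big.Int).Set(v)
	limit := new(big.Int).Lsh(big.NewInt(1), 35) // mantissa must stay below 2^35
	ten := big.NewInt(10)
	e := uint64(0)
	for m.Cmp(limit) >= 0 {
		m.Div(m, ten) // integer division floors the trailing digits
		e++
	}
	if e > 31 {
		return 0, errors.New("exponent does not fit in 5 bits (e > 31)")
	}
	return e<<35 | m.Uint64(), nil
}

func main() {
	fmt.Println(decodeFloat40(6*0x800000000 + 123)) // 123000000, as in the vectors above

	v, _ := new(big.Int).SetString("1157073197879933027", 10)
	f, err := newFloat40Floor(v)
	fmt.Println(f, err)           // 286448638922 <nil>, matching the Floor test vector
	fmt.Println(decodeFloat40(f)) // 1157073197800000000
}
```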

View File

@@ -11,43 +11,42 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
const (
// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
L1UserTxBytesLen = 72
// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
L1CoordinatorTxBytesLen = 101
)
// L1Tx is a struct that represents a L1 tx // L1Tx is a struct that represents a L1 tx
type L1Tx struct { type L1Tx struct {
// Stored in DB: mandatory fields // Stored in DB: mandatory fields
// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of: // TxID (12 bytes) for L1Tx is:
// bytes: | 1 | 8 | 2 | 1 | // bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) | // values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type: // where type:
// - L1UserTx: 0 // - L1UserTx: 0
// - L1CoordinatorTx: 1 // - L1CoordinatorTx: 1
TxID TxID `meddler:"id"` TxID TxID `meddler:"id"`
// ToForgeL1TxsNum indicates in which the tx was forged / will be forged ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` Position int `meddler:"position"`
Position int `meddler:"position"` UserOrigin bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was originated by a coordinator. Note that this differs from the spec for implementation simplification purposes
// UserOrigin is set to true if the tx was originated by a user, false if it was FromIdx Idx `meddler:"from_idx,zeroisnull"` // FromIdx is used by L1Tx/Deposit to indicate the Idx receiver of the L1Tx.DepositAmount (deposit)
// originated by a coordinator. Note that this differs from the spec for implementation
// simplification purposes
UserOrigin bool `meddler:"user_origin"`
// FromIdx is used by L1Tx/Deposit to indicate the Idx receiver of the L1Tx.DepositAmount
// (deposit)
FromIdx Idx `meddler:"from_idx,zeroisnull"`
EffectiveFromIdx Idx `meddler:"effective_from_idx,zeroisnull"` EffectiveFromIdx Idx `meddler:"effective_from_idx,zeroisnull"`
FromEthAddr ethCommon.Address `meddler:"from_eth_addr,zeroisnull"` FromEthAddr ethCommon.Address `meddler:"from_eth_addr,zeroisnull"`
FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj,zeroisnull"` FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj,zeroisnull"`
// ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer ToIdx Idx `meddler:"to_idx"` // ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer
ToIdx Idx `meddler:"to_idx"` TokenID TokenID `meddler:"token_id"`
TokenID TokenID `meddler:"token_id"` Amount *big.Int `meddler:"amount,bigint"`
Amount *big.Int `meddler:"amount,bigint"`
// EffectiveAmount only applies to L1UserTx. // EffectiveAmount only applies to L1UserTx.
EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"` EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"`
DepositAmount *big.Int `meddler:"deposit_amount,bigint"` DepositAmount *big.Int `meddler:"deposit_amount,bigint"`
// EffectiveDepositAmount only applies to L1UserTx. // EffectiveDepositAmount only applies to L1UserTx.
EffectiveDepositAmount *big.Int `meddler:"effective_deposit_amount,bigintnull"` EffectiveDepositAmount *big.Int `meddler:"effective_deposit_amount,bigintnull"`
// Ethereum Block Number in which this L1Tx was added to the queue EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"` Type TxType `meddler:"type"`
Type TxType `meddler:"type"` BatchNum *BatchNum `meddler:"batch_num"`
BatchNum *BatchNum `meddler:"batch_num"`
} }
// NewL1Tx returns the given L1Tx with the TxId & Type parameters calculated // NewL1Tx returns the given L1Tx with the TxId & Type parameters calculated
@@ -180,38 +179,45 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits ] empty (userFee) // 1 byte // [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes // [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes // [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
// b[0:7] empty: no ToBJJSign, no fee, no nonce // b[0:7] empty: no ToBJJSign, no fee, no nonce
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[11:17], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[17:23], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
} }
// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability // BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ] // [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -225,17 +231,13 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
if tx.EffectiveAmount != nil { if tx.EffectiveAmount != nil {
amountFloat40, err := NewFloat40(tx.EffectiveAmount) amountFloat16, err := NewFloat16(tx.EffectiveAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
} }
// fee = 0 (as is L1Tx) // fee = 0 (as is L1Tx) b[10:11]
return b[:], nil return b[:], nil
} }
@@ -245,7 +247,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen] fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2] toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength] amountBytes := b[idxLen*2 : idxLen*2+2]
l1tx := L1Tx{} l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6)) fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -258,8 +260,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
l1tx.ToIdx = toIdx l1tx.ToIdx = toIdx
l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt() l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
return &l1tx, tracerr.Wrap(err) return &l1tx, nil
} }
// BytesGeneric returns the generic representation of a L1Tx. This method is // BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -267,7 +269,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case). // for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) { func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [RollupConstL1UserTotalBytes]byte var b [L1UserTxBytesLen]byte
copy(b[0:20], tx.FromEthAddr.Bytes()) copy(b[0:20], tx.FromEthAddr.Bytes())
if tx.FromBJJ != EmptyBJJComp { if tx.FromBJJ != EmptyBJJComp {
pkCompL := tx.FromBJJ pkCompL := tx.FromBJJ
@@ -279,33 +281,22 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[52:58], fromIdxBytes[:]) copy(b[52:58], fromIdxBytes[:])
depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes() copy(b[58:60], depositAmountFloat16.Bytes())
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[58:63], depositAmountFloat40Bytes) copy(b[60:62], amountFloat16.Bytes())
copy(b[62:66], tx.TokenID.Bytes())
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[63:68], amountFloat40Bytes)
copy(b[68:72], tx.TokenID.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[72:78], toIdxBytes[:]) copy(b[66:72], toIdxBytes[:])
return b[:], nil return b[:], nil
} }
@@ -322,7 +313,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
if tx.UserOrigin { if tx.UserOrigin {
return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx")) return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
} }
var b [RollupConstL1CoordinatorTotalBytes]byte var b [L1CoordinatorTxBytesLen]byte
v := compressedSignatureBytes[64] v := compressedSignatureBytes[64]
s := compressedSignatureBytes[32:64] s := compressedSignatureBytes[32:64]
r := compressedSignatureBytes[0:32] r := compressedSignatureBytes[0:32]
@@ -338,10 +329,8 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte // L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) { func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != RollupConstL1UserTotalBytes { if len(b) != L1UserTxBytesLen {
return nil, return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d",
68, len(b)))
} }
tx := &L1Tx{ tx := &L1Tx{
@@ -358,19 +347,13 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.FromIdx = fromIdx tx.FromIdx = fromIdx
tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt() tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
tx.Amount = Float16FromBytes(b[60:62]).BigInt()
tx.TokenID, err = TokenIDFromBytes(b[62:66])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[63:68]).BigInt() tx.ToIdx, err = IdxFromBytes(b[66:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.TokenID, err = TokenIDFromBytes(b[68:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[72:78])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -378,15 +361,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return tx, nil return tx, nil
} }
func signHash(data []byte) []byte {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
return ethCrypto.Keccak256([]byte(msg))
}
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
error) { if len(b) != L1CoordinatorTxBytesLen {
if len(b) != RollupConstL1CoordinatorTotalBytes { return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
return nil, tracerr.Wrap(
fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d",
101, len(b)))
} }
bytesMessage := []byte("I authorize this babyjubjub key for hermez rollup account creation")
tx := &L1Tx{ tx := &L1Tx{
UserOrigin: false, UserOrigin: false,
} }
@@ -407,20 +394,18 @@ func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommo
// L1CoordinatorTX ETH // L1CoordinatorTX ETH
// Ethereum adds 27 to v // Ethereum adds 27 to v
v = b[0] - byte(27) //nolint:gomnd v = b[0] - byte(27) //nolint:gomnd
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
var data []byte
data = append(data, bytesMessage...)
data = append(data, pkCompB...)
data = append(data, chainIDBytes[:]...)
data = append(data, hermezAddress.Bytes()...)
var signature []byte var signature []byte
signature = append(signature, r[:]...) signature = append(signature, r[:]...)
signature = append(signature, s[:]...) signature = append(signature, s[:]...)
signature = append(signature, v) signature = append(signature, v)
hash := signHash(data)
accCreationAuth := AccountCreationAuth{ pubKeyBytes, err := ethCrypto.Ecrecover(hash, signature)
BJJ: tx.FromBJJ,
}
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
if err != nil {
return nil, tracerr.Wrap(err)
}
pubKeyBytes, err := ethCrypto.Ecrecover(h, signature)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
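
As a reading aid for the 29-byte TxCompressedData layout described in the hunk above (the variant without the amount field), here is a hedged sketch of how the fields are packed into the big-endian buffer. Field widths come from the comment in the hunk; the helper name is illustrative, and the 0xc60be60f value standing in for SignatureConstantBytes is an assumption taken from the test vectors.

```
package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// Illustrative only: packs the 29-byte layout
// [ 7 empty | 4 tokenID | 6 toIdx | 6 fromIdx | 2 chainID | 4 signatureConstant ].
func packL1TxCompressedData(tokenID uint32, toIdx, fromIdx uint64, chainID uint16, sigConst [4]byte) *big.Int {
	var b [29]byte
	binary.BigEndian.PutUint32(b[7:11], tokenID)
	// indices are 48-bit values; keep only their low 6 bytes
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], toIdx)
	copy(b[11:17], idx[2:8])
	binary.BigEndian.PutUint64(idx[:], fromIdx)
	copy(b[17:23], idx[2:8])
	binary.BigEndian.PutUint16(b[23:25], chainID)
	copy(b[25:29], sigConst[:])
	return new(big.Int).SetBytes(b[:])
}

func main() {
	// Mirrors the L1Tx test case with fromIdx=1, toIdx=2, tokenID=3, chainID=0.
	v := packL1TxCompressedData(3, 2, 1, 0, [4]byte{0xc6, 0x0b, 0xe6, 0x0f})
	fmt.Printf("%x\n", v.Bytes()) // 030000000000020000000000010000c60be60f
}
```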

View File

@@ -29,8 +29,7 @@ func TestNewL1UserTx(t *testing.T) {
} }
l1Tx, err := NewL1Tx(l1Tx) l1Tx, err := NewL1Tx(l1Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x00a6cbae3b8661fb75b0919ca6605a02cfb04d9c6dd16870fa0fcdf01befa32768", assert.Equal(t, "0x00a6cbae3b8661fb75b0919ca6605a02cfb04d9c6dd16870fa0fcdf01befa32768", l1Tx.TxID.String())
l1Tx.TxID.String())
} }
func TestNewL1CoordinatorTx(t *testing.T) { func TestNewL1CoordinatorTx(t *testing.T) {
@@ -47,115 +46,68 @@ func TestNewL1CoordinatorTx(t *testing.T) {
} }
l1Tx, err := NewL1Tx(l1Tx) l1Tx, err := NewL1Tx(l1Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x01274482d73df4dab34a1b6740adfca347a462513aa14e82f27b12f818d1b68c84", assert.Equal(t, "0x01274482d73df4dab34a1b6740adfca347a462513aa14e82f27b12f818d1b68c84", l1Tx.TxID.String())
l1Tx.TxID.String())
} }
func TestL1TxCompressedData(t *testing.T) { func TestL1TxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation (using
// PoolL2Tx values)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
FromIdx: (1 << 48) - 1, FromIdx: 2,
ToIdx: (1 << 48) - 1, ToIdx: 3,
Amount: amount, Amount: big.NewInt(4),
TokenID: (1 << 32) - 1, TokenID: 5,
} }
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1)) chainID := uint16(0)
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err) assert.NoError(t, err)
expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{ // test vector value generated from javascript implementation
FromIdx: 0, expectedStr := "7307597389635308713748674793997299267459594577423"
ToIdx: 0, assert.Equal(t, expectedStr, txCompressedData.String())
Amount: big.NewInt(0), assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
TokenID: 0,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = L1Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err)
expectedStr = "7b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestBytesDataAvailability(t *testing.T) { func TestBytesDataAvailability(t *testing.T) {
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
ToIdx: (1 << 16) - 1, FromIdx: 2,
FromIdx: (1 << 16) - 1, ToIdx: 3,
EffectiveAmount: amount, Amount: big.NewInt(4),
TokenID: 5,
} }
txCompressedData, err := tx.BytesDataAvailability(16) txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData)) assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{ tx = L1Tx{
ToIdx: (1 << 32) - 1, FromIdx: 2,
FromIdx: (1 << 32) - 1, ToIdx: 3,
EffectiveAmount: amount, EffectiveAmount: big.NewInt(4),
TokenID: 5,
} }
txCompressedData, err = tx.BytesDataAvailability(32) txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData)) assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32) }
func TestL1TxFromDataAvailability(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
}
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx) assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{ tx = L1Tx{
ToIdx: 0, FromIdx: 2,
FromIdx: 0, ToIdx: 3,
EffectiveAmount: big.NewInt(0), EffectiveAmount: big.NewInt(4),
} }
txCompressedData, err = tx.BytesDataAvailability(32) txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 635,
FromIdx: 296,
EffectiveAmount: big.NewInt(1000000000000000000),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32) l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -201,8 +153,7 @@ func TestL1userTxByteParsers(t *testing.T) {
func TestL1TxByteParsersCompatibility(t *testing.T) { func TestL1TxByteParsersCompatibility(t *testing.T) {
// Data from compatibility test // Data from compatibility test
var pkComp babyjub.PublicKeyComp var pkComp babyjub.PublicKeyComp
pkCompB, err := pkCompB, err := hex.DecodeString("0dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a")
hex.DecodeString("0dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a")
require.NoError(t, err) require.NoError(t, err)
pkCompL := SwapEndianness(pkCompB) pkCompL := SwapEndianness(pkCompB)
err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL))) err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL)))
@@ -221,19 +172,20 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
UserOrigin: true, UserOrigin: true,
} }
expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
require.NoError(t, err)
encodedData, err := l1Tx.BytesUser() encodedData, err := l1Tx.BytesUser()
require.NoError(t, err) require.NoError(t, err)
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79e" + assert.Equal(t, expected, encodedData)
"e1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
assert.Equal(t, expected, hex.EncodeToString(encodedData))
} }
func TestL1CoordinatorTxByteParsers(t *testing.T) { func TestL1CoordinatorTxByteParsers(t *testing.T) {
hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe") hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
chainID := big.NewInt(1337) chainID := big.NewInt(1337)
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
privateKey, err := privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err) require.NoError(t, err)
publicKey := privateKey.Public() publicKey := privateKey.Public()
@@ -249,16 +201,18 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c") pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
err = pkComp.UnmarshalText(pkCompL) err = pkComp.UnmarshalText(pkCompL)
require.NoError(t, err) require.NoError(t, err)
bytesMessage1 := []byte("\x19Ethereum Signed Message:\n120")
bytesMessage2 := []byte("I authorize this babyjubjub key for hermez rollup account creation")
accCreationAuth := AccountCreationAuth{ babyjubB := SwapEndianness(pkComp[:])
EthAddr: fromEthAddr, var data []byte
BJJ: pkComp, data = append(data, bytesMessage1...)
} data = append(data, bytesMessage2...)
data = append(data, babyjubB[:]...)
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress) data = append(data, chainIDBytes...)
require.NoError(t, err) data = append(data, hermezAddress.Bytes()...)
hash := crypto.Keccak256Hash(data)
signature, err := crypto.Sign(h, privateKey) signature, err := crypto.Sign(hash.Bytes(), privateKey)
require.NoError(t, err) require.NoError(t, err)
// Ethereum adds 27 to v // Ethereum adds 27 to v
v := int(signature[64]) v := int(signature[64])
@@ -305,8 +259,7 @@ func TestL1CoordinatorTxByteParsersCompatibility(t *testing.T) {
signature = append(signature, v[:]...) signature = append(signature, v[:]...)
var pkComp babyjub.PublicKeyComp var pkComp babyjub.PublicKeyComp
pkCompB, err := pkCompB, err := hex.DecodeString("a2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c")
hex.DecodeString("a2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c")
require.NoError(t, err) require.NoError(t, err)
pkCompL := SwapEndianness(pkCompB) pkCompL := SwapEndianness(pkCompB)
err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL))) err = pkComp.UnmarshalText([]byte(hex.EncodeToString(pkCompL)))
@@ -321,9 +274,7 @@ func TestL1CoordinatorTxByteParsersCompatibility(t *testing.T) {
encodeData, err := l1Tx.BytesCoordinatorTx(signature) encodeData, err := l1Tx.BytesCoordinatorTx(signature)
require.NoError(t, err) require.NoError(t, err)
expected, err := utils.HexDecode("1b186d7122ff7f654cfed3156719774898d573900c86599a885a706" + expected, err := utils.HexDecode("1b186d7122ff7f654cfed3156719774898d573900c86599a885a706dbdffe5ea8cda71e5eb097e115405d84d1e7b464009b434b32c014a2df502d1f065ced8bc3ba2c2807ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c000000e7")
"dbdffe5ea8cda71e5eb097e115405d84d1e7b464009b434b32c014a2df502d1f065ced8bc3ba2c28" +
"07ee39c3b3378738cff85a46a9465bb8fcf44ea597c33da9719be7c259c000000e7")
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, encodeData) assert.Equal(t, expected, encodeData)
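
One side of the diff above rebuilds the signed message by hand (Ethereum personal-message prefix, authorization text, BabyJubJub key, chain ID, Hermez address) before recovering the signer with Ecrecover. A hedged sketch of that digest construction, assuming the go-ethereum crypto package; the payload order follows the test, and the BabyJubJub bytes are assumed to already be in the byte order the test produces with SwapEndianness.

```
package main

import (
	"fmt"

	ethCrypto "github.com/ethereum/go-ethereum/crypto"
)

// coordinatorAuthDigest sketches the personal-message hash checked in the test:
// keccak256 over the "\x19Ethereum Signed Message:\n<len>" prefix followed by
// the auth text, the 32-byte BabyJubJub key, the 2-byte chain ID and the
// 20-byte Hermez contract address (66 + 32 + 2 + 20 = 120 payload bytes).
func coordinatorAuthDigest(bjj [32]byte, chainID uint16, hermezAddr [20]byte) []byte {
	payload := []byte("I authorize this babyjubjub key for hermez rollup account creation")
	payload = append(payload, bjj[:]...)
	payload = append(payload, byte(chainID>>8), byte(chainID))
	payload = append(payload, hermezAddr[:]...)
	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(payload), payload)
	return ethCrypto.Keccak256([]byte(msg))
}

func main() {
	var bjj [32]byte
	var addr [20]byte
	fmt.Printf("%x\n", coordinatorAuthDigest(bjj, 1337, addr))
}
```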

View File

@@ -10,7 +10,7 @@ import (
// L2Tx is a struct that represents an already forged L2 tx // L2Tx is a struct that represents an already forged L2 tx
type L2Tx struct { type L2Tx struct {
// Stored in DB: mandatory fields // Stored in DB: mandatory fields
TxID TxID `meddler:"id"` TxID TxID `meddler:"id"`
BatchNum BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. BatchNum BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged.
Position int `meddler:"position"` Position int `meddler:"position"`
@@ -21,10 +21,9 @@ type L2Tx struct {
Amount *big.Int `meddler:"amount,bigint"` Amount *big.Int `meddler:"amount,bigint"`
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
// Nonce is filled by the TxProcessor // Nonce is filled by the TxProcessor
Nonce Nonce `meddler:"nonce"` Nonce Nonce `meddler:"nonce"`
Type TxType `meddler:"type"` Type TxType `meddler:"type"`
// EthBlockNum in which this L2Tx was added to the queue EthBlockNum int64 `meddler:"eth_block_num"` // EthereumBlockNumber in which this L2Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"`
} }
// NewL2Tx returns the given L2Tx with the TxId & Type parameters calculated // NewL2Tx returns the given L2Tx with the TxId & Type parameters calculated
@@ -90,15 +89,11 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
// TokenID // TokenID
b = append(b, tx.TokenID.Bytes()[:]...) b = append(b, tx.TokenID.Bytes()[:]...)
// Amount // Amount
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount)) return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
} }
amountFloat40Bytes, err := amountFloat40.Bytes() b = append(b, amountFloat16.Bytes()...)
if err != nil {
return txID, tracerr.Wrap(err)
}
b = append(b, amountFloat40Bytes...)
// Nonce // Nonce
nonceBytes, err := tx.Nonce.Bytes() nonceBytes, err := tx.Nonce.Bytes()
if err != nil { if err != nil {
@@ -175,11 +170,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
} }
// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ] // [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -193,16 +188,13 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
} }
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil { copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
return nil, tracerr.Wrap(err) b[idxLen*2+2] = byte(tx.Fee)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
return b[:], nil return b[:], nil
} }
@@ -227,10 +219,7 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt() tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
if err != nil { tx.Fee = FeeSelector(b[idxLen*2+2])
return nil, tracerr.Wrap(err)
}
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
return tx, nil return tx, nil
} }
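
A compact sketch of the Float40 data-availability layout from this hunk, [ fromIdx | toIdx | amountFloat40 | fee ], with idxLen = nLevels/8 bytes per index. The helper below is illustrative, not the repo API; the field widths are taken from the diff.

```
package main

import "fmt"

// encodeL2TxDA lays out the data-availability bytes as
// [ fromIdx | toIdx | amountFloat40 | fee ]; the 6-byte index inputs are
// truncated to their low nLevels/8 bytes, as in the hunk above (sketch only).
func encodeL2TxDA(fromIdx, toIdx [6]byte, amountFloat40 [5]byte, fee byte, nLevels int) []byte {
	idxLen := nLevels / 8
	b := make([]byte, idxLen*2+5+1)
	copy(b[0:idxLen], fromIdx[6-idxLen:])
	copy(b[idxLen:idxLen*2], toIdx[6-idxLen:])
	copy(b[idxLen*2:idxLen*2+5], amountFloat40[:])
	b[idxLen*2+5] = fee
	return b
}

func main() {
	// fromIdx=1061 (0x0425), toIdx=0, amount=420000000000 as Float40 0x10fa56ea00, fee=127
	out := encodeL2TxDA(
		[6]byte{0, 0, 0, 0, 0x04, 0x25}, [6]byte{},
		[5]byte{0x10, 0xfa, 0x56, 0xea, 0x00}, 127, 32)
	fmt.Printf("%x\n", out) // 000004250000000010fa56ea007f, as in the test vector
}
```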

View File

@@ -19,8 +19,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err := NewL2Tx(l2Tx) l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -31,8 +30,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -44,8 +42,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -57,8 +54,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 1, FromIdx: 1,
@@ -70,8 +66,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 999, FromIdx: 999,
@@ -83,8 +78,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 4444, FromIdx: 4444,
@@ -96,86 +90,25 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
l2Tx.TxID.String())
} }
func TestL2TxByteParsers(t *testing.T) { func TestL2TxByteParsers(t *testing.T) {
// test vectors values generated from javascript implementation amount := new(big.Int)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10) amount.SetString("79000000", 10)
require.True(t, ok)
l2Tx := &L2Tx{ l2Tx := &L2Tx{
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected := "ffffffffffffffffffff"
encodedData, err := l2Tx.BytesDataAvailability(16)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected = "ffffffffffffffffffffffffffff"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 0,
Amount: big.NewInt(0),
Fee: 0,
}
expected = "0000000000000000000000000000"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 1061,
Amount: big.NewInt(420000000000),
Fee: 127,
}
expected = "000004250000000010fa56ea007f"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 256, ToIdx: 256,
Amount: amount,
FromIdx: 257, FromIdx: 257,
Amount: big.NewInt(79000000),
Fee: 201, Fee: 201,
} }
expected = "00000101000001000004b571c0c9" // Data from the compatibility test
encodedData, err = l2Tx.BytesDataAvailability(32) expected := "00000101000001002b16c9"
encodedData, err := l2Tx.BytesDataAvailability(32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData)) assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32) decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData) assert.Equal(t, l2Tx, decodedData)
} }

View File

@@ -16,8 +16,7 @@ import (
// EmptyBJJComp contains the 32 byte array of a empty BabyJubJub PublicKey // EmptyBJJComp contains the 32 byte array of a empty BabyJubJub PublicKey
// Compressed. It is a valid point in the BabyJubJub curve, so does not give // Compressed. It is a valid point in the BabyJubJub curve, so does not give
// errors when being decompressed. // errors when being decompressed.
var EmptyBJJComp = babyjub.PublicKeyComp([32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, var EmptyBJJComp = babyjub.PublicKeyComp([32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
// PoolL2Tx is a struct that represents a L2Tx sent by an account to the // PoolL2Tx is a struct that represents a L2Tx sent by an account to the
// coordinator that is waiting to be forged // coordinator that is waiting to be forged
@@ -37,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"` ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"` ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"` State PoolL2TxState `meddler:"state"`
@@ -54,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"` RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"` RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"` RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"` RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"` AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -74,7 +73,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original Type doesn't match the correct one, return error // If original Type doesn't match the correct one, return error
if txTypeOld != "" && txTypeOld != tx.Type { if txTypeOld != "" && txTypeOld != tx.Type {
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s", return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
txTypeOld, tx.Type)) tx.Type, txTypeOld))
} }
txIDOld := tx.TxID txIDOld := tx.TxID
@@ -84,7 +83,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original TxID doesn't match the correct one, return error // If original TxID doesn't match the correct one, return error
if txIDOld != (TxID{}) && txIDOld != tx.TxID { if txIDOld != (TxID{}) && txIDOld != tx.TxID {
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s", return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
txIDOld.String(), tx.TxID.String())) tx.TxID.String(), txIDOld.String()))
} }
return tx, nil return tx, nil
@@ -101,8 +100,6 @@ func (tx *PoolL2Tx) SetType() error {
tx.Type = TxTypeTransferToBJJ tx.Type = TxTypeTransferToBJJ
} else if tx.ToEthAddr != FFAddr && tx.ToEthAddr != EmptyAddr { } else if tx.ToEthAddr != FFAddr && tx.ToEthAddr != EmptyAddr {
tx.Type = TxTypeTransferToEthAddr tx.Type = TxTypeTransferToEthAddr
} else {
return tracerr.Wrap(errors.New("malformed transaction"))
} }
} else { } else {
return tracerr.Wrap(errors.New("malformed transaction")) return tracerr.Wrap(errors.New("malformed transaction"))
@@ -125,13 +122,18 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes // [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
toBJJSign := byte(0) toBJJSign := byte(0)
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ) pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -147,18 +149,19 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[11:17], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[17:23], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -167,9 +170,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty // TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction // transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int { func TxCompressedDataEmpty(chainID uint16) *big.Int {
var b [29]byte var b [31]byte
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi return bi
} }
@@ -179,24 +182,19 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 40 bits ] amountFloat40 // 5 bytes // [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil { if tx.Amount == nil {
tx.Amount = big.NewInt(0) tx.Amount = big.NewInt(0)
} }
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() var b [25]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
toBJJSign := byte(0) toBJJSign := byte(0)
if tx.ToBJJ != EmptyBJJComp { if tx.ToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.ToBJJ) sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -212,17 +210,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:16], amountFloat40Bytes) copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[16:22], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[22:28], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -238,24 +236,19 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits ] rqUserFee // 1 byte // [ 8 bits ] rqUserFee // 1 byte
// [ 40 bits ] rqNonce // 5 bytes // [ 40 bits ] rqNonce // 5 bytes
// [ 32 bits ] rqTokenID // 4 bytes // [ 32 bits ] rqTokenID // 4 bytes
// [ 40 bits ] rqAmountFloat40 // 5 bytes // [ 16 bits ] rqAmountFloat16 // 2 bytes
// [ 48 bits ] rqToIdx // 6 bytes // [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes // [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil { if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0) tx.RqAmount = big.NewInt(0)
} }
amountFloat40, err := NewFloat40(tx.RqAmount) amountFloat16, err := NewFloat16(tx.RqAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() var b [25]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
rqToBJJSign := byte(0) rqToBJJSign := byte(0)
if tx.RqToBJJ != EmptyBJJComp { if tx.RqToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.RqToBJJ) sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -271,17 +264,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.RqTokenID.Bytes()) copy(b[7:11], tx.RqTokenID.Bytes())
copy(b[11:16], amountFloat40Bytes) copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.RqToIdx.Bytes() toIdxBytes, err := tx.RqToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[16:22], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.RqFromIdx.Bytes() fromIdxBytes, err := tx.RqFromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[22:28], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -294,20 +287,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
var e1B [25]byte
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
copy(e1B[5:25], tx.ToEthAddr[:])
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr) rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ) _, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -319,8 +299,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ) _, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
rqToEthAddr, rqToBJJY})
} }
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
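
In the Float40 variant of HashToSign shown above, the amount no longer travels inside TxCompressedData; it is packed together with the destination Ethereum address into a single field element (e1) before the Poseidon hash. A minimal sketch of that packing (illustrative helper, not the repo API; widths are taken from the hunk):

```
package main

import (
	"fmt"
	"math/big"
)

// e1Field packs [ 5 bytes amountFloat40 | 20 bytes toEthAddr ] into one
// 25-byte big-endian integer, as described in the hunk above (sketch only).
func e1Field(amountFloat40 [5]byte, toEthAddr [20]byte) *big.Int {
	var b [25]byte
	copy(b[0:5], amountFloat40[:])
	copy(b[5:25], toEthAddr[:])
	return new(big.Int).SetBytes(b[:])
}

func main() {
	var addr [20]byte
	addr[19] = 0x01
	fmt.Println(e1Field([5]byte{0, 0, 0, 0, 9}, addr))
}
```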

View File

@@ -21,113 +21,85 @@ func TestNewPoolL2Tx(t *testing.T) {
} }
poolL2Tx, err := NewPoolL2Tx(poolL2Tx) poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
poolL2Tx.TxID.String())
} }
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) { func TestTxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation chainID := uint16(0)
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign var sk babyjub.PrivateKey
_, err := hex.Decode(skPositive[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:],
[]byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
Nonce: (1 << 40) - 1,
Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
require.NoError(t, err)
expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err := tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
Nonce: 0,
Fee: 0,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
assert.Equal(t, "0", txCompressedDataV2.String())
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = PoolL2Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
require.NoError(t, err)
expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
FromIdx: 2, FromIdx: 2,
ToIdx: 3, ToIdx: 3,
TokenID: 4, Amount: big.NewInt(4),
Nonce: 5, TokenID: 5,
Fee: 6, Nonce: 6,
ToBJJ: skPositive.Public().Compress(), ToBJJ: sk.Public().Compress(),
} }
txCompressedData, err = tx.TxCompressedData(uint16(0)) txCompressedData, err := tx.TxCompressedData(chainID)
require.NoError(t, err) assert.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f" // test vector value generated from javascript implementation
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes())) expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
// using a different chainID
txCompressedData, err = tx.TxCompressedData(uint16(100))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public().Compress(),
}
rqTxCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestRqTxCompressedDataV2(t *testing.T) { func TestRqTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
tx := PoolL2Tx{ tx := PoolL2Tx{
RqFromIdx: 7, RqFromIdx: 7,
@@ -141,17 +113,19 @@ func TestRqTxCompressedDataV2(t *testing.T) {
txCompressedData, err := tx.RqTxCompressedDataV2() txCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err) assert.NoError(t, err)
// test vector value generated from javascript implementation // test vector value generated from javascript implementation
expectedStr := "110248805340524920412994530176819463725852160917809517418728390663" expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String()) assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10) expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes()) assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestHashToSign(t *testing.T) { func TestHashToSign(t *testing.T) {
chainID := uint16(0) chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: 2, FromIdx: 2,
ToIdx: 3, ToIdx: 3,
@@ -162,15 +136,13 @@ func TestHashToSign(t *testing.T) {
} }
toSign, err := tx.HashToSign(chainID) toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0b8abaf6b7933464e4450df2514da8b72606c02bf7f89bf6e54816fbda9d9d57", assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
hex.EncodeToString(toSign.Bytes()))
} }
func TestVerifyTxSignature(t *testing.T) { func TestVerifyTxSignature(t *testing.T) {
chainID := uint16(0) chainID := uint16(0)
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: 2, FromIdx: 2,
@@ -184,49 +156,18 @@ func TestVerifyTxSignature(t *testing.T) {
} }
toSign, err := tx.HashToSign(chainID) toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err) assert.NoError(t, err)
- assert.Equal(t,
- 	"3144939470626721092564692894890580265754250231349521601298746071096761507003",
- 	toSign.String())
+ assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
sig := sk.SignPoseidon(toSign) sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress() tx.Signature = sig.Compress()
assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress())) assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress()))
} }
func TestVerifyTxSignatureEthAddrWith0(t *testing.T) {
chainID := uint16(5)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:],
[]byte("02f0b4f87065af3797aaaf934e8b5c31563c17f2272fa71bd0146535bfbb4184"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 10659,
ToIdx: 0,
ToEthAddr: ethCommon.HexToAddress("0x0004308BD15Ead4F1173624dC289DBdcC806a309"),
Amount: big.NewInt(5000),
TokenID: 0,
Nonce: 946,
Fee: 231,
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
sig := sk.SignPoseidon(toSign)
assert.Equal(t,
"f208b8298d5f37148ac3c0c03703272ea47b9f836851bcf8dd5f7e4e3b336ca1d2f6e92ad85dc25f174daf7a0abfd5f71dead3f059b783f4c4b2f56a18a47000",
sig.Compress().String(),
)
tx.Signature = sig.Compress()
assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress()))
}
func TestDecompressEmptyBJJComp(t *testing.T) { func TestDecompressEmptyBJJComp(t *testing.T) {
pkComp := EmptyBJJComp pkComp := EmptyBJJComp
pk, err := pkComp.Decompress() pk, err := pkComp.Decompress()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, assert.Equal(t, "2957874849018779266517920829765869116077630550401372566248359756137677864698", pk.X.String())
"2957874849018779266517920829765869116077630550401372566248359756137677864698",
pk.X.String())
assert.Equal(t, "0", pk.Y.String()) assert.Equal(t, "0", pk.Y.String())
} }

View File

@@ -15,9 +15,8 @@ const tokenIDBytesLen = 4
// Token is a struct that represents an Ethereum token that is supported in Hermez network // Token is a struct that represents an Ethereum token that is supported in Hermez network
type Token struct { type Token struct {
TokenID TokenID `json:"id" meddler:"token_id"` TokenID TokenID `json:"id" meddler:"token_id"`
// EthBlockNum indicates the Ethereum block number in which this token was registered EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` // Ethereum block number in which this token was registered
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
EthAddr ethCommon.Address `json:"ethereumAddress" meddler:"eth_addr"` EthAddr ethCommon.Address `json:"ethereumAddress" meddler:"eth_addr"`
Name string `json:"name" meddler:"name"` Name string `json:"name" meddler:"name"`
Symbol string `json:"symbol" meddler:"symbol"` Symbol string `json:"symbol" meddler:"symbol"`
@@ -49,8 +48,7 @@ func (t TokenID) BigInt() *big.Int {
// TokenIDFromBytes returns TokenID from a byte array // TokenIDFromBytes returns TokenID from a byte array
func TokenIDFromBytes(b []byte) (TokenID, error) { func TokenIDFromBytes(b []byte) (TokenID, error) {
if len(b) != tokenIDBytesLen { if len(b) != tokenIDBytesLen {
return 0, tracerr.Wrap(fmt.Errorf("can not parse TokenID, bytes len %d, expected 4", return 0, tracerr.Wrap(fmt.Errorf("can not parse TokenID, bytes len %d, expected 4", len(b)))
len(b)))
} }
tid := binary.BigEndian.Uint32(b[:4]) tid := binary.BigEndian.Uint32(b[:4])
return TokenID(tid), nil return TokenID(tid), nil
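TokenIDFromBytes only accepts exactly 4 bytes and reads them as a big-endian uint32; a usage sketch in the style of the package tests (hypothetical test, not part of this diff):

```go
func TestTokenIDFromBytesExample(t *testing.T) {
	tid, err := TokenIDFromBytes([]byte{0x00, 0x00, 0x00, 0x0a})
	require.NoError(t, err)
	assert.Equal(t, TokenID(10), tid)

	// any other length is rejected with the "can not parse TokenID" error
	_, err = TokenIDFromBytes([]byte{0x0a})
	assert.Error(t, err)
}
```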

View File

@@ -15,12 +15,12 @@ import (
) )
const ( const (
// TxIDPrefixL1UserTx is the prefix that determines that the TxID is for // TXIDPrefixL1UserTx is the prefix that determines that the TxID is
// a L1UserTx // for a L1UserTx
//nolinter:gomnd //nolinter:gomnd
TxIDPrefixL1UserTx = byte(0) TxIDPrefixL1UserTx = byte(0)
// TxIDPrefixL1CoordTx is the prefix that determines that the TxID is // TXIDPrefixL1CoordTx is the prefix that determines that the TxID is
// for a L1CoordinatorTx // for a L1CoordinatorTx
//nolinter:gomnd //nolinter:gomnd
TxIDPrefixL1CoordTx = byte(1) TxIDPrefixL1CoordTx = byte(1)
@@ -51,8 +51,7 @@ func (txid *TxID) Scan(src interface{}) error {
return tracerr.Wrap(fmt.Errorf("can't scan %T into TxID", src)) return tracerr.Wrap(fmt.Errorf("can't scan %T into TxID", src))
} }
if len(srcB) != TxIDLen { if len(srcB) != TxIDLen {
return tracerr.Wrap(fmt.Errorf("can't scan []byte of len %d into TxID, need %d", return tracerr.Wrap(fmt.Errorf("can't scan []byte of len %d into TxID, need %d", len(srcB), TxIDLen))
len(srcB), TxIDLen))
} }
copy(txid[:], srcB) copy(txid[:], srcB)
return nil return nil
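A usage sketch of the Scanner implementation above (hypothetical test; TxIDLen matches the 33-byte arrays used in the tests further down):

```go
func TestTxIDScanExample(t *testing.T) {
	var id TxID
	src := make([]byte, TxIDLen)
	src[0] = TxIDPrefixL1UserTx
	require.NoError(t, id.Scan(src)) // a []byte of length TxIDLen is accepted

	// a []byte of any other length is rejected
	assert.Error(t, id.Scan([]byte{0x01, 0x02}))
}
```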
@@ -88,7 +87,7 @@ func (txid TxID) MarshalText() ([]byte, error) {
return []byte(txid.String()), nil return []byte(txid.String()), nil
} }
// UnmarshalText unmarshalls a TxID // UnmarshalText unmarshals a TxID
func (txid *TxID) UnmarshalText(data []byte) error { func (txid *TxID) UnmarshalText(data []byte) error {
idStr := string(data) idStr := string(data)
id, err := NewTxIDFromString(idStr) id, err := NewTxIDFromString(idStr)
@@ -103,15 +102,13 @@ func (txid *TxID) UnmarshalText(data []byte) error {
type TxType string type TxType string
const ( const (
// TxTypeExit represents L2->L1 token transfer. A leaf for this account appears in the exit // TxTypeExit represents L2->L1 token transfer. A leaf for this account appears in the exit tree of the block
// tree of the block
TxTypeExit TxType = "Exit" TxTypeExit TxType = "Exit"
// TxTypeTransfer represents L2->L2 token transfer // TxTypeTransfer represents L2->L2 token transfer
TxTypeTransfer TxType = "Transfer" TxTypeTransfer TxType = "Transfer"
// TxTypeDeposit represents L1->L2 transfer // TxTypeDeposit represents L1->L2 transfer
TxTypeDeposit TxType = "Deposit" TxTypeDeposit TxType = "Deposit"
// TxTypeCreateAccountDeposit represents creation of a new leaf in the state tree // TxTypeCreateAccountDeposit represents creation of a new leaf in the state tree (newAccount) + L1->L2 transfer
// (newAccount) + L1->L2 transfer
TxTypeCreateAccountDeposit TxType = "CreateAccountDeposit" TxTypeCreateAccountDeposit TxType = "CreateAccountDeposit"
// TxTypeCreateAccountDepositTransfer represents L1->L2 transfer + L2->L2 transfer // TxTypeCreateAccountDepositTransfer represents L1->L2 transfer + L2->L2 transfer
TxTypeCreateAccountDepositTransfer TxType = "CreateAccountDepositTransfer" TxTypeCreateAccountDepositTransfer TxType = "CreateAccountDepositTransfer"
@@ -127,31 +124,24 @@ const (
TxTypeTransferToBJJ TxType = "TransferToBJJ" TxTypeTransferToBJJ TxType = "TransferToBJJ"
) )
// Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx & // Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx & PoolL2Tx
// PoolL2Tx
type Tx struct { type Tx struct {
// Generic // Generic
IsL1 bool `meddler:"is_l1"` IsL1 bool `meddler:"is_l1"`
TxID TxID `meddler:"id"` TxID TxID `meddler:"id"`
Type TxType `meddler:"type"` Type TxType `meddler:"type"`
Position int `meddler:"position"` Position int `meddler:"position"`
FromIdx Idx `meddler:"from_idx"` FromIdx Idx `meddler:"from_idx"`
ToIdx Idx `meddler:"to_idx"` ToIdx Idx `meddler:"to_idx"`
Amount *big.Int `meddler:"amount,bigint"` Amount *big.Int `meddler:"amount,bigint"`
AmountFloat float64 `meddler:"amount_f"` AmountFloat float64 `meddler:"amount_f"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
USD *float64 `meddler:"amount_usd"` USD *float64 `meddler:"amount_usd"`
// BatchNum in which this tx was forged. If the tx is L2, this must be != 0 BatchNum *BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0
BatchNum *BatchNum `meddler:"batch_num"` EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
// Ethereum Block Number in which this L1Tx was added to the queue
EthBlockNum int64 `meddler:"eth_block_num"`
// L1 // L1
// ToForgeL1TxsNum in which the tx was forged / will be forged ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` UserOrigin *bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was originated by a coordinator. Note that this differs from the spec for implementation simplification purposes
// UserOrigin is set to true if the tx was originated by a user, false if it was originated
// by a coordinator. Note that this differs from the spec for implementation simplification
// purposes
UserOrigin *bool `meddler:"user_origin"`
FromEthAddr ethCommon.Address `meddler:"from_eth_addr"` FromEthAddr ethCommon.Address `meddler:"from_eth_addr"`
FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj"` FromBJJ babyjub.PublicKeyComp `meddler:"from_bjj"`
DepositAmount *big.Int `meddler:"deposit_amount,bigintnull"` DepositAmount *big.Int `meddler:"deposit_amount,bigintnull"`

View File

@@ -21,10 +21,8 @@ func TestSignatureConstant(t *testing.T) {
func TestTxIDScannerValue(t *testing.T) { func TestTxIDScannerValue(t *testing.T) {
txid0 := &TxID{} txid0 := &TxID{}
txid1 := &TxID{} txid1 := &TxID{}
txid0B := [TxIDLen]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, txid0B := [TxIDLen]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}
3, 4, 5, 6, 7, 8, 9, 0, 1, 2} txid1B := [TxIDLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
txid1B := [TxIDLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
copy(txid0[:], txid0B[:]) copy(txid0[:], txid0B[:])
copy(txid1[:], txid1B[:]) copy(txid1[:], txid1B[:])

View File

@@ -62,17 +62,3 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
} }
return siblings[:pos] return siblings[:pos]
} }
// TokensToUSD is a helper function to calculate the USD value of a certain
// amount of tokens considering the normalized token price (which is the price
// commonly reported by exchanges)
func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
amountF := new(big.Float).SetInt(amount)
// Divide by 10^decimals to normalize the amount
baseF := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
amountF.Mul(amountF, big.NewFloat(valueUSD))
amountF.Quo(amountF, baseF)
amountUSD, _ := amountF.Float64()
return amountUSD
}
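The removed helper divides the raw amount by 10^decimals before applying the exchange price, i.e. USD = amount / 10^decimals * valueUSD. Had it stayed in the package, a test-style usage with hypothetical values (1.5 units of an 18-decimals token priced at 2 USD) would be:

```go
func TestTokensToUSDExample(t *testing.T) {
	amount, ok := new(big.Int).SetString("1500000000000000000", 10) // 1.5 tokens with 18 decimals
	require.True(t, ok)
	assert.InDelta(t, 3.0, TokensToUSD(amount, 18, 2.0), 1e-9)
}
```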

View File

@@ -21,23 +21,16 @@ func TestBJJFromStringWithChecksum(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// expected values computed with js implementation // expected values computed with js implementation
assert.Equal(t, assert.Equal(t, "2492816973395423007340226948038371729989170225696553239457870892535792679622", pk.X.String())
"2492816973395423007340226948038371729989170225696553239457870892535792679622", assert.Equal(t, "15238403086306505038849621710779816852318505119327426213168494964113886299863", pk.Y.String())
pk.X.String())
assert.Equal(t,
"15238403086306505038849621710779816852318505119327426213168494964113886299863",
pk.Y.String())
} }
func TestRmEndingZeroes(t *testing.T) { func TestRmEndingZeroes(t *testing.T) {
s0, err := s0, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000000")
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err) require.NoError(t, err)
s1, err := s1, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000001")
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000001")
require.NoError(t, err) require.NoError(t, err)
s2, err := s2, err := merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000002")
merkletree.NewHashFromHex("0x0000000000000000000000000000000000000000000000000000000000000002")
require.NoError(t, err) require.NoError(t, err)
// expect cropped last zeroes // expect cropped last zeroes

View File

@@ -1,4 +1,4 @@
// Package common zk.go contains all the common data structures used at the // Package common contains all the common data structures used at the
// hermez-node, zk.go contains the zkSnark inputs used to generate the proof // hermez-node, zk.go contains the zkSnark inputs used to generate the proof
package common package common
@@ -67,7 +67,7 @@ type ZKInputs struct {
// accumulate fees // accumulate fees
// FeePlanTokens contains all the tokenIDs for which the fees are being // FeePlanTokens contains all the tokenIDs for which the fees are being
// accumulated and those fees accumulated will be paid to the FeeIdxs // accumulated and those fees accumulated will be paid to the FeeIdxs
// array. The order of FeeIdxs & FeePlanTokens & State3 must match. // array. The order of FeeIdxs & FeePlanTokens & State3 must match.
// Coordinator fees are processed correlated such as: // Coordinator fees are processed correlated such as:
// [FeePlanTokens[i], FeeIdxs[i]] // [FeePlanTokens[i], FeeIdxs[i]]
@@ -102,8 +102,6 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx] ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr // ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx] ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]
// OnChain determines if is L1 (1/true) or L2 (0/false) // OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx] OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -114,8 +112,8 @@ type ZKInputs struct {
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new // NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0) // account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx] NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float40 // DepositAmountF encoded as float16
DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx] DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// FromEthAddr // FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx] FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int // FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -130,8 +128,8 @@ type ZKInputs struct {
RqOffset []*big.Int `json:"rqOffset"` // uint8 (max 3 bits), len: [maxTx] RqOffset []*big.Int `json:"rqOffset"` // uint8 (max 3 bits), len: [maxTx]
// transaction L2 request data // transaction L2 request data
// RqTxCompressedDataV2 big.Int (max 251 bits), len: [maxTx] // RqTxCompressedDataV2
RqTxCompressedDataV2 []*big.Int `json:"rqTxCompressedDataV2"` RqTxCompressedDataV2 []*big.Int `json:"rqTxCompressedDataV2"` // big.Int (max 251 bits), len: [maxTx]
// RqToEthAddr // RqToEthAddr
RqToEthAddr []*big.Int `json:"rqToEthAddr"` // ethCommon.Address, len: [maxTx] RqToEthAddr []*big.Int `json:"rqToEthAddr"` // ethCommon.Address, len: [maxTx]
// RqToBJJAy // RqToBJJAy
@@ -301,8 +299,7 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) {
} }
// NewZKInputs returns a pointer to an initialized struct of ZKInputs // NewZKInputs returns a pointer to an initialized struct of ZKInputs
func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs {
currentNumBatch *big.Int) *ZKInputs {
zki := &ZKInputs{} zki := &ZKInputs{}
zki.Metadata.MaxFeeIdxs = maxFeeIdxs zki.Metadata.MaxFeeIdxs = maxFeeIdxs
zki.Metadata.MaxLevels = uint32(48) //nolint:gomnd zki.Metadata.MaxLevels = uint32(48) //nolint:gomnd
@@ -329,7 +326,6 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32,
zki.AuxToIdx = newSlice(maxTx) zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx) zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx) zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx) zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx) zki.NewAccount = newSlice(maxTx)
@@ -480,8 +476,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes()) copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...) b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData // [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528) //nolint:gomnd l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen) l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ { for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -498,23 +494,20 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
} }
b = append(b, l1TxsDataAvailability...) b = append(b, l1TxsDataAvailability...)
// [MAX_TX*(2*NLevels + 48) bits] L2TxsData // [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen) l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ { for i := 0; i < len(z.Metadata.L2TxsData); i++ {
l2TxsData = append(l2TxsData, z.Metadata.L2TxsData[i]...) l2TxsData = append(l2TxsData, z.Metadata.L2TxsData[i]...)
} }
if len(l2TxsData) > int(expectedL2TxsDataLen) { if len(l2TxsData) > int(expectedL2TxsDataLen) {
return nil, tracerr.Wrap(fmt.Errorf("len(l2TxsData): %d, expected: %d", return nil, tracerr.Wrap(fmt.Errorf("len(l2TxsData): %d, expected: %d", len(l2TxsData), expectedL2TxsDataLen))
len(l2TxsData), expectedL2TxsDataLen))
} }
b = append(b, l2TxsData...) b = append(b, l2TxsData...)
l2TxsPadding := make([]byte, l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
(int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-
len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
b = append(b, l2TxsPadding...) b = append(b, l2TxsPadding...)
// [NLevels * MAX_TOKENS_FEE bits] feeTxsData // [NLevels * MAX_TOKENS_FEE bits] feeTxsData
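The constants changed in this hunk track the amount encoding: one side uses ...+528 and ...+48 bits, the other ...+480 and ...+24 bits, and the 48-bit and 24-bit differences are consistent with the two float-encoded amounts of an L1 tx and the single amount of an L2 tx changing between float40 and float16. A quick feel for the resulting per-tx sizes, assuming MaxLevels = 48 (as set in NewZKInputs) and a hypothetical NLevels = 32:

```go
// txDataBytes returns the serialized size in bytes of one tx for the formula
// bits = 2*levels + fixed (arithmetic helper for this sketch only).
func txDataBytes(levels, fixed int) int { return (2*levels + fixed) / 8 }

// txDataBytes(48, 528) == 78 // L1 tx, float40 amounts
// txDataBytes(48, 480) == 72 // L1 tx, float16 amounts
// txDataBytes(32, 48)  == 14 // L2 tx, float40 amount + 8-bit fee
// txDataBytes(32, 24)  == 11 // L2 tx, float16 amount + 8-bit fee
```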

View File

@@ -3,7 +3,6 @@ package config
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big"
"time" "time"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
@@ -35,23 +34,10 @@ type ServerProof struct {
URL string `validate:"required"` URL string `validate:"required"`
} }
// ForgeBatchGasCost is the costs associated to a ForgeBatch transaction, split
// into different parts to be used in a formula.
type ForgeBatchGasCost struct {
Fixed uint64 `validate:"required"`
L1UserTx uint64 `validate:"required"`
L1CoordTx uint64 `validate:"required"`
L2Tx uint64 `validate:"required"`
}
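The comment only says these parts feed a formula; the natural reading (an assumption, the actual combination lives in the coordinator code) is a linear per-batch gas estimate:

```go
// estimateForgeBatchGas is a sketch of how the split costs are presumably combined;
// the counts are the number of txs of each kind included in the batch.
func estimateForgeBatchGas(c ForgeBatchGasCost, nL1User, nL1Coord, nL2 int) uint64 {
	return c.Fixed +
		uint64(nL1User)*c.L1UserTx +
		uint64(nL1Coord)*c.L1CoordTx +
		uint64(nL2)*c.L2Tx
}
```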
// Coordinator is the coordinator specific configuration. // Coordinator is the coordinator specific configuration.
type Coordinator struct { type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging // ForgerAddress is the address under which this coordinator is forging
ForgerAddress ethCommon.Address `validate:"required"` ForgerAddress ethCommon.Address `validate:"required"`
// MinimumForgeAddressBalance is the minimum balance the forger address
// needs to start the coordinator in wei. If set to 0, the coordinator
// will not check the balance before starting.
MinimumForgeAddressBalance *big.Int
// FeeAccount is the Hermez account that the coordinator uses to receive fees // FeeAccount is the Hermez account that the coordinator uses to receive fees
FeeAccount struct { FeeAccount struct {
// Address is the ethereum address of the account to receive fees // Address is the ethereum address of the account to receive fees
@@ -65,85 +51,33 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"` L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the // ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status // ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"` ProofServerPollInterval Duration `validate:"required"`
// ForgeRetryInterval is the waiting interval between calls forge a // ForgeRetryInterval is the waiting interval between calls forge a
// batch after an error // batch after an error
ForgeRetryInterval Duration `validate:"required"` ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"` SyncRetryInterval Duration `validate:"required"`
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval Duration `validate:"required"`
// L2DB is the DB that holds the pool of L2Txs // L2DB is the DB that holds the pool of L2Txs
L2DB struct { L2DB struct {
// SafetyPeriod is the number of batches after which // SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool // non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"` SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the maximum number of pending L2Txs that can be // MaxTxs is the number of L2Txs that once reached triggers
// stored in the pool. Once this number of pending L2Txs is // deletion of old L2Txs
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"` MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted. // L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"` TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge // PurgeBatchDelay is the delay between batches to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBatchDelay int64 `validate:"required"` PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBatchDelay int64 `validate:"required"` InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBlockDelay int64 `validate:"required"` PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBlockDelay int64 `validate:"required"` InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
TxSelector struct { TxSelector struct {
@@ -156,6 +90,7 @@ type Coordinator struct {
} `validate:"required"` } `validate:"required"`
ServerProofs []ServerProof `validate:"required"` ServerProofs []ServerProof `validate:"required"`
Circuit struct { Circuit struct {
// VerifierIdx uint8 `validate:"required"`
// MaxTx is the maximum number of txs supported by the circuit // MaxTx is the maximum number of txs supported by the circuit
MaxTx int64 `validate:"required"` MaxTx int64 `validate:"required"`
// NLevels is the maximum number of merkle tree levels // NLevels is the maximum number of merkle tree levels
@@ -163,13 +98,12 @@ type Coordinator struct {
NLevels int64 `validate:"required"` NLevels int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
EthClient struct { EthClient struct {
// MaxGasPrice is the maximum gas price allowed for ethereum // CallGasLimit is the default gas limit set for ethereum
// transactions // calls, except for methods where a particular gas limit is
MaxGasPrice *big.Int `validate:"required"` // hardcoded because it's known to be a big value
// GasPriceIncPerc is the percentage increase of gas price set CallGasLimit uint64 `validate:"required"`
// in an ethereum transaction from the suggested gas price by // GasPriceDiv is the gas price division
// the ethereum node GasPriceDiv uint64 `validate:"required"`
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt // CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"` CheckLoopInterval Duration `validate:"required"`
@@ -179,13 +113,6 @@ type Coordinator struct {
// AttemptsDelay is the delay between attempts to do an eth client // AttemptsDelay is the delay between attempts to do an eth client
// RPC call // RPC call
AttemptsDelay Duration `validate:"required"` AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept // Keystore is the ethereum keystore where private keys are kept
Keystore struct { Keystore struct {
// Path to the keystore // Path to the keystore
@@ -193,9 +120,6 @@ type Coordinator struct {
// Password used to decrypt the keys in the keystore // Password used to decrypt the keys in the keystore
Password string `validate:"required"` Password string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"` } `validate:"required"`
API struct { API struct {
// Coordinator enables the coordinator API endpoints // Coordinator enables the coordinator API endpoints
@@ -208,10 +132,6 @@ type Coordinator struct {
// LightScrypt if set, uses light parameters for the ethereum // LightScrypt if set, uses light parameters for the ethereum
// keystore encryption algorithm. // keystore encryption algorithm.
LightScrypt bool LightScrypt bool
// RollupVerifierIndex is the index of the verifier to use in
// the Rollup smart contract. The verifier chosen by index
// must match with the Circuit parameters.
RollupVerifierIndex *int
} }
} }
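For the EthClient settings above, GasPriceIncPerc is a percentage bump over the gas price suggested by the node and MaxGasPrice is an absolute cap; a minimal sketch of how the two would combine (hypothetical helper, the exact formula is an assumption):

```go
// adjustGasPrice raises the suggested price by incPerc percent and caps it at maxGasPrice.
func adjustGasPrice(suggested *big.Int, incPerc int64, maxGasPrice *big.Int) *big.Int {
	price := new(big.Int).Mul(suggested, big.NewInt(100+incPerc))
	price.Div(price, big.NewInt(100)) //nolint:gomnd
	if maxGasPrice != nil && price.Cmp(maxGasPrice) > 0 {
		price.Set(maxGasPrice)
	}
	return price
}
```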
@@ -231,30 +151,17 @@ type Node struct {
// Keep is the number of checkpoints to keep // Keep is the number of checkpoints to keep
Keep int `validate:"required"` Keep int `validate:"required"`
} `validate:"required"` } `validate:"required"`
// It's possible to use differentiated SQL connections for read/write.
// If the read configuration is not provided, the write one is going to be used
// for both reads and writes
PostgreSQL struct { PostgreSQL struct {
// Port of the PostgreSQL write server // Port of the PostgreSQL server
PortWrite int `validate:"required"` Port int `validate:"required"`
// Host of the PostgreSQL write server // Host of the PostgreSQL server
HostWrite string `validate:"required"` Host string `validate:"required"`
// User of the PostgreSQL write server // User of the PostgreSQL server
UserWrite string `validate:"required"` User string `validate:"required"`
// Password of the PostgreSQL write server // Password of the PostgreSQL server
PasswordWrite string `validate:"required"` Password string `validate:"required"`
// Name of the PostgreSQL write server database // Name of the PostgreSQL server database
NameWrite string `validate:"required"` Name string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
} `validate:"required"` } `validate:"required"`
Web3 struct { Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server // URL is the URL of the web3 ethereum-node RPC server
@@ -310,9 +217,6 @@ type Node struct {
// MeddlerLogs enables meddler debug mode, where unused columns and struct // MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged // fields will be logged
MeddlerLogs bool MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
} }
Coordinator Coordinator `validate:"-"` Coordinator Coordinator `validate:"-"`
} }
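A sketch of the read/write fallback described in the PostgreSQL comments above, i.e. use the write connection parameters whenever the optional read ones are left empty (hypothetical helper; the real wiring happens in the node setup code):

```go
// readParams returns the PostgreSQL read connection parameters, defaulting to the
// write ones when no read configuration is provided.
func readParams(cfg *Node) (port int, host, user, password, name string) {
	p := cfg.PostgreSQL
	if p.HostRead == "" {
		return p.PortWrite, p.HostWrite, p.UserWrite, p.PasswordWrite, p.NameWrite
	}
	return p.PortRead, p.HostRead, p.UserRead, p.PasswordRead, p.NameRead
}
```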

View File

@@ -8,7 +8,6 @@ import (
"path" "path"
"time" "time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
@@ -48,8 +47,6 @@ type Debug struct {
MineBlockNum int64 MineBlockNum int64
// SendBlockNum is the blockNum when the batch was sent to ethereum // SendBlockNum is the blockNum when the batch was sent to ethereum
SendBlockNum int64 SendBlockNum int64
// ResendNum is the number of times the tx has been resent
ResendNum int
// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch // LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
// was scheduled // was scheduled
LastScheduledL1BatchBlockNum int64 LastScheduledL1BatchBlockNum int64
@@ -67,17 +64,10 @@ type Debug struct {
// StartToSendDelay is the delay between starting a batch and sending // StartToSendDelay is the delay between starting a batch and sending
// it to ethereum, in seconds // it to ethereum, in seconds
StartToSendDelay float64 StartToSendDelay float64
// StartToMineDelay is the delay between starting a batch and having
// it mined in seconds
StartToMineDelay float64
// SendToMineDelay is the delay between sending a batch tx and having
// it mined in seconds
SendToMineDelay float64
} }
// BatchInfo contains the Batch information // BatchInfo contains the Batch information
type BatchInfo struct { type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum BatchNum common.BatchNum
ServerProof prover.Client ServerProof prover.Client
ZKInputs *common.ZKInputs ZKInputs *common.ZKInputs
@@ -91,17 +81,10 @@ type BatchInfo struct {
L2Txs []common.L2Tx L2Txs []common.L2Tx
CoordIdxs []common.Idx CoordIdxs []common.Idx
ForgeBatchArgs *eth.RollupForgeBatchArgs ForgeBatchArgs *eth.RollupForgeBatchArgs
Auth *bind.TransactOpts `json:"-"` // FeesInfo
EthTx *types.Transaction EthTx *types.Transaction
EthTxErr error Receipt *types.Receipt
// SendTimestamp the time of batch sent to ethereum Debug Debug
SendTimestamp time.Time
Receipt *types.Receipt
// Fail is true if:
// - The receipt status is failed
// - A previous parent batch is failed
Fail bool
Debug Debug
} }
// DebugStore is a debug function to store the BatchInfo as a json text file in // DebugStore is a debug function to store the BatchInfo as a json text file in

View File

@@ -3,15 +3,14 @@ package coordinator
import ( import (
"context" "context"
"fmt" "fmt"
"math/big"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
@@ -24,10 +23,7 @@ import (
) )
var ( var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet") errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf(
"no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
) )
const ( const (
@@ -46,81 +42,26 @@ type Config struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 L1BatchTimeoutPerc float64
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
SendBatchBlocksMarginCheck int64
// EthClientAttempts is the number of attempts to do an eth client RPC // EthClientAttempts is the number of attempts to do an eth client RPC
// call before giving up // call before giving up
EthClientAttempts int EthClientAttempts int
// ForgeRetryInterval is the waiting interval between calls forge a // ForgeRetryInterval is the waiting interval between calls forge a
// batch after an error // batch after an error
ForgeRetryInterval time.Duration ForgeRetryInterval time.Duration
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay time.Duration
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval time.Duration SyncRetryInterval time.Duration
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval time.Duration
// EthClientAttemptsDelay is the delay between attempts to do an eth client // EthClientAttemptsDelay is the delay between attempts to do an eth client
// RPC call // RPC call
EthClientAttemptsDelay time.Duration EthClientAttemptsDelay time.Duration
// EthTxResendTimeout is the timeout after which a non-mined ethereum
// transaction will be resent (reusing the nonce) with a newly
// calculated gas price
EthTxResendTimeout time.Duration
// EthNoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
EthNoReuseNonce bool
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int
// GasPriceIncPerc is the percentage increase of gas price set in an
// ethereum transaction from the suggested gas price by the ethereum
// node
GasPriceIncPerc int64
// TxManagerCheckInterval is the waiting interval between receipt // TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration TxManagerCheckInterval time.Duration
// DebugBatchPath if set, specifies the path where batchInfo is stored // DebugBatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
DebugBatchPath string DebugBatchPath string
Purger PurgerCfg Purger PurgerCfg
// VerifierIdx is the index of the verifier contract registered in the VerifierIdx uint8
// smart contract
VerifierIdx uint8
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost config.ForgeBatchGasCost
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
@@ -133,22 +74,15 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
} }
} }
type fromBatch struct {
BatchNum common.BatchNum
ForgerAddr ethCommon.Address
StateRoot *big.Int
}
// Coordinator implements the Coordinator type // Coordinator implements the Coordinator type
type Coordinator struct { type Coordinator struct {
// State // State
pipelineNum int // Pipeline sequential number. The first pipeline is 1 pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
pipelineFromBatch fromBatch // batch from which we started the pipeline provers []prover.Client
provers []prover.Client consts synchronizer.SCConsts
consts synchronizer.SCConsts vars synchronizer.SCVariables
vars synchronizer.SCVariables stats synchronizer.Stats
stats synchronizer.Stats started bool
started bool
cfg Config cfg Config
@@ -162,17 +96,7 @@ type Coordinator struct {
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
// mutexL2DBUpdateDelete protects updates to the L2DB so that pipeline *Pipeline
// these two processes always happen exclusively:
// - Pipeline taking pending txs, running through the TxProcessor and
// marking selected txs as forging
// - Coordinator deleting pending txs that have been marked with
// `external_delete`.
// Without this mutex, the coordinator could delete a pending tx that
// has just been selected by the TxProcessor in the pipeline.
mutexL2DBUpdateDelete sync.Mutex
pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum
purger *Purger purger *Purger
txManager *TxManager txManager *TxManager
@@ -215,15 +139,10 @@ func NewCoordinator(cfg Config,
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{ c := Coordinator{
pipelineNum: 0, pipelineBatchNum: -1,
pipelineFromBatch: fromBatch{ provers: serverProofs,
BatchNum: 0, consts: *scConsts,
ForgerAddr: ethCommon.Address{}, vars: *initSCVars,
StateRoot: big.NewInt(0),
},
provers: serverProofs,
consts: *scConsts,
vars: *initSCVars,
cfg: cfg, cfg: cfg,
@@ -264,10 +183,8 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
} }
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) { func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
c.pipelineNum++ return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector, c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
c.provers, &c.consts)
} }
// MsgSyncBlock indicates an update to the Synchronizer stats // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -288,9 +205,6 @@ type MsgSyncReorg struct {
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct { type MsgStopPipeline struct {
Reason string Reason string
// FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum
} }
// SendMsg is a thread safe method to pass a message to the Coordinator // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -301,36 +215,27 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
if update.Auction != nil {
vars.Auction = *update.Auction
}
if update.WDelayer != nil {
vars.WDelayer = *update.WDelayer
}
}
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) { func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&c.vars, vars) if vars.Rollup != nil {
c.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
c.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
c.vars.WDelayer = *vars.WDelayer
}
} }
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables, func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool { currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
return false
}
var slot *common.Slot var slot *common.Slot
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock { if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
slot = currentSlot slot = currentSlot
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock { } else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
slot = nextSlot slot = nextSlot
} else { } else {
log.Warnw("canForge: requested blockNum is outside current and next slot", log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
"blockNum", blockNum, "currentSlot", currentSlot, "blockNum", blockNum, "currentSlot", currentSlot,
"nextSlot", nextSlot, "nextSlot", nextSlot,
) )
@@ -339,23 +244,16 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
anyoneForge := false anyoneForge := false
if !slot.ForgerCommitment && if !slot.ForgerCommitment &&
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) { auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)", log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
"block", blockNum) "block", blockNum)
anyoneForge = true anyoneForge = true
} }
if slot.Forger == addr || anyoneForge { if slot.Forger == addr || anyoneForge {
return true return true
} }
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
return false return false
} }
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum)
}
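To make the forging rule above concrete: the slot winner can always forge, and once the slot deadline has passed with no ForgerCommitment anyone can. A stripped-down mirror of that decision with plain values instead of the real types (illustration only):

```go
// canForgeSimple mirrors the rule in canForge: the slot winner, or anyone after the
// deadline when no commitment has been made in the slot.
func canForgeSimple(slotForger, addr string, forgerCommitment bool,
	relativeBlock, slotDeadline int64) bool {
	anyoneForge := !forgerCommitment && relativeBlock >= slotDeadline
	return slotForger == addr || anyoneForge
}

// canForgeSimple("0xwinner", "0xother", false, 25, 20) == true  (deadline passed, anyone may forge)
// canForgeSimple("0xwinner", "0xother", false, 10, 20) == false (only the slot winner may forge)
```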
func (c *Coordinator) canForge() bool { func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1 blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction, return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -364,51 +262,21 @@ func (c *Coordinator) canForge() bool {
} }
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error { func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
nextBlock := c.stats.Eth.LastBlock.Num + 1 canForge := c.canForge()
canForge := c.canForgeAt(nextBlock)
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
}
if c.pipeline == nil { if c.pipeline == nil {
relativeBlock := c.consts.Auction.RelativeBlock(nextBlock) if canForge {
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugf("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge {
log.Infow("Coordinator: forging state begin", "block", log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum) stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
fromBatch := fromBatch{ batchNum := common.BatchNum(stats.Sync.LastBatch)
BatchNum: stats.Sync.LastBatch.BatchNum,
ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
StateRoot: stats.Sync.LastBatch.StateRoot,
}
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
fromBatch.BatchNum = c.lastNonFailedBatchNum
fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0)
}
// Before starting the pipeline make sure we reset any
// l2tx from the pool that was forged in a batch that
// didn't end up being mined. We are already doing
// this in handleStopPipeline, but we do it again as a
// failsafe in case the last synced batchnum is
// different than in the previous call to l2DB.Reorg,
// or in case the node was restarted when there was a
// started batch that included l2txs but was not mined.
if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
return tracerr.Wrap(err)
}
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineFromBatch = fromBatch if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
// Start the pipeline
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineBatchNum = batchNum
} }
} else { } else {
if !canForge { if !canForge {
@@ -418,12 +286,25 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
} }
} }
if c.pipeline == nil { if c.pipeline == nil {
if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(), // Mark invalid in Pool due to forged L2Txs
stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil { // for _, batch := range batches {
// if err := c.l2DB.InvalidateOldNonces(
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
// return err
// }
// }
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
}
_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, _, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
int64(stats.Sync.LastBatch.BatchNum)); err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -450,44 +331,33 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars) c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
} }
if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress && if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil || // There's been a reorg and the batch from which the pipeline
c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) { // was started was in a block that was discarded. The batch
// There's been a reorg and the batch state root from which the // may not be in the main chain, so we stop the pipeline as a
// pipeline was started has changed (probably because it was in // precaution (it will be started again once the node is in
// a block that was discarded), and it was sent by a different // sync).
// coordinator than us. That batch may never be in the main log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
// chain, so we stop the pipeline (it will be started again "sync.LastBatch", c.stats.Sync.LastBatch,
// once the node is in sync). "c.pipelineBatchNum", c.pipelineBatchNum)
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+ if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0, func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
// the next pipeline will start from the last state of the synchronizer, if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
// otherwise, it will state from failedBatchNum-1. return tracerr.Wrap(err)
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string,
failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1
} }
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.Stop(c.ctx) c.pipeline.Stop(c.ctx)
c.pipeline = nil c.pipeline = nil
} }
if err := c.l2DB.Reorg(batchNum); err != nil { if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
return tracerr.Wrap(err) // TODO: Check that we are in a slot in which we can't forge
} }
c.lastNonFailedBatchNum = batchNum
return nil return nil
} }
@@ -503,7 +373,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
} }
case MsgStopPipeline: case MsgStopPipeline:
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason) log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil { if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err)) return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
} }
default: default:
@@ -526,7 +396,7 @@ func (c *Coordinator) Start() {
c.wg.Add(1) c.wg.Add(1)
go func() { go func() {
timer := time.NewTimer(longWaitDuration) waitDuration := longWaitDuration
for { for {
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
@@ -538,45 +408,23 @@ func (c *Coordinator) Start() {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.handleMsg", "err", err) log.Errorw("Coordinator.handleMsg", "err", err)
if !timer.Stop() { waitDuration = c.cfg.SyncRetryInterval
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
case <-timer.C: waitDuration = longWaitDuration
timer.Reset(longWaitDuration) case <-time.After(waitDuration):
if !c.stats.Synced() { if !c.stats.Synced() {
waitDuration = longWaitDuration
continue continue
} }
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil { if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.syncStats", "err", err) log.Errorw("Coordinator.syncStats", "err", err)
if !timer.Stop() { waitDuration = c.cfg.SyncRetryInterval
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
} waitDuration = longWaitDuration
}
}()
c.wg.Add(1)
go func() {
for {
select {
case <-c.ctx.Done():
log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
c.wg.Done()
return
case <-time.After(c.cfg.PurgeByExtDelInterval):
c.mutexL2DBUpdateDelete.Lock()
if err := c.l2DB.PurgeByExternalDelete(); err != nil {
log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
}
c.mutexL2DBUpdateDelete.Unlock()
} }
} }
}() }()
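
The loops above trade time.After for a reusable time.Timer. As a standalone illustration of the stop-drain-reset idiom that avoids waking up on a stale tick (not project code):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	const longWait = 50 * time.Millisecond
	const retryWait = 10 * time.Millisecond

	timer := time.NewTimer(longWait)

	// Case 1: the timer fired and its channel was read, so a plain Reset is safe.
	<-timer.C
	timer.Reset(retryWait)

	// Case 2: re-arm a timer that may not have fired yet. Stop it first and
	// drain the channel if Stop reports it already fired; otherwise a stale
	// tick would wake the loop immediately after Reset.
	if !timer.Stop() {
		<-timer.C
	}
	timer.Reset(longWait)

	<-timer.C
	fmt.Println("timer fired after re-arm")
}
```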

View File

@@ -97,16 +97,15 @@ func newTestModules(t *testing.T) modules {
syncDBPath, err = ioutil.TempDir("", "tmpSyncDB") syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
deleteme = append(deleteme, syncDBPath) deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128, syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
Type: statedb.TypeSynchronizer, NLevels: 48})
assert.NoError(t, err) assert.NoError(t, err)
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB") txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err) require.NoError(t, err)
@@ -126,8 +125,7 @@ func newTestModules(t *testing.T) modules {
batchBuilderDBPath, err = ioutil.TempDir("", "tmpBatchBuilderDB") batchBuilderDBPath, err = ioutil.TempDir("", "tmpBatchBuilderDB")
require.NoError(t, err) require.NoError(t, err)
deleteme = append(deleteme, batchBuilderDBPath) deleteme = append(deleteme, batchBuilderDBPath)
batchBuilder, err := batchbuilder.NewBatchBuilder(batchBuilderDBPath, syncStateDB, 0, batchBuilder, err := batchbuilder.NewBatchBuilder(batchBuilderDBPath, syncStateDB, 0, uint64(nLevels))
uint64(nLevels))
assert.NoError(t, err) assert.NoError(t, err)
return modules{ return modules{
@@ -206,7 +204,7 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
func newTestSynchronizer(t *testing.T, ethClient *test.Client, ethClientSetup *test.ClientSetup, func newTestSynchronizer(t *testing.T, ethClient *test.Client, ethClientSetup *test.ClientSetup,
modules modules) *synchronizer.Synchronizer { modules modules) *synchronizer.Synchronizer {
sync, err := synchronizer.NewSynchronizer(ethClient, modules.historyDB, modules.l2DB, modules.stateDB, sync, err := synchronizer.NewSynchronizer(ethClient, modules.historyDB, modules.stateDB,
synchronizer.Config{ synchronizer.Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
@@ -262,8 +260,8 @@ func TestCoordinatorFlow(t *testing.T) {
var stats synchronizer.Stats var stats synchronizer.Stats
stats.Eth.LastBlock = *ethClient.CtlLastBlock() stats.Eth.LastBlock = *ethClient.CtlLastBlock()
stats.Sync.LastBlock = stats.Eth.LastBlock stats.Sync.LastBlock = stats.Eth.LastBlock
stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch() stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum) stats.Sync.LastBatch = stats.Eth.LastBatch
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1) canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
require.NoError(t, err) require.NoError(t, err)
var slot common.Slot var slot common.Slot
@@ -280,7 +278,7 @@ func TestCoordinatorFlow(t *testing.T) {
// Copy stateDB to synchronizer if there was a new batch // Copy stateDB to synchronizer if there was a new batch
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch) source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch) dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
if stats.Sync.LastBatch.BatchNum != 0 { if stats.Sync.LastBatch != 0 {
if _, err := os.Stat(dest); os.IsNotExist(err) { if _, err := os.Stat(dest); os.IsNotExist(err) {
log.Infow("Making pebble checkpoint for sync", log.Infow("Making pebble checkpoint for sync",
"source", source, "dest", dest) "source", source, "dest", dest)
@@ -518,7 +516,7 @@ func TestCoordinatorStress(t *testing.T) {
wg.Add(1) wg.Add(1)
go func() { go func() {
for { for {
blockData, _, err := syn.Sync(ctx, nil) blockData, _, err := syn.Sync2(ctx, nil)
if ctx.Err() != nil { if ctx.Err() != nil {
wg.Done() wg.Done()
return return

View File

@@ -2,7 +2,6 @@ package coordinator
import ( import (
"context" "context"
"database/sql"
"fmt" "fmt"
"math/big" "math/big"
"sync" "sync"
@@ -25,36 +24,25 @@ type statsVars struct {
Vars synchronizer.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
type state struct {
batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
lastSlotForged int64
}
// Pipeline manages the forging of batches with parallel server proofs // Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct { type Pipeline struct {
num int
cfg Config cfg Config
consts synchronizer.SCConsts consts synchronizer.SCConsts
// state // state
state state batchNum common.BatchNum
started bool lastScheduledL1BatchBlockNum int64
rw sync.RWMutex lastForgeL1TxsNum int64
errAtBatchNum common.BatchNum started bool
lastForgeTime time.Time
proversPool *ProversPool proversPool *ProversPool
provers []prover.Client provers []prover.Client
coord *Coordinator txManager *TxManager
txManager *TxManager historyDB *historydb.HistoryDB
historyDB *historydb.HistoryDB l2DB *l2db.L2DB
l2DB *l2db.L2DB txSelector *txselector.TxSelector
txSelector *txselector.TxSelector batchBuilder *batchbuilder.BatchBuilder
batchBuilder *batchbuilder.BatchBuilder purger *Purger
mutexL2DBUpdateDelete *sync.Mutex
purger *Purger
stats synchronizer.Stats stats synchronizer.Stats
vars synchronizer.SCVariables vars synchronizer.SCVariables
@@ -65,29 +53,14 @@ type Pipeline struct {
cancel context.CancelFunc cancel context.CancelFunc
} }
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
p.rw.Lock()
defer p.rw.Unlock()
p.errAtBatchNum = batchNum
}
func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
p.rw.RLock()
defer p.rw.RUnlock()
return p.errAtBatchNum
}
// NewPipeline creates a new Pipeline // NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context, func NewPipeline(ctx context.Context,
cfg Config, cfg Config,
num int, // Pipeline sequential number
historyDB *historydb.HistoryDB, historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB, l2DB *l2db.L2DB,
txSelector *txselector.TxSelector, txSelector *txselector.TxSelector,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
mutexL2DBUpdateDelete *sync.Mutex,
purger *Purger, purger *Purger,
coord *Coordinator,
txManager *TxManager, txManager *TxManager,
provers []prover.Client, provers []prover.Client,
scConsts *synchronizer.SCConsts, scConsts *synchronizer.SCConsts,
@@ -106,26 +79,22 @@ func NewPipeline(ctx context.Context,
return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool")) return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
} }
return &Pipeline{ return &Pipeline{
num: num, cfg: cfg,
cfg: cfg, historyDB: historyDB,
historyDB: historyDB, l2DB: l2DB,
l2DB: l2DB, txSelector: txSelector,
txSelector: txSelector, batchBuilder: batchBuilder,
batchBuilder: batchBuilder, provers: provers,
provers: provers, proversPool: proversPool,
proversPool: proversPool, purger: purger,
mutexL2DBUpdateDelete: mutexL2DBUpdateDelete, txManager: txManager,
purger: purger, consts: *scConsts,
coord: coord, statsVarsCh: make(chan statsVars, queueLen),
txManager: txManager,
consts: *scConsts,
statsVarsCh: make(chan statsVars, queueLen),
}, nil }, nil
} }
// SetSyncStatsVars is a thread safe method to set the synchronizer Stats	// SetSyncStatsVars is a thread safe method to set the synchronizer Stats
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
vars *synchronizer.SCVariablesPtr) {
select { select {
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
@@ -135,121 +104,68 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state // reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error { stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.state = state{ p.batchNum = batchNum
batchNum: batchNum, p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
lastScheduledL1BatchBlockNum: 0,
lastSlotForged: -1,
}
p.stats = *stats p.stats = *stats
p.vars = *vars p.vars = *vars
p.lastScheduledL1BatchBlockNum = 0
// Reset the StateDB in TxSelector and BatchBuilder from the err := p.txSelector.Reset(p.batchNum)
// synchronizer only if the checkpoint we reset from either:
// a. Doesn't exist in the TxSelector/BatchBuilder
// b. The batch has already been synced by the synchronizer and has a
// different MTRoot than the BatchBuilder
// Otherwise, reset from the local checkpoint.
// First attempt to reset from local checkpoint if such checkpoint exists
existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
fromSynchronizerTxSelector := !existsTxSelector err = p.batchBuilder.Reset(p.batchNum, true)
if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
return tracerr.Wrap(err)
}
existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
fromSynchronizerBatchBuilder := !existsBatchBuilder
if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
return tracerr.Wrap(err)
}
// After reset, check that if the batch exists in the historyDB, the
// stateRoot matches with the local one, if not, force a reset from
// synchronizer
batch, err := p.historyDB.GetBatch(p.state.batchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
// nothing to do
} else if err != nil {
return tracerr.Wrap(err)
} else {
localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
if batch.StateRoot.Cmp(localStateRoot) != 0 {
log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
// StateRoot from synchronizer doesn't match StateRoot
// from batchBuilder, force a reset from synchronizer
if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
}
}
return nil return nil
} }
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) { func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&p.vars, vars) if vars.Rollup != nil {
p.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
p.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
p.vars.WDelayer = *vars.WDelayer
}
} }
// handleForgeBatch waits for an available proof server, calls p.forgeBatch to // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
// forge the batch and get the zkInputs, and then sends the zkInputs to the // and then waits for an available proof server and sends the zkInputs to it so
// selected proof server so that the proof computation begins. // that the proof computation begins.
func (p *Pipeline) handleForgeBatch(ctx context.Context, func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
batchNum common.BatchNum) (batchInfo *BatchInfo, err error) { batchInfo, err := p.forgeBatch(batchNum)
// 1. Wait for an available serverProof (blocking call)
serverProof, err := p.proversPool.Get(ctx)
if ctx.Err() != nil {
return nil, ctx.Err()
} else if err != nil {
log.Errorw("proversPool.Get", "err", err)
return nil, tracerr.Wrap(err)
}
defer func() {
// If we encounter any error (notice that this function returns
// errors to notify that a batch is not forged not only because
// of unexpected errors but also due to benign causes), add the
// serverProof back to the pool
if err != nil {
p.proversPool.Add(ctx, serverProof)
}
}()
// 2. Forge the batch internally (make a selection of txs and prepare
// all the smart contract arguments)
p.mutexL2DBUpdateDelete.Lock()
batchInfo, err = p.forgeBatch(batchNum)
p.mutexL2DBUpdateDelete.Unlock()
if ctx.Err() != nil { if ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
if tracerr.Unwrap(err) == errLastL1BatchNotSynced { if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err, log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum, "lastForgeL1TxsNum", p.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum) "syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else { } else {
log.Errorw("forgeBatch", "err", err) log.Errorw("forgeBatch", "err", err)
} }
return nil, tracerr.Wrap(err) return nil, err
}
// 6. Wait for an available server proof (blocking call)
serverProof, err := p.proversPool.Get(ctx)
if ctx.Err() != nil {
return nil, ctx.Err()
} else if err != nil {
log.Errorw("proversPool.Get", "err", err)
return nil, err
} }
// 3. Send the ZKInputs to the proof server
batchInfo.ServerProof = serverProof batchInfo.ServerProof = serverProof
if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil { if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
log.Errorw("sendServerProof", "err", err) log.Errorw("sendServerProof", "err", err)
return nil, tracerr.Wrap(err) batchInfo.ServerProof = nil
p.proversPool.Add(ctx, serverProof)
return nil, err
} }
return batchInfo, nil return batchInfo, nil
} }
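
The prover handling above acquires a server from the pool before forging and gives it back whenever the attempt fails. A generic sketch of that acquire/return-on-error pattern, using a toy channel-backed pool rather than the node's ProversPool:

```
package main

import (
	"context"
	"errors"
	"fmt"
)

// pool is a toy worker pool backed by a buffered channel.
type pool struct{ ch chan int }

func (p *pool) Get(ctx context.Context) (int, error) {
	select {
	case w := <-p.ch:
		return w, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func (p *pool) Add(w int) { p.ch <- w }

// doWork acquires a worker and, on any error, returns it to the pool through
// the deferred check, so a failed attempt never leaks the worker.
func doWork(ctx context.Context, p *pool, fail bool) (err error) {
	w, err := p.Get(ctx)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			p.Add(w)
		}
	}()
	if fail {
		return errors.New("work failed")
	}
	fmt.Println("worker", w, "handed off to the caller")
	return nil
}

func main() {
	p := &pool{ch: make(chan int, 1)}
	p.Add(1)
	fmt.Println(doWork(context.Background(), p, true))  // worker goes back to the pool
	fmt.Println(doWork(context.Background(), p, false)) // worker stays with the caller
}
```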
@@ -272,7 +188,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Add(1) p.wg.Add(1)
go func() { go func() {
timer := time.NewTimer(zeroDuration) waitDuration := zeroDuration
for { for {
select { select {
case <-p.ctx.Done(): case <-p.ctx.Done():
@@ -282,42 +198,18 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case statsVars := <-p.statsVarsCh: case statsVars := <-p.statsVarsCh:
p.stats = statsVars.Stats p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars) p.syncSCVars(statsVars.Vars)
case <-timer.C: case <-time.After(waitDuration):
timer.Reset(p.cfg.ForgeRetryInterval) batchNum = p.batchNum + 1
// Once errAtBatchNum != 0, we stop forging if batchInfo, err := p.handleForgeBatch(p.ctx, batchNum); err != nil {
// batches because there's been an error and we waitDuration = p.cfg.SyncRetryInterval
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
continue continue
} else {
p.batchNum = batchNum
select {
case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done():
}
} }
batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil {
continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
continue
} else if err != nil {
p.setErrAtBatchNum(batchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.handleForgBatch: %v", err),
FailedBatchNum: batchNum,
})
continue
}
p.lastForgeTime = time.Now()
p.state.batchNum = batchNum
select {
case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done():
}
if !timer.Stop() {
<-timer.C
}
timer.Reset(zeroDuration)
} }
} }
}() }()
@@ -331,27 +223,16 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Done() p.wg.Done()
return return
case batchInfo := <-batchChSentServerProof: case batchInfo := <-batchChSentServerProof:
// Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
continue
}
err := p.waitServerProof(p.ctx, batchInfo) err := p.waitServerProof(p.ctx, batchInfo)
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
batchInfo.ServerProof = nil
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("waitServerProof", "err", err) log.Errorw("waitServerProof", "err", err)
p.setErrAtBatchNum(batchInfo.BatchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.waitServerProof: %v", err),
FailedBatchNum: batchInfo.BatchNum,
})
continue continue
} }
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
p.txManager.AddBatch(p.ctx, batchInfo) p.txManager.AddBatch(p.ctx, batchInfo)
} }
} }
@@ -401,94 +282,56 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Structure to accumulate data and metadata of the batch
now := time.Now() batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum} batchInfo.Debug.StartTimestamp = time.Now()
batchInfo.Debug.StartTimestamp = now
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1 batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
selectionCfg := &txselector.SelectionConfig{
MaxL1UserTxs: common.RollupConstMaxL1UserTx,
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
var poolL2Txs []common.PoolL2Tx var poolL2Txs []common.PoolL2Tx
var discardedL2Txs []common.PoolL2Tx var discardedL2Txs []common.PoolL2Tx
var l1UserTxsExtra, l1CoordTxs []common.L1Tx var l1UserTxsExtra, l1CoordTxs []common.L1Tx
var auths [][]byte var auths [][]byte
var coordIdxs []common.Idx var coordIdxs []common.Idx
// Check if the slot is not yet fulfilled
slotCommitted := false
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, tracerr.Wrap(errForgeBeforeDelay)
}
// 1. Decide if we forge L2Tx or L1+L2Tx // 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) { if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true batchInfo.L1Batch = true
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum { defer func() {
// If there's no error, update the parameters related
// to the last L1Batch forged
if err == nil {
p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.lastForgeL1TxsNum++
}
}()
if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, tracerr.Wrap(errLastL1BatchNotSynced) return nil, tracerr.Wrap(errLastL1BatchNotSynced)
} }
// 2a: L1+L2 txs // 2a: L1+L2 txs
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1) l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, l1UserTxs) p.txSelector.GetL1L2TxSelection(selectionCfg, l1UserTxs)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
} else { } else {
// 2b: only L2 txs // 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig) p.txSelector.GetL2TxSelection(selectionCfg)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
l1UserTxsExtra = nil l1UserTxsExtra = nil
} }
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the number of unforged L1UserTxs
// (either in a open queue or in a frozen
// not-yet-forged queue).
count, err := p.historyDB.GetUnforgedL1UserTxsCount()
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues to be able to
// forge the L1UserTxs in the future.
// Otherwise, skip.
if count == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, tracerr.Wrap(errForgeNoTxsBeforeDelay)
}
}
if batchInfo.L1Batch {
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
}
// 3. Save metadata from TxSelector output for BatchNum // 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxsExtra = l1UserTxsExtra batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs batchInfo.L1CoordTxs = l1CoordTxs
@@ -496,15 +339,14 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
batchInfo.CoordIdxs = coordIdxs batchInfo.CoordIdxs = coordIdxs
batchInfo.VerifierIdx = p.cfg.VerifierIdx batchInfo.VerifierIdx = p.cfg.VerifierIdx
if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), if err := p.l2DB.StartForging(common.TxIDsFromPoolL2Txs(poolL2Txs), batchInfo.BatchNum); err != nil {
batchInfo.BatchNum); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil { if err := p.l2DB.UpdateTxsInfo(discardedL2Txs); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Invalidate transactions that become invalid because of	// Invalidate transactions that become invalid because of
// the poolL2Txs selected. Will mark as invalid the txs that have a // the poolL2Txs selected. Will mark as invalid the txs that have a
// (fromIdx, nonce) which already appears in the selected txs (includes // (fromIdx, nonce) which already appears in the selected txs (includes
// all the nonces smaller than the current one) // all the nonces smaller than the current one)
@@ -534,15 +376,12 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
p.cfg.debugBatchStore(batchInfo) p.cfg.debugBatchStore(batchInfo)
log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum) log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil return batchInfo, nil
} }
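
The ForgeDelay/ForgeNoTxsDelay checks on the left-hand column boil down to a small decision. A simplified sketch of it (it omits the check for pending L1UserTxs in not-yet-forged queues, and the parameter names are invented):

```
package main

import (
	"fmt"
	"time"
)

// shouldSkipForge mirrors the two delay checks above: once the current slot
// already has a committed forge, any batch waits at least forgeDelay since
// the last forge, and batches without transactions wait noTxsDelay.
func shouldSkipForge(slotCommitted, hasTxs bool, sinceLastForge, forgeDelay, noTxsDelay time.Duration) bool {
	if !slotCommitted {
		return false // the slot still needs a forge, don't delay it
	}
	if sinceLastForge < forgeDelay {
		return true // too soon after the last forged batch
	}
	if !hasTxs && sinceLastForge < noTxsDelay {
		return true // nothing to forge yet, wait a bit longer
	}
	return false
}

func main() {
	fmt.Println(shouldSkipForge(true, false, 5*time.Second, 10*time.Second, 60*time.Second)) // true
	fmt.Println(shouldSkipForge(true, true, 15*time.Second, 10*time.Second, 60*time.Second)) // false
	fmt.Println(shouldSkipForge(false, false, 0, 10*time.Second, 60*time.Second))            // false
}
```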
// waitServerProof gets the generated zkProof & sends it to the SmartContract // waitServerProof gets the generated zkProof & sends it to the SmartContract
func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error { func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call;	proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call; returns when the proof server has calculated the proof
// returns when the proof server has calculated the proof	if err != nil {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -558,12 +397,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool { func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
// Take the lastL1BatchBlockNum as the biggest between the last // Take the lastL1BatchBlockNum as the biggest between the last
// scheduled one, and the synchronized one. // scheduled one, and the synchronized one.
lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum { if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
} }
// Set Debug information // Set Debug information
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
batchInfo.Debug.L1BatchBlockScheduleDeadline = batchInfo.Debug.L1BatchBlockScheduleDeadline =

View File

@@ -25,14 +25,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestPipelineShouldL1L2Batch(t *testing.T) { func TestPipelineShouldL1L2Batch(t *testing.T) {
ethClientSetup := test.NewClientSetupExample() ethClientSetup := test.NewClientSetupExample()
ethClientSetup.ChainID = big.NewInt(int64(chainID)) ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -85,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
// //
// Scheduled L1Batch // Scheduled L1Batch
// //
pipeline.state.lastScheduledL1BatchBlockNum = startBlock pipeline.lastScheduledL1BatchBlockNum = startBlock
stats.Sync.LastL1BatchBlock = startBlock - 10 stats.Sync.LastL1BatchBlock = startBlock - 10
// We are one block before the timeout range * 0.5
@@ -136,11 +128,6 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
blocks, err := tc.GenerateBlocksFromInstructions(set) blocks, err := tc.GenerateBlocksFromInstructions(set)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, blocks) require.NotNil(t, blocks)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("0")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("6860514559199319426609623120853503165917774887908204288119245630904770452486")
ethAddTokens(blocks, ethClient) ethAddTokens(blocks, ethClient)
err = ethClient.CtlAddBlocks(blocks) err = ethClient.CtlAddBlocks(blocks)
@@ -148,7 +135,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
ctx := context.Background() ctx := context.Background()
for { for {
syncBlock, discards, err := sync.Sync(ctx, nil) syncBlock, discards, err := sync.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -185,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
// users with positive balances // users with positive balances
tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB) tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
syncStats := sync.Stats() syncStats := sync.Stats()
batchNum := syncStats.Sync.LastBatch.BatchNum batchNum := common.BatchNum(syncStats.Sync.LastBatch)
syncSCVars := sync.SCVars() syncSCVars := sync.SCVars()
pipeline, err := coord.newPipeline(ctx) pipeline, err := coord.newPipeline(ctx)

View File

@@ -13,23 +13,13 @@ import (
// PurgerCfg is the purger configuration // PurgerCfg is the purger configuration
type PurgerCfg struct { type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated // PurgeBatchDelay is the delay between batches to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBatchDelay int64 PurgeBatchDelay int64
// InvalidateBatchDelay is the delay between batches to mark invalid // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBatchDelay int64 InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBlockDelay int64 PurgeBlockDelay int64
// InvalidateBlockDelay is the delay between blocks to mark invalid // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBlockDelay int64 InvalidateBlockDelay int64
} }
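
For reference, a hypothetical configuration of the struct above; the values are illustrative, not hermez-node defaults.

```
package main

import "fmt"

// PurgerCfg mirrors the struct above; the numbers below are only an example.
type PurgerCfg struct {
	PurgeBatchDelay      int64
	InvalidateBatchDelay int64
	PurgeBlockDelay      int64
	InvalidateBlockDelay int64
}

func main() {
	cfg := PurgerCfg{
		PurgeBatchDelay:      10, // purge outdated txs at most every 10 batches
		InvalidateBatchDelay: 4,  // invalidate low-nonce txs at most every 4 batches
		PurgeBlockDelay:      10, // same limits, counted in L1 blocks
		InvalidateBlockDelay: 4,
	}
	fmt.Printf("%+v\n", cfg)
}
```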

View File

@@ -21,21 +21,19 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
} }
func newStateDB(t *testing.T) *statedb.LocalStateDB { func newStateDB(t *testing.T) *statedb.LocalStateDB {
syncDBPath, err := ioutil.TempDir("", "tmpSyncDB") syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
deleteme = append(deleteme, syncDBPath) deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128, syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
Type: statedb.TypeSynchronizer, NLevels: 48})
assert.NoError(t, err) assert.NoError(t, err)
stateDBPath, err := ioutil.TempDir("", "tmpStateDB") stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
require.NoError(t, err) require.NoError(t, err)
deleteme = append(deleteme, stateDBPath) deleteme = append(deleteme, stateDBPath)
stateDB, err := statedb.NewLocalStateDB(statedb.Config{Path: stateDBPath, Keep: 128, stateDB, err := statedb.NewLocalStateDB(stateDBPath, 128, syncStateDB, statedb.TypeTxSelector, 0)
Type: statedb.TypeTxSelector, NLevels: 0}, syncStateDB)
require.NoError(t, err) require.NoError(t, err)
return stateDB return stateDB
} }

View File

@@ -4,13 +4,11 @@ import (
"context" "context"
"fmt" "fmt"
"math/big" "math/big"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
@@ -37,26 +35,17 @@ type TxManager struct {
vars synchronizer.SCVariables vars synchronizer.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum queue []*BatchInfo
minPipelineNum int
queue Queue
// lastSuccessBatch stores the last BatchNum whose forge call was confirmed	// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
lastSuccessBatch common.BatchNum lastSuccessBatch common.BatchNum
// lastPendingBatch common.BatchNum lastPendingBatch common.BatchNum
// accNonce is the account nonce in the last mined block (due to mined txs) lastSuccessNonce uint64
accNonce uint64 lastPendingNonce uint64
// accNextNonce is the nonce that we should use to send the next tx.
// In some cases this will be a reused nonce of an already pending tx.
accNextNonce uint64
lastSentL1BatchBlockNum int64
} }
// NewTxManager creates a new TxManager // NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB, func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) ( coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (*TxManager, error) {
*TxManager, error) {
chainID, err := ethClient.EthChainID() chainID, err := ethClient.EthChainID()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -65,19 +54,26 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil) lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, err
} }
log.Infow("TxManager started", "nonce", accNonce) lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
if err != nil {
return nil, err
}
if lastSuccessNonce != lastPendingNonce {
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
lastSuccessNonce, lastPendingNonce))
}
log.Infow("TxManager started", "nonce", lastSuccessNonce)
return &TxManager{ return &TxManager{
cfg: *cfg, cfg: *cfg,
ethClient: ethClient, ethClient: ethClient,
l2DB: l2DB, l2DB: l2DB,
coord: coord, coord: coord,
batchCh: make(chan *BatchInfo, queueLen), batchCh: make(chan *BatchInfo, queueLen),
statsVarsCh: make(chan statsVars, queueLen), statsVarsCh: make(chan statsVars, queueLen),
discardPipelineCh: make(chan int, queueLen),
account: accounts.Account{ account: accounts.Account{
Address: *address, Address: *address,
}, },
@@ -86,10 +82,8 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
vars: *initSCVars, vars: *initSCVars,
minPipelineNum: 0, lastSuccessNonce: lastSuccessNonce,
queue: NewQueue(), lastPendingNonce: lastPendingNonce,
accNonce: accNonce,
accNextNonce: accNonce,
}, nil }, nil
} }
@@ -103,41 +97,35 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
} }
// SetSyncStatsVars is a thread safe method to set the synchronizer Stats	// SetSyncStatsVars is a thread safe method to set the synchronizer Stats
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
vars *synchronizer.SCVariablesPtr) {
select { select {
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
} }
} }
// DiscardPipeline is a thread safe method to notify about a discarded pipeline func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
// due to a reorg if vars.Rollup != nil {
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) { t.vars.Rollup = *vars.Rollup
select { }
case t.discardPipelineCh <- pipelineNum: if vars.Auction != nil {
case <-ctx.Done(): t.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
t.vars.WDelayer = *vars.WDelayer
} }
} }
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&t.vars, vars)
}
// NewAuth generates a new auth object for an ethereum transaction // NewAuth generates a new auth object for an ethereum transaction
func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) { func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx) gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if t.cfg.GasPriceIncPerc != 0 { inc := new(big.Int).Set(gasPrice)
inc := new(big.Int).Set(gasPrice) const gasPriceDiv = 100
inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc)) inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
// nolint reason: to calculate percentages we use 100 gasPrice.Add(gasPrice, inc)
inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
gasPrice.Add(gasPrice, inc)
}
// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice) // log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID) auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -145,106 +133,42 @@ func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.Tr
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
auth.Value = big.NewInt(0) // in wei auth.Value = big.NewInt(0) // in wei
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
gasLimit := t.cfg.ForgeBatchGasCost.Fixed + auth.GasLimit = 1000000
uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
auth.GasLimit = gasLimit
auth.GasPrice = gasPrice auth.GasPrice = gasPrice
auth.Nonce = nil auth.Nonce = nil
return auth, nil return auth, nil
} }
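
The gas-limit estimate in the left-hand column is a linear formula over the batch contents. A self-contained sketch of it, with invented gas-cost figures:

```
package main

import "fmt"

// forgeBatchGasCost groups the per-component gas costs used in the estimate;
// the field names mirror the config above, the numbers are made up.
type forgeBatchGasCost struct {
	Fixed     uint64
	L1UserTx  uint64
	L1CoordTx uint64
	L2Tx      uint64
}

// estimateGasLimit is a fixed base cost plus a per-transaction cost for each
// kind of transaction included in the forgeBatch call.
func estimateGasLimit(c forgeBatchGasCost, nL1User, nL1Coord, nL2 int) uint64 {
	return c.Fixed +
		uint64(nL1User)*c.L1UserTx +
		uint64(nL1Coord)*c.L1CoordTx +
		uint64(nL2)*c.L2Tx
}

func main() {
	cost := forgeBatchGasCost{Fixed: 300000, L1UserTx: 8000, L1CoordTx: 7000, L2Tx: 5000}
	fmt.Println(estimateGasLimit(cost, 2, 1, 100)) // 300000 + 16000 + 7000 + 500000 = 823000
}
```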
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error { func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
nextBlock := t.stats.Eth.LastBlock.Num + 1 // TODO: Check if we can forge in the next blockNum, abort if we can't
if !t.canForgeAt(nextBlock) { batchInfo.Debug.Status = StatusSent
return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock)) batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
} batchInfo.Debug.SendTimestamp = time.Now()
if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch { batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock)) batchInfo.Debug.StartTimestamp).Seconds()
}
margin := t.cfg.SendBatchBlocksMarginCheck
if margin != 0 {
if !t.canForgeAt(nextBlock + margin) {
return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
margin, nextBlock))
}
if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
margin, nextBlock))
}
}
return nil
}
func addPerc(v *big.Int, p int64) *big.Int {
r := new(big.Int).Set(v)
r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolint:gomnd
// If the increase is 0, force it to be 1 so that a gas increase
// doesn't result in the same value, making the transaction to be equal
// than before.
if r.Cmp(big.NewInt(0)) == 0 {
r = big.NewInt(1)
}
return r.Add(v, r)
}
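
The addPerc helper above is what lets a resend replace the pending transaction: same nonce, gas price bumped by at least 1 wei. A standalone usage sketch of the same function:

```
package main

import (
	"fmt"
	"math/big"
)

// addPerc increases v by p percent, forcing at least +1 so a resent
// transaction never carries exactly the same gas price as the original.
func addPerc(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Set(v)
	r.Mul(r, big.NewInt(p))
	r.Div(r, big.NewInt(100))
	if r.Sign() == 0 {
		r = big.NewInt(1)
	}
	return r.Add(v, r)
}

func main() {
	// Resending with the same nonce but ~10% more gas replaces the pending tx
	// instead of queueing a second one.
	gasPrice := big.NewInt(2000000000)      // 2 gwei
	fmt.Println(addPerc(gasPrice, 10))      // 2200000000
	fmt.Println(addPerc(big.NewInt(5), 10)) // 6: the +1 floor kicks in
}
```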
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo,
resend bool) error {
var ethTx *types.Transaction var ethTx *types.Transaction
var err error var err error
var auth *bind.TransactOpts auth, err := t.NewAuth(ctx)
if resend { if err != nil {
auth = batchInfo.Auth return tracerr.Wrap(err)
auth.GasPrice = addPerc(auth.GasPrice, 10)
} else {
auth, err = t.NewAuth(ctx, batchInfo)
if err != nil {
return tracerr.Wrap(err)
}
batchInfo.Auth = auth
auth.Nonce = big.NewInt(int64(t.accNextNonce))
} }
auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
t.lastPendingNonce++
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ { for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
auth.GasPrice, t.cfg.MaxGasPrice))
}
// RollupForgeBatch() calls ethclient.SendTransaction()
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth) ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
// We check the errors via strings because we match the if err != nil {
// definition of the error from geth, with the string returned // if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
// via RPC obtained by the client. // log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
if err == nil { // "block", t.stats.Eth.LastBlock.Num+1)
break // return tracerr.Wrap(err)
} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) { // }
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Add(auth.Nonce, big.NewInt(1))
attempt--
} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
attempt--
} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else {
log.Errorw("TxManager ethClient.RollupForgeBatch", log.Errorw("TxManager ethClient.RollupForgeBatch",
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1, "attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
"batchNum", batchInfo.BatchNum) "batchNum", batchInfo.BatchNum)
} else {
break
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -255,31 +179,11 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err)) return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
} }
if !resend {
t.accNextNonce = auth.Nonce.Uint64() + 1
}
batchInfo.EthTx = ethTx batchInfo.EthTx = ethTx
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash()) log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
now := time.Now()
batchInfo.SendTimestamp = now
if resend {
batchInfo.Debug.ResendNum++
}
batchInfo.Debug.Status = StatusSent
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
t.lastPendingBatch = batchInfo.BatchNum
if !resend { if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
if batchInfo.L1Batch {
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
}
}
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs),
batchInfo.BatchNum); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
@@ -311,9 +215,7 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
} }
} }
if err != nil { if err != nil {
return tracerr.Wrap( return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.EthTransactionReceipt: %w", err))
fmt.Errorf("reached max attempts for ethClient.EthTransactionReceipt: %w",
err))
} }
batchInfo.Receipt = receipt batchInfo.Receipt = receipt
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
@@ -323,20 +225,13 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) { func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
receipt := batchInfo.Receipt receipt := batchInfo.Receipt
if receipt != nil { if receipt != nil {
if batchInfo.EthTx.Nonce()+1 > t.accNonce {
t.accNonce = batchInfo.EthTx.Nonce() + 1
}
if receipt.Status == types.ReceiptStatusFailed { if receipt.Status == types.ReceiptStatusFailed {
batchInfo.Debug.Status = StatusFailed batchInfo.Debug.Status = StatusFailed
t.cfg.debugBatchStore(batchInfo)
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber) _, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash, log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(), "batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
"err", err) "err", err)
batchInfo.EthTxErr = err
if batchInfo.BatchNum <= t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum - 1
}
t.cfg.debugBatchStore(batchInfo)
return nil, tracerr.Wrap(fmt.Errorf( return nil, tracerr.Wrap(fmt.Errorf(
"ethereum transaction receipt status is failed: %w", err)) "ethereum transaction receipt status is failed: %w", err))
} else if receipt.Status == types.ReceiptStatusSuccessful { } else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -344,17 +239,6 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64() batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum - batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
batchInfo.Debug.StartBlockNum batchInfo.Debug.StartBlockNum
if batchInfo.Debug.StartToMineDelay == 0 {
if block, err := t.ethClient.EthBlockByNumber(ctx,
receipt.BlockNumber.Int64()); err != nil {
log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
} else {
batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.SendTimestamp).Seconds()
batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
}
}
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
if batchInfo.BatchNum > t.lastSuccessBatch { if batchInfo.BatchNum > t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum t.lastSuccessBatch = batchInfo.BatchNum
@@ -366,72 +250,11 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
return nil, nil return nil, nil
} }
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
// Queue of BatchInfos
type Queue struct {
list []*BatchInfo
// nonceByBatchNum map[common.BatchNum]uint64
next int
}
// NewQueue returns a new queue
func NewQueue() Queue {
return Queue{
list: make([]*BatchInfo, 0),
// nonceByBatchNum: make(map[common.BatchNum]uint64),
next: 0,
}
}
// Len is the length of the queue
func (q *Queue) Len() int {
return len(q.list)
}
// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
if position >= len(q.list) {
return nil
}
return q.list[position]
}
// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
if len(q.list) == 0 {
return 0, nil
}
defer func() { q.next = (q.next + 1) % len(q.list) }()
return q.next, q.list[q.next]
}
// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
// batchInfo := q.list[position]
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
q.list = append(q.list[:position], q.list[position+1:]...)
if len(q.list) == 0 {
q.next = 0
} else {
q.next = position % len(q.list)
}
}
// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
q.list = append(q.list, batchInfo)
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// nonce, ok := q.nonceByBatchNum[batchNum]
// return nonce, ok
// }
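
A usage illustration of the Queue above, trimmed down to store ints instead of *BatchInfo: Next walks the pending entries round-robin and Remove keeps the cursor in range.

```
package main

import "fmt"

// queue is a reduced version of the Queue above, just to show how Next and
// Remove interact.
type queue struct {
	list []int
	next int
}

func (q *queue) Push(v int) { q.list = append(q.list, v) }

func (q *queue) Next() (int, int, bool) {
	if len(q.list) == 0 {
		return 0, 0, false
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next], true
}

func (q *queue) Remove(position int) {
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

func main() {
	q := &queue{}
	q.Push(10)
	q.Push(11)
	q.Push(12)
	// Round-robin over pending items: 10, 11, 12, 10, ...
	for i := 0; i < 4; i++ {
		_, v, _ := q.Next()
		fmt.Print(v, " ")
	}
	fmt.Println()
	// Removing the item at position 1 (value 11) keeps the cursor valid.
	q.Remove(1)
	_, v, _ := q.Next()
	fmt.Println(v) // 12
}
```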
// Run the TxManager // Run the TxManager
func (t *TxManager) Run(ctx context.Context) { func (t *TxManager) Run(ctx context.Context) {
next := 0
waitDuration := longWaitDuration
var statsVars statsVars var statsVars statsVars
select { select {
case statsVars = <-t.statsVarsCh: case statsVars = <-t.statsVarsCh:
@@ -440,9 +263,8 @@ func (t *TxManager) Run(ctx context.Context) {
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
log.Infow("TxManager: received initial statsVars", log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum) "block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -451,27 +273,8 @@ func (t *TxManager) Run(ctx context.Context) {
case statsVars := <-t.statsVarsCh: case statsVars := <-t.statsVarsCh:
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
case pipelineNum := <-t.discardPipelineCh:
t.minPipelineNum = pipelineNum + 1
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
}
case batchInfo := <-t.batchCh: case batchInfo := <-t.batchCh:
if batchInfo.PipelineNum < t.minPipelineNum { if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
}
if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
log.Warnw("TxManager: shouldSend", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
continue
}
if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
// If we reach here it's because our ethNode has // If we reach here it's because our ethNode has
@@ -479,35 +282,29 @@ func (t *TxManager) Run(ctx context.Context) {
// ethereum. This could be due to the ethNode // ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that // failure, or an invalid transaction (that
// can't be mined) // can't be mined)
log.Warnw("TxManager: forgeBatch send failed", "err", err, t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch send: %v", err)})
continue continue
} }
t.queue.Push(batchInfo) t.queue = append(t.queue, batchInfo)
if !timer.Stop() { waitDuration = t.cfg.TxManagerCheckInterval
<-timer.C case <-time.After(waitDuration):
} if len(t.queue) == 0 {
timer.Reset(t.cfg.TxManagerCheckInterval) waitDuration = longWaitDuration
case <-timer.C:
queuePosition, batchInfo := t.queue.Next()
if batchInfo == nil {
timer.Reset(longWaitDuration)
continue continue
} }
timer.Reset(t.cfg.TxManagerCheckInterval) current := next
next = (current + 1) % len(t.queue)
batchInfo := t.queue[current]
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil { if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
// Our ethNode is giving an error different // Our ethNode is giving an error different
// than "not found" when getting the receipt // than "not found" when getting the receipt
// for the transaction, so we can't figure out // for the transaction, so we can't figure out
// if it was not mined, mined and successful or	// if it was not mined, mined and successful or
// mined and failed. This could be due to the // mined and failed. This could be due to the
// ethNode failure. // ethNode failure.
t.coord.SendMsg(ctx, MsgStopPipeline{ t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
} }
confirm, err := t.handleReceipt(ctx, batchInfo) confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -515,106 +312,32 @@ func (t *TxManager) Run(ctx context.Context) {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
// Transaction was rejected // Transaction was rejected
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil { t.queue = append(t.queue[:current], t.queue[current+1:]...)
continue if len(t.queue) == 0 {
} else if err != nil { next = 0
log.Errorw("TxManager: removeBadBatchInfos", "err", err) } else {
continue next = current % len(t.queue)
} }
t.coord.SendMsg(ctx, MsgStopPipeline{ t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
continue
} }
now := time.Now()
if !t.cfg.EthNoReuseNonce && confirm == nil &&
now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
continue
} else if err != nil {
// If we reach here it's because our ethNode has
// been unable to send the transaction to
// ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that
// can't be mined)
log.Warnw("TxManager: forgeBatch resend failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
continue
}
}
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks { if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
log.Debugw("TxManager: forgeBatch tx confirmed", log.Debugw("TxManager tx for RollupForgeBatch confirmed",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum) "batch", batchInfo.BatchNum)
t.queue.Remove(queuePosition) t.queue = append(t.queue[:current], t.queue[current+1:]...)
if len(t.queue) == 0 {
next = 0
} else {
next = current % len(t.queue)
}
} }
} }
} }
} }
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error { // nolint reason: this function will be used in the future
next := 0 //nolint:unused
for { func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
batchInfo := t.queue.At(next)
if batchInfo == nil {
break
}
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
return nil
} else if err != nil {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
next++
continue
}
confirm, err := t.handleReceipt(ctx, batchInfo)
if ctx.Err() != nil {
return nil
} else if err != nil {
// Transaction was rejected
if t.minPipelineNum <= batchInfo.PipelineNum {
t.minPipelineNum = batchInfo.PipelineNum + 1
}
t.queue.Remove(next)
continue
}
// If tx is pending but is from a cancelled pipeline, remove it
// from the queue
if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum {
t.queue.Remove(next)
continue
}
}
next++
}
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
if err != nil {
return err
}
if !t.cfg.EthNoReuseNonce {
t.accNextNonce = accNonce
}
return nil
}
func (t *TxManager) canForgeAt(blockNum int64) bool {
return canForge(&t.consts.Auction, &t.vars.Auction, return canForge(&t.consts.Auction, &t.vars.Auction,
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot, &stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
t.cfg.ForgerAddress, blockNum) t.cfg.ForgerAddress, blockNum)
} }
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
}
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
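
One side of the hunks above replaces manual slice surgery (append(t.queue[:current], t.queue[current+1:]...)) with a small queue helper exposing At and Remove, which removeBadBatchInfos walks until At returns nil. The repository's actual queue type is not shown in this diff, so the following is only a minimal sketch of the interface those calls imply; queuedBatch is a hypothetical stand-in for *BatchInfo:

```go
package main

import "fmt"

// queuedBatch is a hypothetical stand-in for the coordinator's *BatchInfo.
type queuedBatch struct {
	BatchNum int64
}

// batchQueue is a minimal, order-preserving queue sketch exposing the
// At/Remove/Len operations used by the TxManager loop above.
type batchQueue struct {
	list []*queuedBatch
}

// Push appends an element at the end of the queue.
func (q *batchQueue) Push(b *queuedBatch) { q.list = append(q.list, b) }

// Len returns the number of queued elements.
func (q *batchQueue) Len() int { return len(q.list) }

// At returns the element at position i, or nil if i is out of range.
func (q *batchQueue) At(i int) *queuedBatch {
	if i < 0 || i >= len(q.list) {
		return nil
	}
	return q.list[i]
}

// Remove deletes the element at position i, keeping the remaining order.
func (q *batchQueue) Remove(i int) {
	q.list = append(q.list[:i], q.list[i+1:]...)
}

func main() {
	q := batchQueue{}
	q.Push(&queuedBatch{BatchNum: 1})
	q.Push(&queuedBatch{BatchNum: 2})
	q.Remove(0)
	fmt.Println(q.Len(), q.At(0).BatchNum) // prints: 1 2
}
```

Scanning with At and only advancing the index when nothing was removed, as the cleanup loop above does, avoids the off-by-one mistakes that are easy to make when deleting from a slice while iterating over it.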

View File

@@ -1,15 +0,0 @@
package coordinator
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddPerc(t *testing.T) {
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}
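
The deleted test exercised addPerc, which, judging by the assertions, increases a big.Int by a given percentage. The sketch below is reconstructed purely from those expectations and may differ from the original coordinator helper (for example in rounding behaviour):

```go
package main

import (
	"fmt"
	"math/big"
)

// addPerc returns v increased by perc percent using integer big.Int math
// (the fractional part of v*perc/100 is truncated).
func addPerc(v *big.Int, perc int64) *big.Int {
	delta := new(big.Int).Mul(v, big.NewInt(perc))
	delta.Div(delta, big.NewInt(100))
	return delta.Add(delta, v)
}

func main() {
	fmt.Println(addPerc(big.NewInt(100), 10))  // 110
	fmt.Println(addPerc(big.NewInt(100), 1))   // 101
	fmt.Println(addPerc(big.NewInt(10), 20))   // 12
	fmt.Println(addPerc(big.NewInt(1000), 50)) // 1500
}
```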

View File

@@ -34,7 +34,7 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
batch := &BatchAPI{} batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow( return batch, tracerr.Wrap(meddler.QueryRow(
hdb.dbRead, batch, hdb.db, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num, `SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root, batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num, batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -133,10 +133,10 @@ func (hdb *HistoryDB) GetBatchesAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
batchPtrs := []*BatchAPI{} batchPtrs := []*BatchAPI{}
if err := meddler.QueryAll(hdb.dbRead, &batchPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI) batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
@@ -156,7 +156,7 @@ func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN ( INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
@@ -212,9 +212,9 @@ func (hdb *HistoryDB) GetBestBidsAPI(
if limit != nil { if limit != nil {
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
} }
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
bidPtrs := []*BidAPI{} bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(hdb.dbRead, &bidPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// log.Debug(query) // log.Debug(query)
@@ -296,9 +296,9 @@ func (hdb *HistoryDB) GetBidsAPI(
if err != nil { if err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
query = hdb.dbRead.Rebind(query) query = hdb.db.Rebind(query)
bids := []*BidAPI{} bids := []*BidAPI{}
if err := meddler.QueryAll(hdb.dbRead, &bids, query, argsQ...); err != nil { if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(bids) == 0 { if len(bids) == 0 {
@@ -384,9 +384,9 @@ func (hdb *HistoryDB) GetTokensAPI(
if err != nil { if err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
query = hdb.dbRead.Rebind(query) query = hdb.db.Rebind(query)
tokens := []*TokenWithUSD{} tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(hdb.dbRead, &tokens, query, argsQ...); err != nil { if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(tokens) == 0 { if len(tokens) == 0 {
@@ -408,7 +408,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
tx := &TxAPI{} tx := &TxAPI{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position, hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj, hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj, hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd, tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
@@ -541,10 +541,10 @@ func (hdb *HistoryDB) GetTxsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
txsPtrs := []*TxAPI{} txsPtrs := []*TxAPI{}
if err := meddler.QueryAll(hdb.dbRead, &txsPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI) txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
@@ -564,7 +564,7 @@ func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, err
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
exit := &ExitAPI{} exit := &ExitAPI{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, exit, `SELECT exit_tree.item_id, exit_tree.batch_num, hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx, hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr, account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
@@ -685,10 +685,10 @@ func (hdb *HistoryDB) GetExitsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
exits := []*ExitAPI{} exits := []*ExitAPI{}
if err := meddler.QueryAll(hdb.dbRead, &exits, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(exits) == 0 { if len(exits) == 0 {
@@ -707,7 +707,7 @@ func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
var bucketUpdates []*BucketUpdateAPI var bucketUpdates []*BucketUpdateAPI
err = meddler.QueryAll( err = meddler.QueryAll(
hdb.dbRead, &bucketUpdates, hdb.db, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update `SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket) group by num_bucket)
@@ -772,10 +772,10 @@ func (hdb *HistoryDB) GetCoordinatorsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
coordinators := []*CoordinatorAPI{} coordinators := []*CoordinatorAPI{}
if err := meddler.QueryAll(hdb.dbRead, &coordinators, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(coordinators) == 0 { if len(coordinators) == 0 {
@@ -795,7 +795,7 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
auctionVars := &common.AuctionVariables{} auctionVars := &common.AuctionVariables{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, auctionVars, `SELECT * FROM auction_vars;`, hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
) )
return auctionVars, tracerr.Wrap(err) return auctionVars, tracerr.Wrap(err)
} }
@@ -816,7 +816,7 @@ func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems i
ORDER BY default_slot_set_bid_slot_num DESC ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2; LIMIT $2;
` `
err = meddler.QueryAll(hdb.dbRead, &auctionVars, query, slotNum, maxItems) err = meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -832,19 +832,11 @@ func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
account := &AccountAPI{} account := &AccountAPI{}
err = meddler.QueryRow(hdb.dbRead, account, `SELECT account.item_id, hez_idx(account.idx, err = meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr, token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
token.usd_update, account_update.nonce, account_update.balance FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
FROM account inner JOIN (
SELECT idx, nonce, balance
FROM account_update
WHERE idx = $1
ORDER BY item_id DESC LIMIT 1
) AS account_update ON account_update.idx = account.idx
INNER JOIN token ON account.token_id = token.token_id
WHERE account.idx = $1;`, idx)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -872,13 +864,8 @@ func (hdb *HistoryDB) GetAccountsAPI(
queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num, queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block, account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update, token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
account_update.nonce, account_update.balance, COUNT(*) OVER() AS total_items COUNT(*) OVER() AS total_items
FROM account inner JOIN ( FROM account INNER JOIN token ON account.token_id = token.token_id `
SELECT DISTINCT idx,
first_value(nonce) over(partition by idx ORDER BY item_id DESC) as nonce,
first_value(balance) over(partition by idx ORDER BY item_id DESC) as balance
FROM account_update
) AS account_update ON account_update.idx = account.idx INNER JOIN token ON account.token_id = token.token_id `
// Apply filters // Apply filters
nextIsAnd := false nextIsAnd := false
// ethAddr filter // ethAddr filter
@@ -927,10 +914,10 @@ func (hdb *HistoryDB) GetAccountsAPI(
if err != nil { if err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
query = hdb.dbRead.Rebind(query) query = hdb.db.Rebind(query)
accounts := []*AccountAPI{} accounts := []*AccountAPI{}
if err := meddler.QueryAll(hdb.dbRead, &accounts, query, argsQ...); err != nil { if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(accounts) == 0 { if len(accounts) == 0 {
@@ -952,19 +939,11 @@ func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, err
metricsTotals := &MetricsTotals{} metricsTotals := &MetricsTotals{}
metrics := &Metrics{} metrics := &Metrics{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(batch.batch_num), 0) as batch_num, COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
COALESCE (MIN(block.timestamp), NOW()) AS min_timestamp, NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS' and batch.batch_num <= $1;`, lastBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) as total_txs
FROM tx WHERE tx.batch_num between $1 AND $2;`, metricsTotals.FirstBatchNum, lastBatchNum)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -985,13 +964,12 @@ func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, err
} }
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches, hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num between $1 and $2;`, metricsTotals.FirstBatchNum, lastBatchNum) WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if metricsTotals.TotalBatches > 0 { if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches) metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else { } else {
@@ -1003,24 +981,11 @@ func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, err
metrics.AvgTransactionFee = 0 metrics.AvgTransactionFee = 0
} }
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, metrics, hdb.db, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`) `SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
err = meddler.QueryRow(
hdb.dbRead, metrics,
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0)
AS estimated_time_to_forge_l1 FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
metricsTotals.FirstBatchNum, lastBatchNum,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
return metrics, nil return metrics, nil
} }
@@ -1035,7 +1000,7 @@ func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{} metricsTotals := &MetricsTotals{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(tx.*) as total_txs, hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`) WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
@@ -1043,7 +1008,7 @@ func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
return 0, tracerr.Wrap(err) return 0, tracerr.Wrap(err)
} }
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, metricsTotals, `SELECT COUNT(*) AS total_batches, hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum) WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil { if err != nil {
@@ -1059,19 +1024,3 @@ func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
return avgTransactionFee, nil return avgTransactionFee, nil
} }
// GetCommonAccountAPI returns the account associated to an account idx
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
account := &common.Account{}
err = meddler.QueryRow(
hdb.dbRead, account, `SELECT idx, token_id, batch_num, bjj, eth_addr
FROM account WHERE idx = $1;`, idx,
)
return account, tracerr.Wrap(err)
}
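
Every API query in this file follows the same pattern: Acquire a slot from apiConnCon, defer the returned cancel function, and defer Release once the slot is taken. Below is a minimal sketch of a semaphore-style controller that supports that calling convention, built on a buffered channel with a timeout; the real db.APIConnectionController may be implemented differently:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// connController caps the number of concurrent API queries against SQL.
// This is a hypothetical sketch, not the repository's implementation.
type connController struct {
	sem     chan struct{}
	timeout time.Duration
}

func newConnController(maxConns int, timeout time.Duration) *connController {
	return &connController{sem: make(chan struct{}, maxConns), timeout: timeout}
}

// Acquire blocks until a slot is free or the timeout expires. The returned
// cancel function must always be called, which is why the queries above
// defer cancel() before checking the error.
func (c *connController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	select {
	case c.sem <- struct{}{}:
		return cancel, nil
	case <-ctx.Done():
		return cancel, errors.New("timed out waiting for a free SQL slot")
	}
}

// Release frees the slot taken by a successful Acquire.
func (c *connController) Release() { <-c.sem }

func main() {
	cc := newConnController(1, 100*time.Millisecond)
	cancel, err := cc.Acquire()
	defer cancel()
	if err != nil {
		fmt.Println("would return a service-unavailable style error:", err)
		return
	}
	defer cc.Release()
	fmt.Println("slot acquired; run the SQL query here")
}
```

The point of such a cap is to keep a burst of API traffic from monopolizing the database connections that the rest of the node also relies on.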

View File

@@ -27,35 +27,30 @@ const (
// HistoryDB persist the historic of the rollup // HistoryDB persist the historic of the rollup
type HistoryDB struct { type HistoryDB struct {
dbRead *sqlx.DB db *sqlx.DB
dbWrite *sqlx.DB
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
// NewHistoryDB initializes the DB // NewHistoryDB initializes the DB
func NewHistoryDB(dbRead, dbWrite *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB { func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
return &HistoryDB{ return &HistoryDB{db: db, apiConnCon: apiConnCon}
dbRead: dbRead,
dbWrite: dbWrite,
apiConnCon: apiConnCon,
}
} }
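
One side of this hunk splits the single *sqlx.DB handle into separate dbRead and dbWrite connections. The sketch below shows how a caller could open the handles and pass them to either constructor; the DSN is a placeholder, and in the two-handle variant dbRead could equally point at a read replica:

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // PostgreSQL driver
)

func main() {
	// Placeholder DSN; in the node this would come from configuration.
	dsn := "postgres://hermez:yourpasswordhere@localhost:5432/hermez?sslmode=disable"

	dbWrite, err := sqlx.Connect("postgres", dsn)
	if err != nil {
		panic(err)
	}
	defer dbWrite.Close()

	// Second handle for reads; it reuses the same DSN here but could be a
	// replica in a real deployment.
	dbRead, err := sqlx.Connect("postgres", dsn)
	if err != nil {
		panic(err)
	}
	defer dbRead.Close()

	// Hypothetical wiring, depending on which constructor signature is used:
	//   historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
	//   historydb.NewHistoryDB(db, apiConnCon)
	fmt.Println("connected:", dbRead.DriverName(), dbWrite.DriverName())
}
```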
// DB returns a pointer to the HistoryDB.db. This method should be used only for // DB returns a pointer to the HistoryDB.db. This method should be used only for
// internal testing purposes. // internal testing purposes.
func (hdb *HistoryDB) DB() *sqlx.DB { func (hdb *HistoryDB) DB() *sqlx.DB {
return hdb.dbWrite return hdb.db
} }
// AddBlock insert a block into the DB // AddBlock insert a block into the DB
func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.dbWrite, block) } func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.db, block) }
func (hdb *HistoryDB) addBlock(d meddler.DB, block *common.Block) error { func (hdb *HistoryDB) addBlock(d meddler.DB, block *common.Block) error {
return tracerr.Wrap(meddler.Insert(d, "block", block)) return tracerr.Wrap(meddler.Insert(d, "block", block))
} }
// AddBlocks inserts blocks into the DB // AddBlocks inserts blocks into the DB
func (hdb *HistoryDB) AddBlocks(blocks []common.Block) error { func (hdb *HistoryDB) AddBlocks(blocks []common.Block) error {
return tracerr.Wrap(hdb.addBlocks(hdb.dbWrite, blocks)) return tracerr.Wrap(hdb.addBlocks(hdb.db, blocks))
} }
func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error { func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
@@ -66,7 +61,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
timestamp, timestamp,
hash hash
) VALUES %s;`, ) VALUES %s;`,
blocks, blocks[:],
)) ))
} }
@@ -74,7 +69,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) { func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
block := &common.Block{} block := &common.Block{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, block, hdb.db, block,
"SELECT * FROM block WHERE eth_block_num = $1;", blockNum, "SELECT * FROM block WHERE eth_block_num = $1;", blockNum,
) )
return block, tracerr.Wrap(err) return block, tracerr.Wrap(err)
@@ -84,7 +79,7 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) { func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
var blocks []*common.Block var blocks []*common.Block
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &blocks, hdb.db, &blocks,
"SELECT * FROM block ORDER BY eth_block_num;", "SELECT * FROM block ORDER BY eth_block_num;",
) )
return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err) return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
@@ -94,7 +89,7 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) { func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block var blocks []*common.Block
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &blocks, hdb.db, &blocks,
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;", "SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;",
from, to, from, to,
) )
@@ -105,13 +100,13 @@ func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) { func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) {
block := &common.Block{} block := &common.Block{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;", hdb.db, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
) )
return block, tracerr.Wrap(err) return block, tracerr.Wrap(err)
} }
// AddBatch insert a Batch into the DB // AddBatch insert a Batch into the DB
func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.dbWrite, batch) } func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.db, batch) }
func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error { func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// Calculate total collected fees in USD // Calculate total collected fees in USD
// Get IDs of collected tokens for fees // Get IDs of collected tokens for fees
@@ -134,9 +129,9 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = hdb.dbWrite.Rebind(query) query = hdb.db.Rebind(query)
if err := meddler.QueryAll( if err := meddler.QueryAll(
hdb.dbWrite, &tokenPrices, query, args..., hdb.db, &tokenPrices, query, args...,
); err != nil { ); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -158,7 +153,7 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// AddBatches inserts Batches into the DB // AddBatches inserts Batches into the DB
func (hdb *HistoryDB) AddBatches(batches []common.Batch) error { func (hdb *HistoryDB) AddBatches(batches []common.Batch) error {
return tracerr.Wrap(hdb.addBatches(hdb.dbWrite, batches)) return tracerr.Wrap(hdb.addBatches(hdb.db, batches))
} }
func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error { func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
for i := 0; i < len(batches); i++ { for i := 0; i < len(batches); i++ {
@@ -169,24 +164,11 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil return nil
} }
// GetBatch returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
batchNum,
)
return &batch, tracerr.Wrap(err)
}
// GetAllBatches retrieve all batches from the DB // GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) { func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &batches, hdb.db, &batches,
`SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected,
batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root, batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root,
batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch
@@ -199,7 +181,7 @@ func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) { func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &batches, hdb.db, &batches,
`SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator, `SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator,
state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd
FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`, FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`,
@@ -211,7 +193,7 @@ func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, erro
// GetFirstBatchBlockNumBySlot returns the ethereum block number of the first // GetFirstBatchBlockNumBySlot returns the ethereum block number of the first
// batch within a slot // batch within a slot
func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) { func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) {
row := hdb.dbRead.QueryRow( row := hdb.db.QueryRow(
`SELECT eth_block_num FROM batch `SELECT eth_block_num FROM batch
WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum, WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum,
) )
@@ -221,26 +203,14 @@ func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error)
// GetLastBatchNum returns the BatchNum of the latest forged batch // GetLastBatchNum returns the BatchNum of the latest forged batch
func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) { func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
row := hdb.dbRead.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;") row := hdb.db.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
var batchNum common.BatchNum var batchNum common.BatchNum
return batchNum, tracerr.Wrap(row.Scan(&batchNum)) return batchNum, tracerr.Wrap(row.Scan(&batchNum))
} }
// GetLastBatch returns the last forged batch
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
)
return &batch, tracerr.Wrap(err)
}
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch // GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) { func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
row := hdb.dbRead.QueryRow(`SELECT eth_block_num FROM batch row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
WHERE forge_l1_txs_num IS NOT NULL WHERE forge_l1_txs_num IS NOT NULL
ORDER BY batch_num DESC LIMIT 1;`) ORDER BY batch_num DESC LIMIT 1;`)
var blockNum int64 var blockNum int64
@@ -250,7 +220,7 @@ func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged // GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged
// batches. If there's no batch in the DB (nil, nil) is returned. // batches. If there's no batch in the DB (nil, nil) is returned.
func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) { func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
row := hdb.dbRead.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;") row := hdb.db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
lastL1TxsNum := new(int64) lastL1TxsNum := new(int64)
return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum)) return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum))
} }
@@ -261,15 +231,15 @@ func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
func (hdb *HistoryDB) Reorg(lastValidBlock int64) error { func (hdb *HistoryDB) Reorg(lastValidBlock int64) error {
var err error var err error
if lastValidBlock < 0 { if lastValidBlock < 0 {
_, err = hdb.dbWrite.Exec("DELETE FROM block;") _, err = hdb.db.Exec("DELETE FROM block;")
} else { } else {
_, err = hdb.dbWrite.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock) _, err = hdb.db.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
} }
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// AddBids insert Bids into the DB // AddBids insert Bids into the DB
func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.dbWrite, bids) } func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.db, bids) }
func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error { func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
if len(bids) == 0 { if len(bids) == 0 {
return nil return nil
@@ -278,7 +248,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;", "INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
bids, bids[:],
)) ))
} }
@@ -286,7 +256,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) { func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
var bids []*common.Bid var bids []*common.Bid
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &bids, hdb.db, &bids,
`SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid `SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid
ORDER BY item_id;`, ORDER BY item_id;`,
) )
@@ -297,7 +267,7 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) { func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{} bidCoord := &common.BidCoordinator{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, bidCoord, hdb.db, bidCoord,
`SELECT ( `SELECT (
SELECT default_slot_set_bid SELECT default_slot_set_bid
FROM auction_vars FROM auction_vars
@@ -320,7 +290,7 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat
// AddCoordinators insert Coordinators into the DB // AddCoordinators insert Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error { func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
return tracerr.Wrap(hdb.addCoordinators(hdb.dbWrite, coordinators)) return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
} }
func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error { func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error {
if len(coordinators) == 0 { if len(coordinators) == 0 {
@@ -329,13 +299,13 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;", "INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
coordinators, coordinators[:],
)) ))
} }
// AddExitTree insert Exit tree into the DB // AddExitTree insert Exit tree into the DB
func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error { func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error {
return tracerr.Wrap(hdb.addExitTree(hdb.dbWrite, exitTree)) return tracerr.Wrap(hdb.addExitTree(hdb.db, exitTree))
} }
func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error { func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error {
if len(exitTree) == 0 { if len(exitTree) == 0 {
@@ -345,7 +315,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
d, d,
"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+ "INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;", "instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
exitTree, exitTree[:],
)) ))
} }
@@ -423,13 +393,11 @@ func (hdb *HistoryDB) updateExitTree(d sqlx.Ext, blockNum int64,
// AddToken insert a token into the DB // AddToken insert a token into the DB
func (hdb *HistoryDB) AddToken(token *common.Token) error { func (hdb *HistoryDB) AddToken(token *common.Token) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "token", token)) return tracerr.Wrap(meddler.Insert(hdb.db, "token", token))
} }
// AddTokens insert tokens into the DB // AddTokens insert tokens into the DB
func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { return hdb.addTokens(hdb.db, tokens) }
return hdb.addTokens(hdb.dbWrite, tokens)
}
func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error { func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
if len(tokens) == 0 { if len(tokens) == 0 {
return nil return nil
@@ -450,17 +418,16 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
symbol, symbol,
decimals decimals
) VALUES %s;`, ) VALUES %s;`,
tokens, tokens[:],
)) ))
} }
// UpdateTokenValue updates the USD value of a token. Value is the price in // UpdateTokenValue updates the USD value of a token
// USD of a normalized token (1 token = 10^decimals units)
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error { func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
// Sanitize symbol // Sanitize symbol
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ") tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
_, err := hdb.dbWrite.Exec( _, err := hdb.db.Exec(
"UPDATE token SET usd = $1 WHERE symbol = $2;", "UPDATE token SET usd = $1 WHERE symbol = $2;",
value, tokenSymbol, value, tokenSymbol,
) )
@@ -471,7 +438,7 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) { func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
token := &TokenWithUSD{} token := &TokenWithUSD{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID, hdb.db, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
) )
return token, tracerr.Wrap(err) return token, tracerr.Wrap(err)
} }
@@ -480,25 +447,34 @@ func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) { func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
var tokens []*TokenWithUSD var tokens []*TokenWithUSD
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &tokens, hdb.db, &tokens,
"SELECT * FROM token ORDER BY token_id;", "SELECT * FROM token ORDER BY token_id;",
) )
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err) return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
} }
// GetTokenSymbolsAndAddrs returns all the token symbols and addresses from the DB // GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbolsAndAddrs() ([]TokenSymbolAndAddr, error) { func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokens []*TokenSymbolAndAddr var tokenSymbols []string
err := meddler.QueryAll( rows, err := hdb.db.Query("SELECT symbol FROM token;")
hdb.dbRead, &tokens, if err != nil {
"SELECT symbol, eth_addr FROM token;", return nil, tracerr.Wrap(err)
) }
return db.SlicePtrsToSlice(tokens).([]TokenSymbolAndAddr), tracerr.Wrap(err) defer db.RowsClose(rows)
sym := new(string)
for rows.Next() {
err = rows.Scan(sym)
if err != nil {
return nil, tracerr.Wrap(err)
}
tokenSymbols = append(tokenSymbols, *sym)
}
return tokenSymbols, nil
} }
// AddAccounts insert accounts into the DB // AddAccounts insert accounts into the DB
func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error { func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error {
return tracerr.Wrap(hdb.addAccounts(hdb.dbWrite, accounts)) return tracerr.Wrap(hdb.addAccounts(hdb.db, accounts))
} }
func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error { func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error {
if len(accounts) == 0 { if len(accounts) == 0 {
@@ -513,7 +489,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
bjj, bjj,
eth_addr eth_addr
) VALUES %s;`, ) VALUES %s;`,
accounts, accounts[:],
)) ))
} }
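
db.BulkInsert is used throughout this file with queries of the form "INSERT INTO ... VALUES %s;", where %s is expanded into one parenthesised tuple per element of the slice. The repository's helper derives the columns and values from meddler struct tags; the sketch below only illustrates the placeholder expansion itself and is not the actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// buildBulkValues expands the %s in a BulkInsert-style query into numbered
// PostgreSQL placeholders, one tuple per row, and returns the flattened
// argument list. Purely illustrative.
func buildBulkValues(query string, rows [][]interface{}) (string, []interface{}) {
	tuples := make([]string, 0, len(rows))
	var args []interface{}
	n := 1
	for _, row := range rows {
		ph := make([]string, 0, len(row))
		for _, v := range row {
			ph = append(ph, fmt.Sprintf("$%d", n))
			n++
			args = append(args, v)
		}
		tuples = append(tuples, "("+strings.Join(ph, ", ")+")")
	}
	return fmt.Sprintf(query, strings.Join(tuples, ", ")), args
}

func main() {
	q, args := buildBulkValues(
		"INSERT INTO account (idx, token_id, batch_num, bjj, eth_addr) VALUES %s;",
		[][]interface{}{
			{256, 0, 1, "bjj-1", "addr-1"},
			{257, 0, 1, "bjj-2", "addr-2"},
		},
	)
	fmt.Println(q)
	// Output (wrapped for readability):
	//   INSERT INTO account (idx, token_id, batch_num, bjj, eth_addr)
	//   VALUES ($1, $2, $3, $4, $5), ($6, $7, $8, $9, $10);
	fmt.Println(args)
}
```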
@@ -521,49 +497,18 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) { func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
var accs []*common.Account var accs []*common.Account
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &accs, hdb.db, &accs,
"SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;", "SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;",
) )
return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err) return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
} }
// AddAccountUpdates inserts accUpdates into the DB
func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
return tracerr.Wrap(hdb.addAccountUpdates(hdb.dbWrite, accUpdates))
}
func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
if len(accUpdates) == 0 {
return nil
}
return tracerr.Wrap(db.BulkInsert(
d,
`INSERT INTO account_update (
eth_block_num,
batch_num,
idx,
nonce,
balance
) VALUES %s;`,
accUpdates,
))
}
// GetAllAccountUpdates returns all the AccountUpdate from the DB
func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
var accUpdates []*common.AccountUpdate
err := meddler.QueryAll(
hdb.dbRead, &accUpdates,
"SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
)
return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
}
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx. // AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user, // If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
// BatchNum should be null, and the value will be setted by a trigger when a batch forges the tx. // BatchNum should be null, and the value will be setted by a trigger when a batch forges the tx.
// EffectiveAmount and EffectiveDepositAmount are seted with default values by the DB. // EffectiveAmount and EffectiveDepositAmount are seted with default values by the DB.
func (hdb *HistoryDB) AddL1Txs(l1txs []common.L1Tx) error { func (hdb *HistoryDB) AddL1Txs(l1txs []common.L1Tx) error {
return tracerr.Wrap(hdb.addL1Txs(hdb.dbWrite, l1txs)) return tracerr.Wrap(hdb.addL1Txs(hdb.db, l1txs))
} }
// addL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx. // addL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
@@ -617,7 +562,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {
// AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx. // AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error { func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error {
return tracerr.Wrap(hdb.addL2Txs(hdb.dbWrite, l2txs)) return tracerr.Wrap(hdb.addL2Txs(hdb.db, l2txs))
} }
// addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx. // addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
@@ -676,7 +621,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
fee, fee,
nonce nonce
) VALUES %s;`, ) VALUES %s;`,
txs, txs[:],
)) ))
} }
@@ -684,7 +629,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) { func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
var exits []*common.ExitInfo var exits []*common.ExitInfo
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &exits, hdb.db, &exits,
`SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof, `SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof,
exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request, exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request,
exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`, exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`,
@@ -696,7 +641,7 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) { func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx var txs []*common.L1Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0 hdb.db, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount, tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
@@ -713,7 +658,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
// Since the query specifies that only coordinator txs are returned, it's safe to assume // Since the query specifies that only coordinator txs are returned, it's safe to assume
// that returned txs will always have effective amounts // that returned txs will always have effective amounts
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, hdb.db, &txs,
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, tx.amount AS effective_amount, tx.amount, tx.amount AS effective_amount,
@@ -728,7 +673,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) { func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
var txs []*common.L2Tx var txs []*common.L2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, hdb.db, &txs,
`SELECT tx.id, tx.batch_num, tx.position, `SELECT tx.id, tx.batch_num, tx.position,
tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.from_idx, tx.to_idx, tx.amount, tx.token_id,
tx.fee, tx.nonce, tx.type, tx.eth_block_num tx.fee, tx.nonce, tx.type, tx.eth_block_num
@@ -741,7 +686,7 @@ func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) { func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
var txs []*common.L1Tx var txs []*common.L1Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null hdb.db, &txs, // only L1 user txs can have batch_num set to null
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, NULL AS effective_amount, tx.amount, NULL AS effective_amount,
@@ -754,21 +699,11 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
} }
// GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
// open or frozen queues that are not yet forged)
func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) FROM tx WHERE batch_num IS NULL;`,
)
var count int
return count, tracerr.Wrap(row.Scan(&count))
}
// TODO: Think about changing all the queries that return a last value, to queries that return the next valid value. // TODO: Think about changing all the queries that return a last value, to queries that return the next valid value.
// GetLastTxsPosition for a given to_forge_l1_txs_num // GetLastTxsPosition for a given to_forge_l1_txs_num
func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) { func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) {
row := hdb.dbRead.QueryRow( row := hdb.db.QueryRow(
"SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;", "SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;",
toForgeL1TxsNum, toForgeL1TxsNum,
) )
@@ -782,15 +717,15 @@ func (hdb *HistoryDB) GetSCVars() (*common.RollupVariables, *common.AuctionVaria
var rollup common.RollupVariables var rollup common.RollupVariables
var auction common.AuctionVariables var auction common.AuctionVariables
var wDelayer common.WDelayerVariables var wDelayer common.WDelayerVariables
if err := meddler.QueryRow(hdb.dbRead, &rollup, if err := meddler.QueryRow(hdb.db, &rollup,
"SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
if err := meddler.QueryRow(hdb.dbRead, &auction, if err := meddler.QueryRow(hdb.db, &auction,
"SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
if err := meddler.QueryRow(hdb.dbRead, &wDelayer, if err := meddler.QueryRow(hdb.db, &wDelayer,
"SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
@@ -821,7 +756,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
block_stamp, block_stamp,
withdrawals withdrawals
) VALUES %s;`, ) VALUES %s;`,
bucketUpdates, bucketUpdates[:],
)) ))
} }
@@ -835,7 +770,7 @@ func (hdb *HistoryDB) AddBucketUpdatesTest(d meddler.DB, bucketUpdates []common.
func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) { func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
var bucketUpdates []*common.BucketUpdate var bucketUpdates []*common.BucketUpdate
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates, hdb.db, &bucketUpdates,
`SELECT eth_block_num, num_bucket, block_stamp, withdrawals `SELECT eth_block_num, num_bucket, block_stamp, withdrawals
FROM bucket_update ORDER BY item_id;`, FROM bucket_update ORDER BY item_id;`,
) )
@@ -853,7 +788,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
eth_addr, eth_addr,
value_usd value_usd
) VALUES %s;`, ) VALUES %s;`,
tokenExchanges, tokenExchanges[:],
)) ))
} }
@@ -861,7 +796,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) { func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) {
var tokenExchanges []*common.TokenExchange var tokenExchanges []*common.TokenExchange
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &tokenExchanges, hdb.db, &tokenExchanges,
"SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;", "SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;",
) )
return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err) return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err)
@@ -881,7 +816,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
token_addr, token_addr,
amount amount
) VALUES %s;`, ) VALUES %s;`,
escapeHatchWithdrawals, escapeHatchWithdrawals[:],
)) ))
} }
@@ -889,7 +824,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) { func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) {
var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &escapeHatchWithdrawals, hdb.db, &escapeHatchWithdrawals,
"SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;", "SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;",
) )
return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal), return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal),
@@ -902,7 +837,7 @@ func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHat
// exist in the smart contracts. // exist in the smart contracts.
func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables, func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables,
auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error { auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error {
txn, err := hdb.dbWrite.Beginx() txn, err := hdb.db.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -986,7 +921,7 @@ func (hdb *HistoryDB) setExtraInfoForgedL1UserTxs(d sqlx.Ext, txs []common.L1Tx)
// the pagination system of the API/DB depends on this. Within blocks, all // the pagination system of the API/DB depends on this. Within blocks, all
// items should also be in the correct order (Accounts, Tokens, Txs, etc.) // items should also be in the correct order (Accounts, Tokens, Txs, etc.)
func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) { func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
txn, err := hdb.dbWrite.Beginx() txn, err := hdb.db.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1058,11 +993,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Add accountBalances if it exists
if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
return tracerr.Wrap(err)
}
// Set the EffectiveAmount and EffectiveDepositAmount of all the // Set the EffectiveAmount and EffectiveDepositAmount of all the
// L1UserTxs that have been forged in this batch // L1UserTxs that have been forged in this batch
if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil { if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {
@@ -1144,7 +1074,7 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) { func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{} coordinator := &CoordinatorAPI{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, coordinator, hdb.db, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;", "SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr, bidderAddr,
) )
@@ -1153,14 +1083,14 @@ func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*Coordina
// AddAuctionVars insert auction vars into the DB // AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error { func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars)) return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars))
} }
// GetTokensTest used to get tokens in a testing context // GetTokensTest used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) { func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{} tokens := []*TokenWithUSD{}
if err := meddler.QueryAll( if err := meddler.QueryAll(
hdb.dbRead, &tokens, hdb.db, &tokens,
"SELECT * FROM TOKEN", "SELECT * FROM TOKEN",
); err != nil { ); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)


@@ -39,12 +39,12 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
historyDB = NewHistoryDB(db, db, nil) historyDB = NewHistoryDB(db, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, db, apiConnCon) historyDBWithACC = NewHistoryDB(db, apiConnCon)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -203,10 +203,6 @@ func TestBatches(t *testing.T) {
fetchedLastBatchNum, err := historyDB.GetLastBatchNum() fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum) assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
// Test GetLastL1TxsNum // Test GetLastL1TxsNum
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum() fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
assert.NoError(t, err) assert.NoError(t, err)
@@ -215,12 +211,6 @@ func TestBatches(t *testing.T) {
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum() fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum) assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
} }
func TestBids(t *testing.T) { func TestBids(t *testing.T) {
@@ -377,22 +367,6 @@ func TestAccounts(t *testing.T) {
accs[i].Balance = nil accs[i].Balance = nil
assert.Equal(t, accs[i], acc) assert.Equal(t, accs[i], acc)
} }
// Test AccountBalances
accUpdates := make([]common.AccountUpdate, len(accs))
for i, acc := range accs {
accUpdates[i] = common.AccountUpdate{
EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
BatchNum: acc.BatchNum,
Idx: acc.Idx,
Nonce: common.Nonce(i),
Balance: big.NewInt(int64(i)),
}
}
err = historyDB.AddAccountUpdates(accUpdates)
require.NoError(t, err)
fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
require.NoError(t, err)
assert.Equal(t, accUpdates, fetchedAccBalances)
} }
func TestTxs(t *testing.T) { func TestTxs(t *testing.T) {
@@ -637,10 +611,10 @@ func TestTxs(t *testing.T) {
assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type) assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)
// Tx ID // Tx ID
assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String()) assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String()) assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String()) assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String()) assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
// Tx From and To IDx // Tx From and To IDx
assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx) assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -720,10 +694,6 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
assert.Equal(t, 5, len(l1UserTxs)) assert.Equal(t, 5, len(l1UserTxs))
assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs) assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
count, err := historyDB.GetUnforgedL1UserTxsCount()
require.NoError(t, err)
assert.Equal(t, 5, count)
// No l1UserTxs for this toForgeL1TxsNum // No l1UserTxs for this toForgeL1TxsNum
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2) l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
require.NoError(t, err) require.NoError(t, err)
@@ -821,11 +791,11 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
} }
// Add second batch to trigger the update of the batch_num, // Add second batch to trigger the update of the batch_num,
// while avoiding the implicit call of setExtraInfoForgedL1UserTxs // while avoiding the implicit call of setExtraInfoForgedL1UserTxs
err = historyDB.addBlock(historyDB.dbWrite, &blocks[1].Block) err = historyDB.addBlock(historyDB.db, &blocks[1].Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addBatch(historyDB.dbWrite, &blocks[1].Rollup.Batches[0].Batch) err = historyDB.addBatch(historyDB.db, &blocks[1].Rollup.Batches[0].Batch)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addAccounts(historyDB.dbWrite, blocks[1].Rollup.Batches[0].CreatedAccounts) err = historyDB.addAccounts(historyDB.db, blocks[1].Rollup.Batches[0].CreatedAccounts)
require.NoError(t, err) require.NoError(t, err)
// Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block // Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block
@@ -835,7 +805,7 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
l1Txs[1].EffectiveAmount = big.NewInt(0) l1Txs[1].EffectiveAmount = big.NewInt(0)
l1Txs[2].EffectiveDepositAmount = big.NewInt(0) l1Txs[2].EffectiveDepositAmount = big.NewInt(0)
l1Txs[2].EffectiveAmount = big.NewInt(0) l1Txs[2].EffectiveAmount = big.NewInt(0)
err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.dbWrite, l1Txs) err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.db, l1Txs)
require.NoError(t, err) require.NoError(t, err)
dbL1Txs, err := historyDB.GetAllL1UserTxs() dbL1Txs, err := historyDB.GetAllL1UserTxs()
@@ -922,10 +892,10 @@ func TestUpdateExitTree(t *testing.T) {
common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false, common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false,
Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr}, Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr},
) )
err = historyDB.addBlock(historyDB.dbWrite, &block.Block) err = historyDB.addBlock(historyDB.db, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num, err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -955,10 +925,10 @@ func TestUpdateExitTree(t *testing.T) {
Token: tokenAddr, Token: tokenAddr,
Amount: big.NewInt(80), Amount: big.NewInt(80),
}) })
err = historyDB.addBlock(historyDB.dbWrite, &block.Block) err = historyDB.addBlock(historyDB.db, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num, err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -1001,7 +971,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
URL: "bar", URL: "bar",
}, },
} }
err = historyDB.addCoordinators(historyDB.dbWrite, coords) err = historyDB.addCoordinators(historyDB.db, coords)
require.NoError(t, err) require.NoError(t, err)
bids := []common.Bid{ bids := []common.Bid{
@@ -1019,7 +989,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
}, },
} }
err = historyDB.addBids(historyDB.dbWrite, bids) err = historyDB.addBids(historyDB.db, bids)
require.NoError(t, err) require.NoError(t, err)
forger10, err := historyDB.GetBestBidCoordinator(10) forger10, err := historyDB.GetBestBidCoordinator(10)
@@ -1057,7 +1027,7 @@ func TestAddBucketUpdates(t *testing.T) {
Withdrawals: big.NewInt(42), Withdrawals: big.NewInt(42),
}, },
} }
err := historyDB.addBucketUpdates(historyDB.dbWrite, bucketUpdates) err := historyDB.addBucketUpdates(historyDB.db, bucketUpdates)
require.NoError(t, err) require.NoError(t, err)
dbBucketUpdates, err := historyDB.GetAllBucketUpdates() dbBucketUpdates, err := historyDB.GetAllBucketUpdates()
require.NoError(t, err) require.NoError(t, err)
@@ -1082,7 +1052,7 @@ func TestAddTokenExchanges(t *testing.T) {
ValueUSD: 67890, ValueUSD: 67890,
}, },
} }
err := historyDB.addTokenExchanges(historyDB.dbWrite, tokenExchanges) err := historyDB.addTokenExchanges(historyDB.db, tokenExchanges)
require.NoError(t, err) require.NoError(t, err)
dbTokenExchanges, err := historyDB.GetAllTokenExchanges() dbTokenExchanges, err := historyDB.GetAllTokenExchanges()
require.NoError(t, err) require.NoError(t, err)
@@ -1111,7 +1081,7 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
Amount: big.NewInt(20003), Amount: big.NewInt(20003),
}, },
} }
err := historyDB.addEscapeHatchWithdrawals(historyDB.dbWrite, escapeHatchWithdrawals) err := historyDB.addEscapeHatchWithdrawals(historyDB.db, escapeHatchWithdrawals)
require.NoError(t, err) require.NoError(t, err)
dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals() dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals()
require.NoError(t, err) require.NoError(t, err)
@@ -1179,12 +1149,16 @@ func TestGetMetricsAPI(t *testing.T) {
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
// Frequency is not exactly the desired one, some decimals may appear // Frequency is not exactly the desired one, some decimals may appear
// There is a -2 as time for first and last batch is not taken into account assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01) assert.Less(t, res.BatchFrequency, float64(frequency+1))
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01) // Truncate frequency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
@@ -1211,8 +1185,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
set = append(set, til.Instruction{Typ: til.TypeNewBlock}) set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// Transfers // Transfers
const numBlocks int = 30 for x := 0; x < 6000; x++ {
for x := 0; x < numBlocks; x++ {
set = append(set, til.Instruction{ set = append(set, til.Instruction{
Typ: common.TxTypeTransfer, Typ: common.TxTypeTransfer,
TokenID: common.TokenID(0), TokenID: common.TokenID(0),
@@ -1236,20 +1209,19 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
err = tc.FillBlocksExtra(blocks, &tilCfgExtra) err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err) require.NoError(t, err)
const numBatches int = 2 + numBlocks const numBatches int = 6002
const blockNum = 4 + numBlocks const numTx int = 6003
const blockNum = 6005 - 1
// Sanity check // Sanity check
require.Equal(t, blockNum, len(blocks)) require.Equal(t, blockNum, len(blocks))
// Adding one batch per block // Adding one batch per block
// batch frequency can be chosen // batch frequency can be chosen
const blockTime time.Duration = 3600 * time.Second const frequency int = 15
now := time.Now()
require.NoError(t, err)
for i := range blocks { for i := range blocks {
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime) blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
err = historyDB.AddBlockSCData(&blocks[i]) err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1257,10 +1229,16 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1) assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1) // Frequency is not exactly the desired one, some decimals may appear
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1) assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
assert.Less(t, res.BatchFrequency, float64(frequency+1))
// Truncate frecuency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
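The two TestGetMetricsAPI variants above assert the same relations between the metrics fields. The standalone sketch below reproduces that arithmetic with the constants of the long-running variant (6002 batches, 6003 txs, 15s block frequency); the program itself is illustrative and not part of the HistoryDB API.

```
package main

import "fmt"

func main() {
	const (
		numTx      = 6003
		numBatches = 6002
		frequency  = 15   // seconds between consecutive blocks in the test
		blockNum   = 6004 // number of blocks added
	)

	transactionsPerBatch := float64(numTx) / float64(numBatches-1)
	// The first block has no previous interval to measure against, hence,
	// presumably, the -frequency term in the assertion above.
	transactionsPerSecond := float64(numTx) / float64(frequency*blockNum-frequency)

	fmt.Printf("txs/batch:  %.3f\n", transactionsPerBatch)
	fmt.Printf("txs/second: %.3f\n", transactionsPerSecond)
}
```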


@@ -147,12 +147,6 @@ type txWrite struct {
Nonce *common.Nonce `meddler:"nonce"` Nonce *common.Nonce `meddler:"nonce"`
} }
// TokenSymbolAndAddr token representation with only Eth addr and symbol
type TokenSymbolAndAddr struct {
Symbol string `meddler:"symbol"`
Addr ethCommon.Address `meddler:"eth_addr"`
}
// TokenWithUSD add USD info to common.Token // TokenWithUSD add USD info to common.Token
type TokenWithUSD struct { type TokenWithUSD struct {
ItemID uint64 `json:"itemId" meddler:"item_id"` ItemID uint64 `json:"itemId" meddler:"item_id"`
@@ -245,8 +239,8 @@ type AccountAPI struct {
BatchNum common.BatchNum `meddler:"batch_num"` BatchNum common.BatchNum `meddler:"batch_num"`
PublicKey apitypes.HezBJJ `meddler:"bjj"` PublicKey apitypes.HezBJJ `meddler:"bjj"`
EthAddr apitypes.HezEthAddr `meddler:"eth_addr"` EthAddr apitypes.HezEthAddr `meddler:"eth_addr"`
Nonce common.Nonce `meddler:"nonce"` // max of 40 bits used Nonce common.Nonce `meddler:"-"` // max of 40 bits used
Balance *apitypes.BigIntStr `meddler:"balance"` // max of 192 bits used Balance *apitypes.BigIntStr `meddler:"-"` // max of 192 bits used
TotalItems uint64 `meddler:"total_items"` TotalItems uint64 `meddler:"total_items"`
FirstItem uint64 `meddler:"first_item"` FirstItem uint64 `meddler:"first_item"`
LastItem uint64 `meddler:"last_item"` LastItem uint64 `meddler:"last_item"`
@@ -310,13 +304,12 @@ type BatchAPI struct {
// Metrics define metrics of the network // Metrics define metrics of the network
type Metrics struct { type Metrics struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"` TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"` BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"` TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"` TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"` TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"` AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
} }
// MetricsTotals is used to get temporal information from HistoryDB // MetricsTotals is used to get temporal information from HistoryDB


@@ -27,8 +27,6 @@ const (
// PathLast defines the subpath of the last Batch in the subpath // PathLast defines the subpath of the last Batch in the subpath
// of the StateDB // of the StateDB
PathLast = "last" PathLast = "last"
// DefaultKeep is the default value for the Keep parameter
DefaultKeep = 128
) )
var ( var (
@@ -36,18 +34,16 @@ var (
KeyCurrentBatch = []byte("k:currentbatch") KeyCurrentBatch = []byte("k:currentbatch")
// keyCurrentIdx is used as key in the db to store the CurrentIdx // keyCurrentIdx is used as key in the db to store the CurrentIdx
keyCurrentIdx = []byte("k:idx") keyCurrentIdx = []byte("k:idx")
// ErrNoLast is returned when the KVDB has been configured to not have
// a Last checkpoint but a Last method is used
ErrNoLast = fmt.Errorf("no last checkpoint")
) )
// KVDB represents the Key-Value DB object // KVDB represents the Key-Value DB object
type KVDB struct { type KVDB struct {
cfg Config path string
db *pebble.Storage db *pebble.Storage
// CurrentIdx holds the current Idx that the BatchBuilder is using // CurrentIdx holds the current Idx that the BatchBuilder is using
CurrentIdx common.Idx CurrentIdx common.Idx
CurrentBatch common.BatchNum CurrentBatch common.BatchNum
keep int
m sync.Mutex m sync.Mutex
last *Last last *Last
} }
@@ -65,13 +61,13 @@ func (k *Last) setNew() error {
defer k.rw.Unlock() defer k.rw.Unlock()
if k.db != nil { if k.db != nil {
k.db.Close() k.db.Close()
k.db = nil
} }
lastPath := path.Join(k.path, PathLast) lastPath := path.Join(k.path, PathLast)
if err := os.RemoveAll(lastPath); err != nil { err := os.RemoveAll(lastPath)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
db, err := pebble.NewPebbleStorage(lastPath, false) db, err := pebble.NewPebbleStorage(path.Join(k.path, lastPath), false)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -84,7 +80,6 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
defer k.rw.Unlock() defer k.rw.Unlock()
if k.db != nil { if k.db != nil {
k.db.Close() k.db.Close()
k.db = nil
} }
lastPath := path.Join(k.path, PathLast) lastPath := path.Join(k.path, PathLast)
if err := kvdb.MakeCheckpointFromTo(batchNum, lastPath); err != nil { if err := kvdb.MakeCheckpointFromTo(batchNum, lastPath); err != nil {
@@ -101,48 +96,26 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
func (k *Last) close() { func (k *Last) close() {
k.rw.Lock() k.rw.Lock()
defer k.rw.Unlock() defer k.rw.Unlock()
if k.db != nil { k.db.Close()
k.db.Close()
k.db = nil
}
}
// Config of the KVDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// At every checkpoint, check that there are no gaps between the
// checkpoints
NoGapsCheck bool
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
} }
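For reference, a minimal sketch of constructing the KVDB with the Config shown above: once for a synchronizer-style instance that serves thread-safe reads, and once for a local instance that skips the `last` copy and the gap check. The import path and directory names are assumptions, not taken from this diff.

```
package main

import (
	"log"

	"github.com/hermeznetwork/hermez-node/db/kvdb" // assumed package path
)

func main() {
	// Keeps the newest DefaultKeep (128) checkpoints and maintains the extra
	// "last" copy so LastRead can serve thread-safe queries.
	syncDB, err := kvdb.NewKVDB(kvdb.Config{Path: "/var/hermez/sync", Keep: kvdb.DefaultKeep})
	if err != nil {
		log.Fatal(err)
	}
	defer syncDB.Close()

	// A throwaway/local instance that needs neither the "last" copy nor the
	// gap check between checkpoints.
	localDB, err := kvdb.NewKVDB(kvdb.Config{
		Path:        "/var/hermez/local",
		Keep:        kvdb.DefaultKeep,
		NoLast:      true,
		NoGapsCheck: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer localDB.Close()
}
```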
// NewKVDB creates a new KVDB, allowing to use an in-memory or in-disk storage. // NewKVDB creates a new KVDB, allowing to use an in-memory or in-disk storage.
// Checkpoints older than the value defined by `keep` will be deleted. // Checkpoints older than the value defined by `keep` will be deleted.
// func NewKVDB(pathDB string, keep int) (*KVDB, error) { func NewKVDB(pathDB string, keep int) (*KVDB, error) {
func NewKVDB(cfg Config) (*KVDB, error) {
var sto *pebble.Storage var sto *pebble.Storage
var err error var err error
sto, err = pebble.NewPebbleStorage(path.Join(cfg.Path, PathCurrent), false) sto, err = pebble.NewPebbleStorage(path.Join(pathDB, PathCurrent), false)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
var last *Last
if !cfg.NoLast {
last = &Last{
path: cfg.Path,
}
}
kvdb := &KVDB{ kvdb := &KVDB{
cfg: cfg, path: pathDB,
db: sto, db: sto,
last: last, keep: keep,
last: &Last{
path: pathDB,
},
} }
// load currentBatch // load currentBatch
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch() kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
@@ -160,32 +133,29 @@ func NewKVDB(cfg Config) (*KVDB, error) {
} }
// LastRead is a thread-safe method to query the last KVDB // LastRead is a thread-safe method to query the last KVDB
func (k *KVDB) LastRead(fn func(db *pebble.Storage) error) error { func (kvdb *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
if k.last == nil { kvdb.last.rw.RLock()
return tracerr.Wrap(ErrNoLast) defer kvdb.last.rw.RUnlock()
} return fn(kvdb.last.db)
k.last.rw.RLock()
defer k.last.rw.RUnlock()
return fn(k.last.db)
} }
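A minimal sketch of how the callback form might be used from the same package to read a value out of the `last` checkpoint without touching the working copy; the copy-out convention and the choice of KeyCurrentBatch are illustrative, not part of this change.

```
// Illustrative helper (same package assumed): the NoLast variant returns
// ErrNoLast here instead of invoking the callback.
func readLastCurrentBatch(k *KVDB) ([]byte, error) {
	var value []byte
	err := k.LastRead(func(sdb *pebble.Storage) error {
		v, err := sdb.Get(KeyCurrentBatch)
		if err != nil {
			return tracerr.Wrap(err)
		}
		// Copy the bytes out; the storage should only be used inside the callback.
		value = append([]byte(nil), v...)
		return nil
	})
	return value, err
}
```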
// DB returns the *pebble.Storage from the KVDB // DB returns the *pebble.Storage from the KVDB
func (k *KVDB) DB() *pebble.Storage { func (kvdb *KVDB) DB() *pebble.Storage {
return k.db return kvdb.db
} }
// StorageWithPrefix returns the db.Storage with the given prefix from the // StorageWithPrefix returns the db.Storage with the given prefix from the
// current KVDB // current KVDB
func (k *KVDB) StorageWithPrefix(prefix []byte) db.Storage { func (kvdb *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
return k.db.WithPrefix(prefix) return kvdb.db.WithPrefix(prefix)
} }
// Reset resets the KVDB to the checkpoint at the given batchNum. Reset does // Reset resets the KVDB to the checkpoint at the given batchNum. Reset does
// not delete the checkpoints between old current and the new current, those // not delete the checkpoints between old current and the new current, those
// checkpoints will remain in the storage, and eventually will be deleted when // checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them. // MakeCheckpoint overwrites them.
func (k *KVDB) Reset(batchNum common.BatchNum) error { func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
return k.reset(batchNum, true) return kvdb.reset(batchNum, true)
} }
// reset resets the KVDB to the checkpoint at the given batchNum. Reset does // reset resets the KVDB to the checkpoint at the given batchNum. Reset does
@@ -193,19 +163,21 @@ func (k *KVDB) Reset(batchNum common.BatchNum) error {
// checkpoints will remain in the storage, and eventually will be deleted when // checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them. `closeCurrent` will close the currently // MakeCheckpoint overwrites them. `closeCurrent` will close the currently
// opened db before doing the reset. // opened db before doing the reset.
func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error { func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
currentPath := path.Join(k.cfg.Path, PathCurrent) currentPath := path.Join(kvdb.path, PathCurrent)
if closeCurrent && k.db != nil { if closeCurrent {
k.db.Close() if err := kvdb.db.Pebble().Close(); err != nil {
k.db = nil return tracerr.Wrap(err)
}
} }
// remove 'current' // remove 'current'
if err := os.RemoveAll(currentPath); err != nil { err := os.RemoveAll(currentPath)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// remove all checkpoints > batchNum // remove all checkpoints > batchNum
list, err := k.ListCheckpoints() list, err := kvdb.ListCheckpoints()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -218,7 +190,7 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
} }
} }
for _, bn := range list[start:] { for _, bn := range list[start:] {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil { if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -229,27 +201,23 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
k.db = sto kvdb.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255 kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0 kvdb.CurrentBatch = 0
if k.last != nil { if err := kvdb.last.setNew(); err != nil {
if err := k.last.setNew(); err != nil { return tracerr.Wrap(err)
return tracerr.Wrap(err)
}
} }
return nil return nil
} }
// copy 'batchNum' to 'current' // copy 'batchNum' to 'current'
if err := k.MakeCheckpointFromTo(batchNum, currentPath); err != nil { if err := kvdb.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// copy 'batchNum' to 'last' // copy 'batchNum' to 'last'
if k.last != nil { if err := kvdb.last.set(kvdb, batchNum); err != nil {
if err := k.last.set(k, batchNum); err != nil { return tracerr.Wrap(err)
return tracerr.Wrap(err)
}
} }
// open the new 'current' // open the new 'current'
@@ -257,15 +225,15 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
k.db = sto kvdb.db = sto
// get currentBatch num // get currentBatch num
k.CurrentBatch, err = k.GetCurrentBatch() kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// idx is obtained from the statedb reset // idx is obtained from the statedb reset
k.CurrentIdx, err = k.GetCurrentIdx() kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -275,28 +243,28 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
// ResetFromSynchronizer performs a reset in the KVDB getting the state from // ResetFromSynchronizer performs a reset in the KVDB getting the state from
// synchronizerKVDB for the given batchNum. // synchronizerKVDB for the given batchNum.
func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error { func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
if synchronizerKVDB == nil { if synchronizerKVDB == nil {
return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil")) return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil"))
} }
currentPath := path.Join(k.cfg.Path, PathCurrent) currentPath := path.Join(kvdb.path, PathCurrent)
if k.db != nil { if err := kvdb.db.Pebble().Close(); err != nil {
k.db.Close() return tracerr.Wrap(err)
k.db = nil
} }
// remove 'current' // remove 'current'
if err := os.RemoveAll(currentPath); err != nil { err := os.RemoveAll(currentPath)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// remove all checkpoints // remove all checkpoints
list, err := k.ListCheckpoints() list, err := kvdb.ListCheckpoints()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for _, bn := range list { for _, bn := range list {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil { if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -307,22 +275,22 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
k.db = sto kvdb.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255 kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0 kvdb.CurrentBatch = 0
return nil return nil
} }
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum)) checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
// copy synchronizer 'BatchNumX' to 'BatchNumX' // copy synchronizer 'BatchNumX' to 'BatchNumX'
if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil { if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// copy 'BatchNumX' to 'current' // copy 'BatchNumX' to 'current'
err = k.MakeCheckpointFromTo(batchNum, currentPath) err = kvdb.MakeCheckpointFromTo(batchNum, currentPath)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -332,15 +300,15 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
k.db = sto kvdb.db = sto
// get currentBatch num // get currentBatch num
k.CurrentBatch, err = k.GetCurrentBatch() kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// get currentIdx // get currentIdx
k.CurrentIdx, err = k.GetCurrentIdx() kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -349,8 +317,8 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
} }
// GetCurrentBatch returns the current BatchNum stored in the KVDB // GetCurrentBatch returns the current BatchNum stored in the KVDB
func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) { func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
cbBytes, err := k.db.Get(KeyCurrentBatch) cbBytes, err := kvdb.db.Get(KeyCurrentBatch)
if tracerr.Unwrap(err) == db.ErrNotFound { if tracerr.Unwrap(err) == db.ErrNotFound {
return 0, nil return 0, nil
} }
@@ -361,12 +329,12 @@ func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) {
} }
// setCurrentBatch stores the current BatchNum in the KVDB // setCurrentBatch stores the current BatchNum in the KVDB
func (k *KVDB) setCurrentBatch() error { func (kvdb *KVDB) setCurrentBatch() error {
tx, err := k.db.NewTx() tx, err := kvdb.db.NewTx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
err = tx.Put(KeyCurrentBatch, k.CurrentBatch.Bytes()) err = tx.Put(KeyCurrentBatch, kvdb.CurrentBatch.Bytes())
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -377,9 +345,9 @@ func (k *KVDB) setCurrentBatch() error {
} }
// GetCurrentIdx returns the stored Idx from the KVDB, which is the last Idx // GetCurrentIdx returns the stored Idx from the KVDB, which is the last Idx
// used for an Account in the k. // used for an Account in the KVDB.
func (k *KVDB) GetCurrentIdx() (common.Idx, error) { func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
idxBytes, err := k.db.Get(keyCurrentIdx) idxBytes, err := kvdb.db.Get(keyCurrentIdx)
if tracerr.Unwrap(err) == db.ErrNotFound { if tracerr.Unwrap(err) == db.ErrNotFound {
return common.RollupConstReservedIDx, nil // 255, nil return common.RollupConstReservedIDx, nil // 255, nil
} }
@@ -390,10 +358,10 @@ func (k *KVDB) GetCurrentIdx() (common.Idx, error) {
} }
// SetCurrentIdx stores Idx in the KVDB // SetCurrentIdx stores Idx in the KVDB
func (k *KVDB) SetCurrentIdx(idx common.Idx) error { func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {
k.CurrentIdx = idx kvdb.CurrentIdx = idx
tx, err := k.db.NewTx() tx, err := kvdb.db.NewTx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -413,64 +381,49 @@ func (k *KVDB) SetCurrentIdx(idx common.Idx) error {
// MakeCheckpoint does a checkpoint at the given batchNum in the defined path. // MakeCheckpoint does a checkpoint at the given batchNum in the defined path.
// Internally this advances & stores the current BatchNum, and then stores a // Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the k. // Checkpoint of the current state of the KVDB.
func (k *KVDB) MakeCheckpoint() error { func (kvdb *KVDB) MakeCheckpoint() error {
// advance currentBatch // advance currentBatch
k.CurrentBatch++ kvdb.CurrentBatch++
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, k.CurrentBatch)) checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, kvdb.CurrentBatch))
if err := k.setCurrentBatch(); err != nil { if err := kvdb.setCurrentBatch(); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// if checkpoint BatchNum already exist in disk, delete it // if checkpoint BatchNum already exist in disk, delete it
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
} else if err != nil { err := os.RemoveAll(checkpointPath)
return tracerr.Wrap(err) if err != nil {
} else {
if err := os.RemoveAll(checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
// execute Checkpoint // execute Checkpoint
if err := k.db.Pebble().Checkpoint(checkpointPath); err != nil { if err := kvdb.db.Pebble().Checkpoint(checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// copy 'CurrentBatch' to 'last' // copy 'CurrentBatch' to 'last'
if k.last != nil { if err := kvdb.last.set(kvdb, kvdb.CurrentBatch); err != nil {
if err := k.last.set(k, k.CurrentBatch); err != nil { return tracerr.Wrap(err)
return tracerr.Wrap(err)
}
} }
// delete old checkpoints // delete old checkpoints
if err := k.deleteOldCheckpoints(); err != nil { if err := kvdb.deleteOldCheckpoints(); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
} }
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, tracerr.Wrap(err)
}
return true, nil
}
// DeleteCheckpoint removes if exist the checkpoint of the given batchNum // DeleteCheckpoint removes if exist the checkpoint of the given batchNum
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error { func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum)) checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum)) return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
} else if err != nil {
return tracerr.Wrap(err)
} }
return os.RemoveAll(checkpointPath) return os.RemoveAll(checkpointPath)
@@ -478,8 +431,8 @@ func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
// ListCheckpoints returns the list of batchNums of the checkpoints, sorted. // ListCheckpoints returns the list of batchNums of the checkpoints, sorted.
// If there's a gap between the list of checkpoints, an error is returned. // If there's a gap between the list of checkpoints, an error is returned.
func (k *KVDB) ListCheckpoints() ([]int, error) { func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
files, err := ioutil.ReadDir(k.cfg.Path) files, err := ioutil.ReadDir(kvdb.path)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -496,12 +449,12 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
} }
} }
sort.Ints(checkpoints) sort.Ints(checkpoints)
if !k.cfg.NoGapsCheck && len(checkpoints) > 0 { if len(checkpoints) > 0 {
first := checkpoints[0] first := checkpoints[0]
for _, checkpoint := range checkpoints[1:] { for _, checkpoint := range checkpoints[1:] {
first++ first++
if checkpoint != first { if checkpoint != first {
log.Errorw("gap between checkpoints", "checkpoints", checkpoints) log.Errorw("GAP", "checkpoints", checkpoints)
return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint)) return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint))
} }
} }
@@ -511,14 +464,14 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
// deleteOldCheckpoints deletes old checkpoints when there are more than // deleteOldCheckpoints deletes old checkpoints when there are more than
// `s.keep` checkpoints // `s.keep` checkpoints
func (k *KVDB) deleteOldCheckpoints() error { func (kvdb *KVDB) deleteOldCheckpoints() error {
list, err := k.ListCheckpoints() list, err := kvdb.ListCheckpoints()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if k.cfg.Keep > 0 && len(list) > k.cfg.Keep { if len(list) > kvdb.keep {
for _, checkpoint := range list[:len(list)-k.cfg.Keep] { for _, checkpoint := range list[:len(list)-kvdb.keep] {
if err := k.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil { if err := kvdb.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
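A small worked example of the pruning rule above, matching the keep=16 / 32-checkpoint setup that TestDeleteOldCheckpoints uses further down; the slicing is the same `list[:len(list)-keep]`.

```
package main

import "fmt"

func main() {
	// Checkpoints on disk, sorted as ListCheckpoints returns them: 1..32.
	list := make([]int, 0, 32)
	for i := 1; i <= 32; i++ {
		list = append(list, i)
	}
	keep := 16

	// Same rule as deleteOldCheckpoints: keep only the newest `keep` ones.
	if keep > 0 && len(list) > keep {
		fmt.Println("deleted:", list[:len(list)-keep]) // 1..16
		fmt.Println("kept:   ", list[len(list)-keep:]) // 17..32
	}
}
```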
@@ -529,42 +482,43 @@ func (k *KVDB) deleteOldCheckpoints() error {
// MakeCheckpointFromTo makes a checkpoint from the current db at fromBatchNum // MakeCheckpointFromTo makes a checkpoint from the current db at fromBatchNum
// to the dest folder. This method is locking, so it can be called from // to the dest folder. This method is locking, so it can be called from
// multiple places at the same time. // multiple places at the same time.
func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error { func (kvdb *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum)) source := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
if _, err := os.Stat(source); os.IsNotExist(err) { if _, err := os.Stat(source); os.IsNotExist(err) {
// if kvdb does not have checkpoint at batchNum, return err // if kvdb does not have checkpoint at batchNum, return err
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source)) return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
} else if err != nil {
return tracerr.Wrap(err)
} }
// By locking we allow calling MakeCheckpointFromTo from multiple // By locking we allow calling MakeCheckpointFromTo from multiple
// places at the same time for the same stateDB. This allows the // places at the same time for the same stateDB. This allows the
// synchronizer to do a reset to a batchNum at the same time as the // synchronizer to do a reset to a batchNum at the same time as the
// pipeline is doing a txSelector.Reset and batchBuilder.Reset from // pipeline is doing a txSelector.Reset and batchBuilder.Reset from
// synchronizer to the same batchNum // synchronizer to the same batchNum
k.m.Lock() kvdb.m.Lock()
defer k.m.Unlock() defer kvdb.m.Unlock()
return PebbleMakeCheckpoint(source, dest) return pebbleMakeCheckpoint(source, dest)
} }
// PebbleMakeCheckpoint is a helper function to make a pebble checkpoint from func pebbleMakeCheckpoint(source, dest string) error {
// source to dest.
func PebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint // Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); os.IsNotExist(err) { if _, err := os.Stat(dest); !os.IsNotExist(err) {
} else if err != nil { err := os.RemoveAll(dest)
return tracerr.Wrap(err) if err != nil {
} else {
if err := os.RemoveAll(dest); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
sto, err := pebble.NewPebbleStorage(source, false) sto, err := pebble.NewPebbleStorage(source, false)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
defer sto.Close() defer func() {
errClose := sto.Pebble().Close()
if errClose != nil {
log.Errorw("Pebble.Close", "err", errClose)
}
}()
// execute Checkpoint // execute Checkpoint
err = sto.Pebble().Checkpoint(dest) err = sto.Pebble().Checkpoint(dest)
@@ -576,12 +530,7 @@ func PebbleMakeCheckpoint(source, dest string) error {
} }
// Close the DB // Close the DB
func (k *KVDB) Close() { func (kvdb *KVDB) Close() {
if k.db != nil { kvdb.db.Close()
k.db.Close() kvdb.last.close()
k.db = nil
}
if k.last != nil {
k.last.close()
}
} }


@@ -37,7 +37,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
db, err := NewKVDB(Config{Path: dir, Keep: 128}) db, err := NewKVDB(dir, 128)
require.NoError(t, err) require.NoError(t, err)
// add test key-values // add test key-values
@@ -72,7 +72,7 @@ func TestCheckpoints(t *testing.T) {
err = db.Reset(3) err = db.Reset(3)
require.NoError(t, err) require.NoError(t, err)
printCheckpoints(t, db.cfg.Path) printCheckpoints(t, db.path)
// check that currentBatch is as expected after Reset // check that currentBatch is as expected after Reset
cb, err = db.GetCurrentBatch() cb, err = db.GetCurrentBatch()
@@ -99,7 +99,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb") dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal)) defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewKVDB(Config{Path: dirLocal, Keep: 128}) ldb, err := NewKVDB(dirLocal, 128)
require.NoError(t, err) require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB) // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -120,7 +120,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2") dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2)) defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewKVDB(Config{Path: dirLocal2, Keep: 128}) ldb2, err := NewKVDB(dirLocal2, 128)
require.NoError(t, err) require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB) // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -139,9 +139,9 @@ func TestCheckpoints(t *testing.T) {
debug := false debug := false
if debug { if debug {
printCheckpoints(t, db.cfg.Path) printCheckpoints(t, db.path)
printCheckpoints(t, ldb.cfg.Path) printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.cfg.Path) printCheckpoints(t, ldb2.path)
} }
} }
@@ -150,7 +150,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
db, err := NewKVDB(Config{Path: dir, Keep: 128}) db, err := NewKVDB(dir, 128)
require.NoError(t, err) require.NoError(t, err)
numCheckpoints := 16 numCheckpoints := 16
@@ -181,7 +181,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
keep := 16 keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep}) db, err := NewKVDB(dir, keep)
require.NoError(t, err) require.NoError(t, err)
numCheckpoints := 32 numCheckpoints := 32
@@ -202,7 +202,7 @@ func TestGetCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
keep := 16 keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep}) db, err := NewKVDB(dir, keep)
require.NoError(t, err) require.NoError(t, err)
idx, err := db.GetCurrentIdx() idx, err := db.GetCurrentIdx()
@@ -211,7 +211,7 @@ func TestGetCurrentIdx(t *testing.T) {
db.Close() db.Close()
db, err = NewKVDB(Config{Path: dir, Keep: keep}) db, err = NewKVDB(dir, keep)
require.NoError(t, err) require.NoError(t, err)
idx, err = db.GetCurrentIdx() idx, err = db.GetCurrentIdx()
@@ -227,7 +227,7 @@ func TestGetCurrentIdx(t *testing.T) {
db.Close() db.Close()
db, err = NewKVDB(Config{Path: dir, Keep: keep}) db, err = NewKVDB(dir, keep)
require.NoError(t, err) require.NoError(t, err)
idx, err = db.GetCurrentIdx() idx, err = db.GetCurrentIdx()


@@ -1,18 +1,12 @@
package l2db package l2db
import ( import (
"fmt"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/russross/meddler" "github.com/russross/meddler"
) )
var (
errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB // AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire() cancel, err := l2db.apiConnCon.Acquire()
@@ -34,7 +28,7 @@ func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCre
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI) auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, auth, l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
@@ -48,54 +42,20 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
row := l2db.dbRead.QueryRow(`SELECT "SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
($1::NUMERIC * COALESCE(token.usd, 0) * fee_percentage($2::NUMERIC)) / common.PoolL2TxStatePending,
(10.0 ^ token.decimals::NUMERIC) )
FROM token WHERE token.token_id = $3;`, var totalTxs uint32
tx.AmountFloat, tx.Fee, tx.TokenID) if err := row.Scan(&totalTxs); err != nil {
var feeUSD float64
if err := row.Scan(&feeUSD); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if feeUSD < l2db.minFeeUSD { if totalTxs >= l2db.maxTxs {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)", return tracerr.New(
feeUSD, l2db.minFeeUSD)) "The pool is at full capacity. More transactions are not accepted currently",
)
} }
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
if err != nil {
return err
}
valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
if err != nil {
return err
}
values, err := meddler.Default.Values(tx, false)
if err != nil {
return err
}
q := fmt.Sprintf(
`INSERT INTO tx_pool (%s)
SELECT %s
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
namesPart, valuesPart,
len(values)+1, len(values)+2) //nolint:gomnd
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
res, err := l2db.dbWrite.Exec(q, values...)
if err != nil {
return tracerr.Wrap(err)
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return tracerr.Wrap(err)
}
if rowsAffected == 0 {
return tracerr.Wrap(errPoolFull)
}
return nil
} }
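The INSERT ... SELECT ... WHERE form above keeps the pending-count check and the insert in a single statement and signals a full pool through RowsAffected instead of a separate COUNT round trip. Below is a hedged sketch of the same pattern against a plain database/sql handle; the table, column and state names are placeholders.

```
package sketch

import "database/sql"

// insertIfNotFull sketches the capacity-limited insert pattern; all schema
// names are placeholders. When the pending count has reached maxTxs the
// statement inserts nothing and the function reports false.
func insertIfNotFull(db *sql.DB, txID []byte, maxTxs int) (bool, error) {
	res, err := db.Exec(
		`INSERT INTO tx_pool (tx_id, state)
		 SELECT $1, 'pend'
		 WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = 'pend') < $2;`,
		txID, maxTxs)
	if err != nil {
		return false, err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return false, err
	}
	return n > 0, nil
}
```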
// selectPoolTxAPI select part of queries to get PoolL2TxRead // selectPoolTxAPI select part of queries to get PoolL2TxRead
@@ -118,7 +78,7 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI) tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, tx, l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;", selectPoolTxAPI+"WHERE tx_id = $1;",
txID, txID,
)) ))


@@ -21,12 +21,10 @@ import (
// L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant // L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant
// due to them being forged or invalid after a safety period // due to them being forged or invalid after a safety period
type L2DB struct { type L2DB struct {
dbRead *sqlx.DB db *sqlx.DB
dbWrite *sqlx.DB
safetyPeriod common.BatchNum safetyPeriod common.BatchNum
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
@@ -34,20 +32,17 @@ type L2DB struct {
// To create it, a db connection is needed, along with the safety period expressed in batches, // To create it, a db connection is needed, along with the safety period expressed in batches,
// the maxTxs limit for the pool, and the TTL (time to live) for pending txs. // the maxTxs limit for the pool, and the TTL (time to live) for pending txs.
func NewL2DB( func NewL2DB(
dbRead, dbWrite *sqlx.DB, db *sqlx.DB,
safetyPeriod common.BatchNum, safetyPeriod common.BatchNum,
maxTxs uint32, maxTxs uint32,
minFeeUSD float64,
TTL time.Duration, TTL time.Duration,
apiConnCon *db.APIConnectionController, apiConnCon *db.APIConnectionController,
) *L2DB { ) *L2DB {
return &L2DB{ return &L2DB{
dbRead: dbRead, db: db,
dbWrite: dbWrite,
safetyPeriod: safetyPeriod, safetyPeriod: safetyPeriod,
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
apiConnCon: apiConnCon, apiConnCon: apiConnCon,
} }
} }
@@ -55,18 +50,12 @@ func NewL2DB(
// DB returns a pointer to the L2DB.db. This method should be used only for // DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes. // internal testing purposes.
func (l2db *L2DB) DB() *sqlx.DB { func (l2db *L2DB) DB() *sqlx.DB {
return l2db.dbWrite return l2db.db
}
// MinFeeUSD returns the minimum fee in USD that is required to accept txs into
// the pool
func (l2db *L2DB) MinFeeUSD() float64 {
return l2db.minFeeUSD
} }
// AddAccountCreationAuth inserts an account creation authorization into the DB // AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
_, err := l2db.dbWrite.Exec( _, err := l2db.db.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature) `INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`, VALUES ($1, $2, $3);`,
auth.EthAddr, auth.BJJ, auth.Signature, auth.EthAddr, auth.BJJ, auth.Signature,
@@ -74,26 +63,34 @@ func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// AddManyAccountCreationAuth inserts a batch of accounts creation authorization
// if not exist into the DB
func (l2db *L2DB) AddManyAccountCreationAuth(auths []common.AccountCreationAuth) error {
_, err := sqlx.NamedExec(l2db.dbWrite,
`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES (:ethaddr, :bjj, :signature)
ON CONFLICT (eth_addr) DO NOTHING`, auths)
return tracerr.Wrap(err)
}
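A brief usage sketch of the batch insert above; the addresses and signatures are dummies and an initialized `l2db` is assumed.

```
// Illustrative call; thanks to ON CONFLICT (eth_addr) DO NOTHING, re-sending
// an address that already has an authorization is not an error.
auths := []common.AccountCreationAuth{
	{EthAddr: ethCommon.HexToAddress("0x0000000000000000000000000000000000000001"), Signature: []byte{0x01}},
	{EthAddr: ethCommon.HexToAddress("0x0000000000000000000000000000000000000002"), Signature: []byte{0x02}},
}
if err := l2db.AddManyAccountCreationAuth(auths); err != nil {
	// handle error
}
```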
// GetAccountCreationAuth returns an account creation authorization from the DB // GetAccountCreationAuth returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) { func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) {
auth := new(common.AccountCreationAuth) auth := new(common.AccountCreationAuth)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, auth, l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
} }
// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
// UpdateTxsInfo updates the parameter Info of the pool transactions // UpdateTxsInfo updates the parameter Info of the pool transactions
func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error { func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
if len(txs) == 0 { if len(txs) == 0 {
@@ -117,7 +114,7 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
WHERE tx_pool.tx_id = tx_update.id; WHERE tx_pool.tx_id = tx_update.id;
` `
if len(txUpdates) > 0 { if len(txUpdates) > 0 {
if _, err := sqlx.NamedExec(l2db.dbWrite, query, txUpdates); err != nil { if _, err := sqlx.NamedExec(l2db.db, query, txUpdates); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -125,8 +122,9 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
return nil return nil
} }
// NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx // AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite { // but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
// transform tx from *common.PoolL2Tx to PoolL2TxWrite // transform tx from *common.PoolL2Tx to PoolL2TxWrite
insertTx := &PoolL2TxWrite{ insertTx := &PoolL2TxWrite{
TxID: tx.TxID, TxID: tx.TxID,
@@ -168,15 +166,8 @@ func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
f := new(big.Float).SetInt(tx.Amount) f := new(big.Float).SetInt(tx.Amount)
amountF, _ := f.Float64() amountF, _ := f.Float64()
insertTx.AmountFloat = amountF insertTx.AmountFloat = amountF
return insertTx
}
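The amount_f column is filled through the big.Float conversion above, which is approximate beyond roughly 15-16 significant digits; presumably that is acceptable because it only feeds estimates such as the USD fee, not balances. A tiny self-contained illustration of the precision loss:

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// One token with 18 decimals plus 1 extra base unit.
	amount, _ := new(big.Int).SetString("1000000000000000001", 10)

	// Same conversion as the pool-tx write path above.
	f := new(big.Float).SetInt(amount)
	amountF, _ := f.Float64()

	fmt.Printf("exact:   %s\n", amount.String()) // 1000000000000000001
	fmt.Printf("float64: %.0f\n", amountF)       // 1000000000000000000 (the +1 is lost)
}
```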
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
// but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
// insert tx // insert tx
return tracerr.Wrap(meddler.Insert(l2db.dbWrite, "tx_pool", insertTx)) return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
} }
// selectPoolTxCommon select part of queries to get common.PoolL2Tx // selectPoolTxCommon select part of queries to get common.PoolL2Tx
@@ -185,15 +176,14 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx, tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount, rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type, tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
(fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) / fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update
(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id ` FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
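A worked example of the fee_usd expression in the SELECT above, assuming fee_percentage returns the fee factor for the transaction's fee index (e.g. 0.001); the figures are illustrative.

```
package main

import (
	"fmt"
	"math"
)

func main() {
	// fee_usd = fee_percentage(fee) * token.usd * amount_f / 10^decimals
	const (
		feeFactor = 0.001  // assumed result of fee_percentage(fee)
		tokenUSD  = 2.0    // token price in USD
		amountF   = 1.5e18 // 1.5 tokens with 18 decimals, as stored in amount_f
		decimals  = 18
	)
	feeUSD := feeFactor * tokenUSD * amountF / math.Pow(10, decimals)
	fmt.Printf("fee in USD: %.4f\n", feeUSD) // 0.0030
}
```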
// GetTx return the specified Tx in common.PoolL2Tx format // GetTx return the specified Tx in common.PoolL2Tx format
func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) { func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
tx := new(common.PoolL2Tx) tx := new(common.PoolL2Tx)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, tx, l2db.db, tx,
selectPoolTxCommon+"WHERE tx_id = $1;", selectPoolTxCommon+"WHERE tx_id = $1;",
txID, txID,
)) ))
@@ -203,7 +193,7 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) { func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx var txs []*common.PoolL2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
l2db.dbRead, &txs, l2db.db, &txs,
selectPoolTxCommon+"WHERE state = $1", selectPoolTxCommon+"WHERE state = $1",
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
) )
@@ -228,8 +218,8 @@ func (l2db *L2DB) StartForging(txIDs []common.TxID, batchNum common.BatchNum) er
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -251,8 +241,8 @@ func (l2db *L2DB) DoneForging(txIDs []common.TxID, batchNum common.BatchNum) err
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -273,8 +263,8 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -282,7 +272,7 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
// of unique FromIdx // of unique FromIdx
func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) { func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) {
var idxs []common.Idx var idxs []common.Idx
rows, err := l2db.dbRead.Query(`SELECT DISTINCT from_idx FROM tx_pool rows, err := l2db.db.Query(`SELECT DISTINCT from_idx FROM tx_pool
WHERE state = $1;`, common.PoolL2TxStatePending) WHERE state = $1;`, common.PoolL2TxStatePending)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -320,10 +310,10 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
return nil return nil
} }
// Fill the batch_num in the query with Sprintf because we are using a // Fill the batch_num in the query with Sprintf because we are using a
// named query which works with slices, and doesn't handle an extra // named query which works with slices, and doesn't handle an extra
// individual argument. // individual argument.
query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum) query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil { if _, err := sqlx.NamedExec(l2db.db, query, updatedAccounts); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
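
The comment above explains the `Sprintf` workaround: `sqlx.NamedExec` expands a slice of structs into one `VALUES` tuple per element, so a scalar such as `batch_num` cannot travel as an extra named argument and is formatted into the query text first. A hedged sketch of that pattern with a stand-in struct and query (not the real `invalidateOldNoncesQuery`):

```
package sketch

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

// idxNonce stands in for common.IdxNonce.
type idxNonce struct {
	Idx   int64 `db:"idx"`
	Nonce int64 `db:"nonce"`
}

// invalidateOld shows the pattern: the slice is expanded by NamedExec into one
// VALUES tuple per element, while the scalar batch number is injected with
// Sprintf beforehand. Table, column and state literals here are illustrative.
func invalidateOld(db *sqlx.DB, batchNum int64, accounts []idxNonce) error {
	const tmpl = `UPDATE tx_pool SET state = 'invl', batch_num = %d
	FROM (VALUES (NULL::BIGINT, NULL::BIGINT), (:idx, :nonce)) AS updated (idx, nonce)
	WHERE tx_pool.from_idx = updated.idx AND tx_pool.nonce < updated.nonce;`
	query := fmt.Sprintf(tmpl, batchNum)
	_, err := sqlx.NamedExec(db, query, accounts)
	return err
}
```
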
@@ -332,11 +322,10 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg. // Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg.
// The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending // The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error { func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.dbWrite.Exec( _, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1 `UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`, WHERE (state = $2 OR state = $3) AND batch_num > $4`,
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
common.PoolL2TxStateForging,
common.PoolL2TxStateForged, common.PoolL2TxStateForged,
common.PoolL2TxStateInvalid, common.PoolL2TxStateInvalid,
lastValidBatch, lastValidBatch,
@@ -348,7 +337,7 @@ func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
// it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded // it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded
func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) { func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
now := time.Now().UTC().Unix() now := time.Now().UTC().Unix()
_, err = l2db.dbWrite.Exec( _, err = l2db.db.Exec(
`DELETE FROM tx_pool WHERE ( `DELETE FROM tx_pool WHERE (
batch_num < $1 AND (state = $2 OR state = $3) batch_num < $1 AND (state = $2 OR state = $3)
) OR ( ) OR (
@@ -364,14 +353,3 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
) )
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// PurgeByExternalDelete deletes all pending transactions marked with true in
// the `external_delete` column. An external process can set this column to
// true to instruct the coordinator to delete the tx when possible.
func (l2db *L2DB) PurgeByExternalDelete() error {
_, err := l2db.dbWrite.Exec(
`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
common.PoolL2TxStatePending,
)
return tracerr.Wrap(err)
}
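
PurgeByExternalDelete only removes rows whose `external_delete` flag is already set; per the comment, the flag is flipped by an external process. A hedged sketch of that external side using plain database/sql (the DSN, driver and concrete tx id are illustrative):

```
package sketch

import (
	"database/sql"

	_ "github.com/lib/pq" // assumed PostgreSQL driver
)

// markForDeletion is what an out-of-band operator process could run so that
// the coordinator drops a pending tx on its next PurgeByExternalDelete pass.
func markForDeletion(dsn string, txID []byte) error {
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return err
	}
	defer db.Close()
	_, err = db.Exec(
		`UPDATE tx_pool SET external_delete = true WHERE tx_id = $1;`,
		txID,
	)
	return err
}
```
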

View File

@@ -1,8 +1,8 @@
package l2db package l2db
import ( import (
"database/sql" "math"
"fmt" "math/big"
"os" "os"
"testing" "testing"
"time" "time"
@@ -20,14 +20,12 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var decimals = uint64(3)
var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
var l2DB *L2DB var l2DB *L2DB
var l2DBWithACC *L2DB var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB var historyDB *historydb.HistoryDB
var tc *til.Context var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD var tokens map[common.TokenID]historydb.TokenWithUSD
var tokensValue map[common.TokenID]float64
var accs map[common.Idx]common.Account var accs map[common.Idx]common.Account
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@@ -37,11 +35,11 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil) l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil) historyDB = historydb.NewHistoryDB(db, nil)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -60,10 +58,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
AddToken(1) AddToken(1)
AddToken(2) AddToken(2)
CreateAccountDeposit(1) A: 20000 CreateAccountDeposit(1) A: 2000
CreateAccountDeposit(2) A: 20000 CreateAccountDeposit(2) A: 2000
CreateAccountDeposit(1) B: 10000 CreateAccountDeposit(1) B: 1000
CreateAccountDeposit(2) B: 10000 CreateAccountDeposit(2) B: 1000
> batchL1 > batchL1
> batchL1 > batchL1
> block > block
@@ -84,23 +82,15 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for i := range blocks {
block := &blocks[i]
for j := range block.Rollup.AddedTokens {
token := &block.Rollup.AddedTokens[j]
token.Name = fmt.Sprintf("Token %d", token.TokenID)
token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
token.Decimals = decimals
}
}
tokens = make(map[common.TokenID]historydb.TokenWithUSD) tokens = make(map[common.TokenID]historydb.TokenWithUSD)
// tokensValue = make(map[common.TokenID]float64) tokensValue = make(map[common.TokenID]float64)
accs = make(map[common.Idx]common.Account) accs = make(map[common.Idx]common.Account)
value := 5 * 5.389329
now := time.Now().UTC() now := time.Now().UTC()
// Add all blocks except for the last one // Add all blocks except for the last one
for i := range blocks[:len(blocks)-1] { for i := range blocks[:len(blocks)-1] {
if err := historyDB.AddBlockSCData(&blocks[i]); err != nil { err = historyDB.AddBlockSCData(&blocks[i])
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for _, batch := range blocks[i].Rollup.Batches { for _, batch := range blocks[i].Rollup.Batches {
@@ -116,38 +106,39 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
Name: token.Name, Name: token.Name,
Symbol: token.Symbol, Symbol: token.Symbol,
Decimals: token.Decimals, Decimals: token.Decimals,
USD: &tokenValue,
USDUpdate: &now,
} }
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
readToken.USDUpdate = &now
readToken.USD = &value
tokens[token.TokenID] = readToken tokens[token.TokenID] = readToken
// Set value to the tokens }
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD) // Set value to the tokens (tokens have no symbol)
if err != nil { tokenSymbol := ""
return tracerr.Wrap(err) err := historyDB.UpdateTokenValue(tokenSymbol, value)
} if err != nil {
return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
func generatePoolL2Txs() ([]common.PoolL2Tx, error) { func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
// Fee = 126 corresponds to ~10%
setPool := ` setPool := `
Type: PoolL2 Type: PoolL2
PoolTransfer(1) A-B: 6000 (126) PoolTransfer(1) A-B: 6 (4)
PoolTransfer(2) A-B: 3000 (126) PoolTransfer(2) A-B: 3 (1)
PoolTransfer(1) B-A: 5000 (126) PoolTransfer(1) B-A: 5 (2)
PoolTransfer(2) B-A: 10000 (126) PoolTransfer(2) B-A: 10 (3)
PoolTransfer(1) A-B: 7000 (126) PoolTransfer(1) A-B: 7 (2)
PoolTransfer(2) A-B: 2000 (126) PoolTransfer(2) A-B: 2 (1)
PoolTransfer(1) B-A: 8000 (126) PoolTransfer(1) B-A: 8 (2)
PoolTransfer(2) B-A: 1000 (126) PoolTransfer(2) B-A: 1 (1)
PoolTransfer(1) A-B: 3000 (126) PoolTransfer(1) A-B: 3 (1)
PoolTransferToEthAddr(2) B-A: 5000 (126) PoolTransferToEthAddr(2) B-A: 5 (2)
PoolTransferToBJJ(2) B-A: 5000 (126) PoolTransferToBJJ(2) B-A: 5 (2)
PoolExit(1) A: 5000 (126) PoolExit(1) A: 5 (2)
PoolExit(2) B: 3000 (126) PoolExit(2) B: 3 (1)
` `
poolL2Txs, err := tc.GeneratePoolL2Txs(setPool) poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
if err != nil { if err != nil {
@@ -162,74 +153,25 @@ func TestAddTxTest(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx) assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone() nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone) assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset) assert.Equal(t, 0, offset)
} }
} }
func TestAddTxAPI(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
oldMaxTxs := l2DBWithACC.maxTxs
// set max number of pending txs that can be kept in the pool to 5
l2DBWithACC.maxTxs = 5
poolL2Txs, err := generatePoolL2Txs()
txs := make([]*PoolL2TxWrite, len(poolL2Txs))
for i := range poolL2Txs {
txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
}
require.NoError(t, err)
require.GreaterOrEqual(t, len(poolL2Txs), 8)
for i := range txs[:5] {
err := l2DBWithACC.AddTxAPI(txs[i])
require.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
err = l2DBWithACC.AddTxAPI(txs[5])
assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
// reset maxTxs to original value
l2DBWithACC.maxTxs = oldMaxTxs
// set minFeeUSD to a higher value than the tx feeUSD to test the error
// of inserting a tx with lower than min fee
oldMinFeeUSD := l2DBWithACC.minFeeUSD
tx := txs[5]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
// set minFeeUSD higher than the tx fee to trigger the error
l2DBWithACC.minFeeUSD = feeAmountUSD + 1
err = l2DBWithACC.AddTxAPI(tx)
require.Error(t, err)
assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
// reset minFeeUSD to original value
l2DBWithACC.minFeeUSD = oldMinFeeUSD
}
func TestUpdateTxsInfo(t *testing.T) { func TestUpdateTxsInfo(t *testing.T) {
err := prepareHistoryDB(historyDB) err := prepareHistoryDB(historyDB)
if err != nil { if err != nil {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) require.NoError(t, err)
@@ -243,7 +185,7 @@ func TestUpdateTxsInfo(t *testing.T) {
for i := range poolL2Txs { for i := range poolL2Txs {
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "test", fetchedTx.Info) assert.Equal(t, "test", fetchedTx.Info)
} }
} }
@@ -261,8 +203,9 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix()) assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
// Set expected fee // Set expected fee
amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD) f := new(big.Float).SetInt(expected.Amount)
expected.AbsoluteFee = amountUSD * expected.Fee.Percentage() amountF, _ := f.Float64()
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee) test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
} }
assert.Equal(t, expected, actual) assert.Equal(t, expected, actual)
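
assertTx derives the expected AbsoluteFee in USD in two equivalent ways above: via `common.TokensToUSD`, or by scaling the amount manually with `big.Float`. A short sketch of the first form, using the fixture values from this file (3 decimals, 1.0 USD per token, fee selector 126 ≈ 10%):

```
package sketch

import (
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

// feeInUSD sketches the fee derivation used by these tests: CalcFeeAmount
// turns the amount plus fee selector into token units, and TokensToUSD
// normalizes by the token decimals and multiplies by the USD price.
// With a 6000-unit amount, selector 126 (~10%), 3 decimals and a 1.0 USD
// token this comes out at roughly 0.6 USD.
func feeInUSD(amount *big.Int, fee common.FeeSelector, decimals uint64, priceUSD float64) (float64, error) {
	feeAmount, err := common.CalcFeeAmount(amount, fee)
	if err != nil {
		return 0, err
	}
	return common.TokensToUSD(feeAmount, decimals, priceUSD), nil
}
```
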
@@ -287,28 +230,19 @@ func TestGetPending(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var pendingTxs []*common.PoolL2Tx var pendingTxs []*common.PoolL2Tx
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
pendingTxs = append(pendingTxs, &poolL2Txs[i]) pendingTxs = append(pendingTxs, &poolL2Txs[i])
} }
fetchedTxs, err := l2DB.GetPendingTxs() fetchedTxs, err := l2DB.GetPendingTxs()
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs)) assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i := range fetchedTxs { for i := range fetchedTxs {
assertTx(t, pendingTxs[i], &fetchedTxs[i]) assertTx(t, pendingTxs[i], &fetchedTxs[i])
} }
// Check AbsoluteFee amount
for i := range fetchedTxs {
tx := &fetchedTxs[i]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount,
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
}
} }
func TestStartForging(t *testing.T) { func TestStartForging(t *testing.T) {
@@ -319,13 +253,13 @@ func TestStartForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -333,11 +267,11 @@ func TestStartForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs { for _, id := range startForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -351,13 +285,13 @@ func TestDoneForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -365,7 +299,7 @@ func TestDoneForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -377,12 +311,12 @@ func TestDoneForging(t *testing.T) {
} }
// Done forging txs // Done forging txs
err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs { for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -396,13 +330,13 @@ func TestInvalidate(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var invalidTxIDs []common.TxID var invalidTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 { if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
randomizer++ randomizer++
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
@@ -410,11 +344,11 @@ func TestInvalidate(t *testing.T) {
} }
// Invalidate txs // Invalidate txs
err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -428,7 +362,7 @@ func TestInvalidateOldNonces(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// Update Accounts currentNonce // Update Accounts currentNonce
var updateAccounts []common.IdxNonce var updateAccounts []common.IdxNonce
var currentNonce = common.Nonce(1) var currentNonce = common.Nonce(1)
@@ -445,13 +379,13 @@ func TestInvalidateOldNonces(t *testing.T) {
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
} }
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
} }
// sanity check // sanity check
require.Greater(t, len(invalidTxIDs), 0) require.Greater(t, len(invalidTxIDs), 0)
err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum) err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
@@ -473,7 +407,7 @@ func TestReorg(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -484,7 +418,7 @@ func TestReorg(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -496,7 +430,7 @@ func TestReorg(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -521,22 +455,22 @@ func TestReorg(t *testing.T) {
// Invalidate txs BEFORE reorgBatch --> nonReorg // Invalidate txs BEFORE reorgBatch --> nonReorg
err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch) err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs in reorgBatch --> Reorg // Done forging txs in reorgBatch --> Reorg
err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch) err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -553,7 +487,7 @@ func TestReorg2(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -564,7 +498,7 @@ func TestReorg2(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -576,7 +510,7 @@ func TestReorg2(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -598,22 +532,22 @@ func TestReorg2(t *testing.T) {
} }
// Done forging txs BEFORE reorgBatch --> nonReorg // Done forging txs BEFORE reorgBatch --> nonReorg
err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch) err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs in reorgBatch --> Reorg // Invalidate txs in reorgBatch --> Reorg
err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch) err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -629,7 +563,7 @@ func TestPurge(t *testing.T) {
var poolL2Tx []common.PoolL2Tx var poolL2Tx []common.PoolL2Tx
for i := 0; i < generateTx; i++ { for i := 0; i < generateTx; i++ {
poolL2TxAux, err := generatePoolL2Txs() poolL2TxAux, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
poolL2Tx = append(poolL2Tx, poolL2TxAux...) poolL2Tx = append(poolL2Tx, poolL2TxAux...)
} }
@@ -656,39 +590,39 @@ func TestPurge(t *testing.T) {
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID) deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
} }
err := l2DB.AddTxTest(&tx) err := l2DB.AddTxTest(&tx)
require.NoError(t, err) assert.NoError(t, err)
} }
// Set batchNum for kept txs // Set batchNum for kept txs
for i := range keepedIDs { for i := range keepedIDs {
_, err = l2DB.dbWrite.Exec( _, err = l2DB.db.Exec(
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;", "UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
safeBatchNum, keepedIDs[i], safeBatchNum, keepedIDs[i],
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Start forging txs and set batchNum // Start forging txs and set batchNum
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs and set batchNum // Done forging txs and set batchNum
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs and set batchNum // Invalidate txs and set batchNum
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Update timestamp of afterTTL txs // Update timestamp of afterTTL txs
deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0) deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
for _, id := range afterTTLIDs { for _, id := range afterTTLIDs {
// Set timestamp // Set timestamp
_, err = l2DB.dbWrite.Exec( _, err = l2DB.db.Exec(
"UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;", "UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
deleteTimestamp, common.PoolL2TxStatePending, id, deleteTimestamp, common.PoolL2TxStatePending, id,
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Purge txs // Purge txs
err = l2DB.Purge(safeBatchNum) err = l2DB.Purge(safeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Check results // Check results
for _, id := range deletedIDs { for _, id := range deletedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
@@ -696,7 +630,7 @@ func TestPurge(t *testing.T) {
} }
for _, id := range keepedIDs { for _, id := range keepedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
require.NoError(t, err) assert.NoError(t, err)
} }
} }
@@ -710,47 +644,10 @@ func TestAuth(t *testing.T) {
for i := 0; i < len(auths); i++ { for i := 0; i < len(auths); i++ {
// Add to the DB // Add to the DB
err := l2DB.AddAccountCreationAuth(auths[i]) err := l2DB.AddAccountCreationAuth(auths[i])
require.NoError(t, err) assert.NoError(t, err)
// Fetch from DB // Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr) auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
require.NoError(t, err) assert.NoError(t, err)
// Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ)
assert.Equal(t, auths[i].Signature, auth.Signature)
assert.Equal(t, auths[i].Timestamp.Unix(), auths[i].Timestamp.Unix())
nameZone, offset := auths[i].Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
}
func TestManyAuth(t *testing.T) {
test.WipeDB(l2DB.DB())
const nAuths = 5
chainID := uint16(0)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
// Generate authorizations
genAuths := test.GenAuths(nAuths, chainID, hermezContractAddr)
auths := make([]common.AccountCreationAuth, len(genAuths))
// Convert to a non-pointer slice
for i := 0; i < len(genAuths); i++ {
auths[i] = *genAuths[i]
}
// Add a duplicate one to check the not exist condition
err := l2DB.AddAccountCreationAuth(genAuths[0])
require.NoError(t, err)
// Add to the DB
err = l2DB.AddManyAccountCreationAuth(auths)
require.NoError(t, err)
// Assert the result
for i := 0; i < len(auths); i++ {
// Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
require.NoError(t, err)
// Check fetched vs generated // Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr) assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ) assert.Equal(t, auths[i].BJJ, auth.BJJ)
@@ -768,7 +665,7 @@ func TestAddGet(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// We will work with only 3 txs // We will work with only 3 txs
require.GreaterOrEqual(t, len(poolL2Txs), 3) require.GreaterOrEqual(t, len(poolL2Txs), 3)
@@ -804,56 +701,3 @@ func TestAddGet(t *testing.T) {
assert.Equal(t, txs[i], *dbTx) assert.Equal(t, txs[i], *dbTx)
} }
} }
func TestPurgeByExternalDelete(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
txs, err := generatePoolL2Txs()
require.NoError(t, err)
// We will work with 8 txs
require.GreaterOrEqual(t, len(txs), 8)
txs = txs[:8]
for i := range txs {
require.NoError(t, l2DB.AddTxTest(&txs[i]))
}
// We will recreate this scenario:
// tx index, status , external_delete
// 0 , pending, false
// 1 , pending, false
// 2 , pending, true // will be deleted
// 3 , pending, true // will be deleted
// 4 , fging , false
// 5 , fging , false
// 6 , fging , true
// 7 , fging , true
require.NoError(t, l2DB.StartForging(
[]common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
1))
_, err = l2DB.dbWrite.Exec(
`UPDATE tx_pool SET external_delete = true WHERE
tx_id IN ($1, $2, $3, $4)
;`,
txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
)
require.NoError(t, err)
require.NoError(t, l2DB.PurgeByExternalDelete())
// Query txs that have not been deleted
for _, i := range []int{0, 1, 4, 5, 6, 7} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.NoError(t, err)
}
// Query txs that have been deleted
for _, i := range []int{2, 3} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}
}

View File

@@ -34,7 +34,6 @@ type PoolL2TxWrite struct {
RqFee *common.FeeSelector `meddler:"rq_fee"` RqFee *common.FeeSelector `meddler:"rq_fee"`
RqNonce *common.Nonce `meddler:"rq_nonce"` RqNonce *common.Nonce `meddler:"rq_nonce"`
Type common.TxType `meddler:"tx_type"` Type common.TxType `meddler:"tx_type"`
ClientIP string `meddler:"client_ip"`
} }
// PoolTxAPI represents a L2 Tx pool with extra metadata used by the API // PoolTxAPI represents a L2 Tx pool with extra metadata used by the API
@@ -95,6 +94,7 @@ func (tx PoolTxAPI) MarshalJSON() ([]byte, error) {
"info": tx.Info, "info": tx.Info,
"signature": tx.Signature, "signature": tx.Signature,
"timestamp": tx.Timestamp, "timestamp": tx.Timestamp,
"batchNum": tx.BatchNum,
"requestFromAccountIndex": tx.RqFromIdx, "requestFromAccountIndex": tx.RqFromIdx,
"requestToAccountIndex": tx.RqToIdx, "requestToAccountIndex": tx.RqToIdx,
"requestToHezEthereumAddress": tx.RqToEthAddr, "requestToHezEthereumAddress": tx.RqToEthAddr,

View File

@@ -47,7 +47,7 @@ CREATE TABLE token (
name VARCHAR(20) NOT NULL, name VARCHAR(20) NOT NULL,
symbol VARCHAR(10) NOT NULL, symbol VARCHAR(10) NOT NULL,
decimals INT NOT NULL, decimals INT NOT NULL,
usd NUMERIC, -- value of a normalized token (1 token = 10^decimals units) usd NUMERIC,
usd_update TIMESTAMP WITHOUT TIME ZONE usd_update TIMESTAMP WITHOUT TIME ZONE
); );
@@ -100,15 +100,6 @@ CREATE TABLE account (
eth_addr BYTEA NOT NULL eth_addr BYTEA NOT NULL
); );
CREATE TABLE account_update (
item_id SERIAL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance BYTEA NOT NULL
);
CREATE TABLE exit_tree ( CREATE TABLE exit_tree (
item_id SERIAL PRIMARY KEY, item_id SERIAL PRIMARY KEY,
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
@@ -627,9 +618,7 @@ CREATE TABLE tx_pool (
rq_amount BYTEA, rq_amount BYTEA,
rq_fee SMALLINT, rq_fee SMALLINT,
rq_nonce BIGINT, rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL, tx_type VARCHAR(40) NOT NULL
client_ip VARCHAR,
external_delete BOOLEAN NOT NULL DEFAULT false
); );
-- +migrate StatementBegin -- +migrate StatementBegin
@@ -662,35 +651,34 @@ CREATE TABLE account_creation_auth (
); );
-- +migrate Down -- +migrate Down
-- triggers -- drop triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token; DROP TRIGGER trigger_token_usd_update ON token;
DROP TRIGGER IF EXISTS trigger_set_tx ON tx; DROP TRIGGER trigger_set_tx ON tx;
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch; DROP TRIGGER trigger_forge_l1_txs ON batch;
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool; DROP TRIGGER trigger_set_pool_tx ON tx_pool;
-- functions -- drop functions
DROP FUNCTION IF EXISTS hez_idx; DROP FUNCTION hez_idx;
DROP FUNCTION IF EXISTS set_token_usd_update; DROP FUNCTION set_token_usd_update;
DROP FUNCTION IF EXISTS fee_percentage; DROP FUNCTION fee_percentage;
DROP FUNCTION IF EXISTS set_tx; DROP FUNCTION set_tx;
DROP FUNCTION IF EXISTS forge_l1_user_txs; DROP FUNCTION forge_l1_user_txs;
DROP FUNCTION IF EXISTS set_pool_tx; DROP FUNCTION set_pool_tx;
-- drop tables IF EXISTS -- drop tables
DROP TABLE IF EXISTS account_creation_auth; DROP TABLE account_creation_auth;
DROP TABLE IF EXISTS tx_pool; DROP TABLE tx_pool;
DROP TABLE IF EXISTS auction_vars; DROP TABLE auction_vars;
DROP TABLE IF EXISTS rollup_vars; DROP TABLE rollup_vars;
DROP TABLE IF EXISTS escape_hatch_withdrawal; DROP TABLE escape_hatch_withdrawal;
DROP TABLE IF EXISTS bucket_update; DROP TABLE bucket_update;
DROP TABLE IF EXISTS token_exchange; DROP TABLE token_exchange;
DROP TABLE IF EXISTS wdelayer_vars; DROP TABLE wdelayer_vars;
DROP TABLE IF EXISTS tx; DROP TABLE tx;
DROP TABLE IF EXISTS exit_tree; DROP TABLE exit_tree;
DROP TABLE IF EXISTS account_update; DROP TABLE account;
DROP TABLE IF EXISTS account; DROP TABLE token;
DROP TABLE IF EXISTS token; DROP TABLE bid;
DROP TABLE IF EXISTS bid; DROP TABLE batch;
DROP TABLE IF EXISTS batch; DROP TABLE coordinator;
DROP TABLE IF EXISTS coordinator; DROP TABLE block;
DROP TABLE IF EXISTS block; -- drop sequences
-- sequences DROP SEQUENCE tx_item_id;
DROP SEQUENCE IF EXISTS tx_item_id;

View File

@@ -17,8 +17,7 @@ import (
var ( var (
// ErrStateDBWithoutMT is used when a method that requires a MerkleTree // ErrStateDBWithoutMT is used when a method that requires a MerkleTree
// is called in a StateDB that does not have a MerkleTree defined // is called in a StateDB that does not have a MerkleTree defined
ErrStateDBWithoutMT = errors.New( ErrStateDBWithoutMT = errors.New("Can not call method to use MerkleTree in a StateDB without MerkleTree")
"Can not call method to use MerkleTree in a StateDB without MerkleTree")
// ErrAccountAlreadyExists is used when CreateAccount is called and the // ErrAccountAlreadyExists is used when CreateAccount is called and the
// Account already exists // Account already exists
@@ -29,8 +28,7 @@ var (
ErrIdxNotFound = errors.New("Idx can not be found") ErrIdxNotFound = errors.New("Idx can not be found")
// ErrGetIdxNoCase is used when trying to get the Idx from EthAddr & // ErrGetIdxNoCase is used when trying to get the Idx from EthAddr &
// BJJ with not compatible combination // BJJ with not compatible combination
ErrGetIdxNoCase = errors.New( ErrGetIdxNoCase = errors.New("Can not get Idx due unexpected combination of ethereum Address & BabyJubJub PublicKey")
"Can not get Idx due unexpected combination of ethereum Address & BabyJubJub PublicKey")
// PrefixKeyIdx is the key prefix for idx in the db // PrefixKeyIdx is the key prefix for idx in the db
PrefixKeyIdx = []byte("i:") PrefixKeyIdx = []byte("i:")
@@ -54,40 +52,19 @@ const (
// TypeBatchBuilder defines a StateDB used by the BatchBuilder, that // TypeBatchBuilder defines a StateDB used by the BatchBuilder, that
// generates the ExitTree and the ZKInput when processing the txs // generates the ExitTree and the ZKInput when processing the txs
TypeBatchBuilder = "batchbuilder" TypeBatchBuilder = "batchbuilder"
// MaxNLevels is the maximum value of NLevels for the merkle tree,
// which comes from the fact that AccountIdx has 48 bits.
MaxNLevels = 48
) )
// TypeStateDB determines the type of StateDB // TypeStateDB determines the type of StateDB
type TypeStateDB string type TypeStateDB string
// Config of the StateDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
// Type of StateDB (TypeSynchronizer, TypeTxSelector or TypeBatchBuilder)
Type TypeStateDB
// NLevels is the number of merkle tree levels in case the Type uses a
// merkle tree. If the Type doesn't use a merkle tree, NLevels should
// be 0.
NLevels int
// At every checkpoint, check that there are no gaps between the
// checkpoints
noGapsCheck bool
}
// StateDB represents the StateDB object // StateDB represents the StateDB object
type StateDB struct { type StateDB struct {
cfg Config path string
db *kvdb.KVDB Typ TypeStateDB
MT *merkletree.MerkleTree db *kvdb.KVDB
nLevels int
MT *merkletree.MerkleTree
keep int
} }
// Last offers a subset of view methods of the StateDB that can be // Last offers a subset of view methods of the StateDB that can be
@@ -127,41 +104,36 @@ func (s *Last) GetAccounts() ([]common.Account, error) {
// NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk // NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk
// storage. Checkpoints older than the value defined by `keep` will be // storage. Checkpoints older than the value defined by `keep` will be
// deleted. // deleted.
// func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) { func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
func NewStateDB(cfg Config) (*StateDB, error) {
var kv *kvdb.KVDB var kv *kvdb.KVDB
var err error var err error
kv, err = kvdb.NewKVDB(kvdb.Config{Path: cfg.Path, Keep: cfg.Keep, kv, err = kvdb.NewKVDB(pathDB, keep)
NoGapsCheck: cfg.noGapsCheck, NoLast: cfg.NoLast})
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
var mt *merkletree.MerkleTree = nil var mt *merkletree.MerkleTree = nil
if cfg.Type == TypeSynchronizer || cfg.Type == TypeBatchBuilder { if typ == TypeSynchronizer || typ == TypeBatchBuilder {
mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), cfg.NLevels) mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), nLevels)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
} }
if cfg.Type == TypeTxSelector && cfg.NLevels != 0 { if typ == TypeTxSelector && nLevels != 0 {
return nil, tracerr.Wrap( return nil, tracerr.Wrap(fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
} }
return &StateDB{ return &StateDB{
cfg: cfg, path: pathDB,
db: kv, db: kv,
MT: mt, nLevels: nLevels,
MT: mt,
Typ: typ,
keep: keep,
}, nil }, nil
} }
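
The Config struct above replaces the positional `NewStateDB(path, keep, typ, nLevels)` arguments; the tests at the end of this diff construct the DB exactly this way. A minimal sketch of the Config-based constructor (the path is whatever directory the caller supplies):

```
package sketch

import "github.com/hermeznetwork/hermez-node/db/statedb"

// newSyncStateDB builds a synchronizer StateDB with a 32-level Merkle tree
// that keeps up to 128 old checkpoints, mirroring the usage in the tests.
func newSyncStateDB(path string) (*statedb.StateDB, error) {
	return statedb.NewStateDB(statedb.Config{
		Path:    path,
		Keep:    128,
		Type:    statedb.TypeSynchronizer,
		NLevels: 32,
	})
}
```
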
// Type returns the StateDB configured Type
func (s *StateDB) Type() TypeStateDB {
return s.cfg.Type
}
// LastRead is a thread-safe method to query the last checkpoint of the StateDB // LastRead is a thread-safe method to query the last checkpoint of the StateDB
// via the Last type methods // via the Last type methods
func (s *StateDB) LastRead(fn func(sdbLast *Last) error) error { func (s *StateDB) LastRead(fn func(sdbLast *Last) error) error {
@@ -207,7 +179,7 @@ func (s *StateDB) LastGetCurrentBatch() (common.BatchNum, error) {
func (s *StateDB) LastMTGetRoot() (*big.Int, error) { func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
var root *big.Int var root *big.Int
if err := s.LastRead(func(sdb *Last) error { if err := s.LastRead(func(sdb *Last) error {
mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.cfg.NLevels) mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.nLevels)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -223,7 +195,7 @@ func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
// Internally this advances & stores the current BatchNum, and then stores a // Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the StateDB. // Checkpoint of the current state of the StateDB.
func (s *StateDB) MakeCheckpoint() error { func (s *StateDB) MakeCheckpoint() error {
log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.cfg.Type) log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.Typ)
return s.db.MakeCheckpoint() return s.db.MakeCheckpoint()
} }
@@ -258,8 +230,8 @@ func (s *StateDB) SetCurrentIdx(idx common.Idx) error {
// those checkpoints will remain in the storage, and eventually will be // those checkpoints will remain in the storage, and eventually will be
// deleted when MakeCheckpoint overwrites them. // deleted when MakeCheckpoint overwrites them.
func (s *StateDB) Reset(batchNum common.BatchNum) error { func (s *StateDB) Reset(batchNum common.BatchNum) error {
log.Debugw("Making StateDB Reset", "batch", batchNum, "type", s.cfg.Type) err := s.db.Reset(batchNum)
if err := s.db.Reset(batchNum); err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if s.MT != nil { if s.MT != nil {
@@ -270,6 +242,7 @@ func (s *StateDB) Reset(batchNum common.BatchNum) error {
} }
s.MT = mt s.MT = mt
} }
log.Debugw("Making StateDB Reset", "batch", batchNum)
return nil return nil
} }
@@ -350,8 +323,7 @@ func GetAccountInTreeDB(sto db.Storage, idx common.Idx) (*common.Account, error)
// CreateAccount creates a new Account in the StateDB for the given Idx. If // CreateAccount creates a new Account in the StateDB for the given Idx. If
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the // StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof. // MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) ( func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
*merkletree.CircomProcessorProof, error) {
cpp, err := CreateAccountInTreeDB(s.db.DB(), s.MT, idx, account) cpp, err := CreateAccountInTreeDB(s.db.DB(), s.MT, idx, account)
if err != nil { if err != nil {
return cpp, tracerr.Wrap(err) return cpp, tracerr.Wrap(err)
@@ -365,8 +337,7 @@ func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (
// from ExitTree. Creates a new Account in the StateDB for the given Idx. If // from ExitTree. Creates a new Account in the StateDB for the given Idx. If
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the // StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof. // MerkleTree, returning a CircomProcessorProof.
func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
account *common.Account) (*merkletree.CircomProcessorProof, error) {
// store at the DB the key: v, and value: leaf.Bytes() // store at the DB the key: v, and value: leaf.Bytes()
v, err := account.HashValue() v, err := account.HashValue()
if err != nil { if err != nil {
@@ -415,8 +386,7 @@ func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common
// UpdateAccount updates the Account in the StateDB for the given Idx. If // UpdateAccount updates the Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the // StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof. // MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) ( func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
*merkletree.CircomProcessorProof, error) {
return UpdateAccountInTreeDB(s.db.DB(), s.MT, idx, account) return UpdateAccountInTreeDB(s.db.DB(), s.MT, idx, account)
} }
@@ -424,8 +394,7 @@ func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (
// from ExitTree. Updates the Account in the StateDB for the given Idx. If // from ExitTree. Updates the Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the // StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof. // MerkleTree, returning a CircomProcessorProof.
func UpdateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, func UpdateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
account *common.Account) (*merkletree.CircomProcessorProof, error) {
// store at the DB the key: v, and value: account.Bytes() // store at the DB the key: v, and value: account.Bytes()
v, err := account.HashValue() v, err := account.HashValue()
if err != nil { if err != nil {
@@ -492,10 +461,9 @@ type LocalStateDB struct {
// NewLocalStateDB returns a new LocalStateDB connected to the given // NewLocalStateDB returns a new LocalStateDB connected to the given
// synchronizerDB. Checkpoints older than the value defined by `keep` will be // synchronizerDB. Checkpoints older than the value defined by `keep` will be
// deleted. // deleted.
func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error) { func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeStateDB,
cfg.noGapsCheck = true nLevels int) (*LocalStateDB, error) {
cfg.NoLast = true s, err := NewStateDB(path, keep, typ, nLevels)
s, err := NewStateDB(cfg)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -505,24 +473,18 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
}, nil }, nil
} }
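
NewLocalStateDB reuses the same Config but forces `noGapsCheck` and `NoLast`, and a later `Reset(batchNum, true)` pulls the state over from the synchronizer's StateDB; the checkpoint test further down runs this exact sequence. A compact sketch:

```
package sketch

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

// batchBuilderAt creates a batch-builder LocalStateDB tied to the given
// synchronizer StateDB and resets it to one of the synchronizer's
// checkpoints, so both share the state at that batch.
func batchBuilderAt(path string, sync *statedb.StateDB, batchNum common.BatchNum) (*statedb.LocalStateDB, error) {
	ldb, err := statedb.NewLocalStateDB(statedb.Config{
		Path:    path,
		Keep:    128,
		Type:    statedb.TypeBatchBuilder,
		NLevels: 32,
	}, sync)
	if err != nil {
		return nil, err
	}
	if err := ldb.Reset(batchNum, true); err != nil { // true: copy state from the synchronizer
		return nil, err
	}
	return ldb, nil
}
```
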
// CheckpointExists returns true if the checkpoint exists // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints. // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer { if fromSynchronizer {
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type) err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db)
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// open the MT for the current s.db // open the MT for the current s.db
if l.MT != nil { if l.MT != nil {
mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT), mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT), l.MT.MaxLevels())
l.MT.MaxLevels())
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }

View File

@@ -22,8 +22,7 @@ import (
func newAccount(t *testing.T, i int) *common.Account { func newAccount(t *testing.T, i int) *common.Account {
var sk babyjub.PrivateKey var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
require.NoError(t, err) require.NoError(t, err)
pk := sk.Public() pk := sk.Public()
@@ -46,7 +45,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err) require.NoError(t, err)
// test values // test values
@@ -79,7 +78,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
// call NewStateDB which should get the db at the last checkpoint state // call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey0'&'testvalue0' data) // executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err) require.NoError(t, err)
v, err = sdb.db.DB().Get(k0) v, err = sdb.db.DB().Get(k0)
assert.NotNil(t, err) assert.NotNil(t, err)
@@ -117,7 +116,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
bn, err := sdb.getCurrentBatch() bn, err := sdb.getCurrentBatch()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, common.BatchNum(0), bn) assert.Equal(t, common.BatchNum(0), bn)
err = sdb.MakeCheckpoint() err = sdb.db.MakeCheckpoint()
require.NoError(t, err) require.NoError(t, err)
bn, err = sdb.getCurrentBatch() bn, err = sdb.getCurrentBatch()
require.NoError(t, err) require.NoError(t, err)
@@ -159,7 +158,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
// call NewStateDB which should get the db at the last checkpoint state // call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey1'&'testvalue1' data) // executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err) require.NoError(t, err)
bn, err = sdb.getCurrentBatch() bn, err = sdb.getCurrentBatch()
@@ -183,7 +182,7 @@ func TestStateDBWithoutMT(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err) require.NoError(t, err)
// create test accounts // create test accounts
@@ -237,7 +236,7 @@ func TestStateDBWithMT(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
// create test accounts // create test accounts
@@ -291,7 +290,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
err = sdb.Reset(0) err = sdb.Reset(0)
@@ -336,7 +335,7 @@ func TestCheckpoints(t *testing.T) {
assert.Equal(t, common.BatchNum(i+1), cb) assert.Equal(t, common.BatchNum(i+1), cb)
} }
// printCheckpoints(t, sdb.cfg.Path) // printCheckpoints(t, sdb.path)
// reset checkpoint // reset checkpoint
err = sdb.Reset(3) err = sdb.Reset(3)
@@ -372,8 +371,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb") dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal)) defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder, ldb, err := NewLocalStateDB(dirLocal, 128, sdb, TypeBatchBuilder, 32)
NLevels: 32}, sdb)
require.NoError(t, err) require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB) // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -394,27 +392,28 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2") dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2)) defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder, ldb2, err := NewLocalStateDB(dirLocal2, 128, sdb, TypeBatchBuilder, 32)
NLevels: 32}, sdb)
require.NoError(t, err) require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB) // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
err = ldb2.Reset(4, true) err = ldb2.Reset(4, true)
require.NoError(t, err) require.NoError(t, err)
// check that currentBatch is 4 after the Reset // check that currentBatch is 4 after the Reset
cb = ldb2.CurrentBatch() cb, err = ldb2.db.GetCurrentBatch()
require.NoError(t, err)
assert.Equal(t, common.BatchNum(4), cb) assert.Equal(t, common.BatchNum(4), cb)
// advance one checkpoint in ldb2 // advance one checkpoint in ldb2
err = ldb2.MakeCheckpoint() err = ldb2.db.MakeCheckpoint()
require.NoError(t, err)
cb, err = ldb2.db.GetCurrentBatch()
require.NoError(t, err) require.NoError(t, err)
cb = ldb2.CurrentBatch()
assert.Equal(t, common.BatchNum(5), cb) assert.Equal(t, common.BatchNum(5), cb)
debug := false debug := false
if debug { if debug {
printCheckpoints(t, sdb.cfg.Path) printCheckpoints(t, sdb.path)
printCheckpoints(t, ldb.cfg.Path) printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.cfg.Path) printCheckpoints(t, ldb2.path)
} }
} }
@@ -422,7 +421,7 @@ func TestStateDBGetAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb") dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err) require.NoError(t, err)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err) require.NoError(t, err)
// create test accounts // create test accounts
@@ -469,13 +468,12 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1)) ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
// test value from js version (compatibility-canary) // test value from js version (compatibility-canary)
assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", assert.Equal(t, "1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", (hex.EncodeToString(ay0.Bytes())))
(hex.EncodeToString(ay0.Bytes())))
bjjPoint0Comp := babyjub.PackSignY(true, ay0) bjjPoint0Comp := babyjub.PackSignY(true, ay0)
bjj0 := babyjub.PublicKeyComp(bjjPoint0Comp) bjj0 := babyjub.PublicKeyComp(bjjPoint0Comp)
@@ -534,9 +532,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
// root value generated by js version: // root value generated by js version:
assert.Equal(t, assert.Equal(t, "17298264051379321456969039521810887093935433569451713402227686942080129181291", sdb.MT.Root().BigInt().String())
"13174362770971232417413036794215823584762073355951212910715422236001731746065",
sdb.MT.Root().BigInt().String())
} }
// TestListCheckpoints performs almost the same test than kvdb/kvdb_test.go // TestListCheckpoints performs almost the same test than kvdb/kvdb_test.go
@@ -546,7 +542,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
numCheckpoints := 16 numCheckpoints := 16
@@ -579,7 +575,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
keep := 16 keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
numCheckpoints := 32 numCheckpoints := 32
@@ -600,7 +596,7 @@ func TestCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir)) defer require.NoError(t, os.RemoveAll(dir))
keep := 16 keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32}) sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
idx := sdb.CurrentIdx() idx := sdb.CurrentIdx()
@@ -608,7 +604,7 @@ func TestCurrentIdx(t *testing.T) {
sdb.Close() sdb.Close()
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32}) sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
idx = sdb.CurrentIdx() idx = sdb.CurrentIdx()
@@ -622,30 +618,9 @@ func TestCurrentIdx(t *testing.T) {
sdb.Close() sdb.Close()
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32}) sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err) require.NoError(t, err)
idx = sdb.CurrentIdx() idx = sdb.CurrentIdx()
assert.Equal(t, common.Idx(255), idx) assert.Equal(t, common.Idx(255), idx)
} }
func TestResetFromBadCheckpoint(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
// reset from a checkpoint that doesn't exist
err = sdb.Reset(10)
require.Error(t, err)
}
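
The tests above exercise the checkpoint lifecycle: each `MakeCheckpoint` advances the current batch by one, and `Reset` rolls back to an existing checkpoint (and is expected to fail for a missing one, as `TestResetFromBadCheckpoint` shows). Below is a minimal usage sketch, assuming the `Config`-based `NewStateDB` constructor that appears on one side of this diff; the method names (`MakeCheckpoint`, `Reset`, `CurrentBatch`) are taken from the test code above and may not match the other side of the diff.

```
// Sketch only: checkpoint lifecycle as exercised by TestCheckpoints /
// TestResetFromBadCheckpoint, assuming the Config-based constructor.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/hermeznetwork/hermez-node/db/statedb"
)

func main() {
	dir, err := ioutil.TempDir("", "tmpdb")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Keep at most 128 checkpoints; Type/NLevels as used in the tests above.
	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
		Type: statedb.TypeSynchronizer, NLevels: 32})
	if err != nil {
		log.Fatal(err)
	}

	// Each MakeCheckpoint advances the current batch by one.
	for i := 0; i < 4; i++ {
		if err := sdb.MakeCheckpoint(); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("current batch:", sdb.CurrentBatch()) // expected: 4

	// Reset discards everything after checkpoint 2; resetting to a
	// checkpoint that was never made (e.g. 10) should return an error.
	if err := sdb.Reset(2); err != nil {
		log.Fatal(err)
	}
	fmt.Println("after reset:", sdb.CurrentBatch()) // expected: 2
}
```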

View File

@@ -18,8 +18,7 @@ func concatEthAddrTokenID(addr ethCommon.Address, tokenID common.TokenID) []byte
b = append(b[:], tokenID.Bytes()[:]...) b = append(b[:], tokenID.Bytes()[:]...)
return b return b
} }
func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp, func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp, tokenID common.TokenID) []byte {
tokenID common.TokenID) []byte {
pkComp := pk pkComp := pk
var b []byte var b []byte
b = append(b, addr.Bytes()...) b = append(b, addr.Bytes()...)
@@ -33,8 +32,7 @@ func concatEthAddrBJJTokenID(addr ethCommon.Address, pk babyjub.PublicKeyComp,
// - key: EthAddr & BabyJubJub PublicKey Compressed, value: idx // - key: EthAddr & BabyJubJub PublicKey Compressed, value: idx
// If Idx already exist for the given EthAddr & BJJ, the remaining Idx will be // If Idx already exist for the given EthAddr & BJJ, the remaining Idx will be
// always the smallest one. // always the smallest one.
func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address, func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address, pk babyjub.PublicKeyComp, tokenID common.TokenID) error {
pk babyjub.PublicKeyComp, tokenID common.TokenID) error {
oldIdx, err := s.GetIdxByEthAddrBJJ(addr, pk, tokenID) oldIdx, err := s.GetIdxByEthAddrBJJ(addr, pk, tokenID)
if err == nil { if err == nil {
// EthAddr & BJJ already have an Idx // EthAddr & BJJ already have an Idx
@@ -42,8 +40,7 @@ func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address,
// if new idx is smaller, store the new one // if new idx is smaller, store the new one
// if new idx is bigger, don't store and return, as the used one will be the old // if new idx is bigger, don't store and return, as the used one will be the old
if idx >= oldIdx { if idx >= oldIdx {
log.Debug("StateDB.setIdxByEthAddrBJJ: Idx not stored because there " + log.Debug("StateDB.setIdxByEthAddrBJJ: Idx not stored because there already exist a smaller Idx for the given EthAddr & BJJ")
"already exist a smaller Idx for the given EthAddr & BJJ")
return nil return nil
} }
} }
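
The comments above describe the rule that, for a given EthAddr & BJJ (and TokenID), only the smallest Idx is ever kept. A toy sketch of that decision rule follows, using a hypothetical in-memory map instead of the real StateDB key-value store and prefix keys.

```
// Toy illustration of the "keep the smallest Idx" rule described above.
// The map-based store is hypothetical; the real code writes to the StateDB
// under a prefixed key built from EthAddr, BJJ and TokenID.
package main

import "fmt"

type key struct {
	addr    string
	bjj     string
	tokenID uint32
}

type idxIndex map[key]uint64

// setIdx stores idx for k only if no equal or smaller Idx is already
// recorded, mirroring the check in setIdxByEthAddrBJJ above.
func (m idxIndex) setIdx(k key, idx uint64) {
	if old, ok := m[k]; ok && idx >= old {
		return // an equal or smaller Idx already exists: keep it
	}
	m[k] = idx
}

func main() {
	m := idxIndex{}
	k := key{addr: "0xaddr", bjj: "bjj-pubkey", tokenID: 1}
	m.setIdx(k, 260)
	m.setIdx(k, 256) // smaller: replaces 260
	m.setIdx(k, 300) // larger: ignored
	fmt.Println(m[k]) // 256
}
```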
@@ -83,8 +80,7 @@ func (s *StateDB) setIdxByEthAddrBJJ(idx common.Idx, addr ethCommon.Address,
// GetIdxByEthAddr returns the smallest Idx in the StateDB for the given // GetIdxByEthAddr returns the smallest Idx in the StateDB for the given
// Ethereum Address. Will return common.Idx(0) and error in case that Idx is // Ethereum Address. Will return common.Idx(0) and error in case that Idx is
// not found in the StateDB. // not found in the StateDB.
func (s *StateDB) GetIdxByEthAddr(addr ethCommon.Address, tokenID common.TokenID) (common.Idx, func (s *StateDB) GetIdxByEthAddr(addr ethCommon.Address, tokenID common.TokenID) (common.Idx, error) {
error) {
k := concatEthAddrTokenID(addr, tokenID) k := concatEthAddrTokenID(addr, tokenID)
b, err := s.db.DB().Get(append(PrefixKeyAddr, k...)) b, err := s.db.DB().Get(append(PrefixKeyAddr, k...))
if err != nil { if err != nil {
@@ -120,22 +116,18 @@ func (s *StateDB) GetIdxByEthAddrBJJ(addr ethCommon.Address, pk babyjub.PublicKe
return common.Idx(0), tracerr.Wrap(ErrIdxNotFound) return common.Idx(0), tracerr.Wrap(ErrIdxNotFound)
} else if err != nil { } else if err != nil {
return common.Idx(0), return common.Idx(0),
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", ErrIdxNotFound, addr.Hex(), pk, tokenID))
ErrIdxNotFound, addr.Hex(), pk, tokenID))
} }
idx, err := common.IdxFromBytes(b) idx, err := common.IdxFromBytes(b)
if err != nil { if err != nil {
return common.Idx(0), return common.Idx(0),
tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", err, addr.Hex(), pk, tokenID))
err, addr.Hex(), pk, tokenID))
} }
return idx, nil return idx, nil
} }
// rest of cases (included case ToEthAddr==0) are not possible // rest of cases (included case ToEthAddr==0) are not possible
return common.Idx(0), return common.Idx(0),
tracerr.Wrap( tracerr.Wrap(fmt.Errorf("GetIdxByEthAddrBJJ: Not found, %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d", ErrGetIdxNoCase, addr.Hex(), pk, tokenID))
fmt.Errorf("GetIdxByEthAddrBJJ: Not found, %s: ToEthAddr: %s, ToBJJ: %s, TokenID: %d",
ErrGetIdxNoCase, addr.Hex(), pk, tokenID))
} }
// GetTokenIDsFromIdxs returns a map containing the common.TokenID with its // GetTokenIDsFromIdxs returns a map containing the common.TokenID with its
@@ -145,9 +137,7 @@ func (s *StateDB) GetTokenIDsFromIdxs(idxs []common.Idx) (map[common.TokenID]com
for i := 0; i < len(idxs); i++ { for i := 0; i < len(idxs); i++ {
a, err := s.GetAccount(idxs[i]) a, err := s.GetAccount(idxs[i])
if err != nil { if err != nil {
return nil, return nil, tracerr.Wrap(fmt.Errorf("GetTokenIDsFromIdxs error on GetAccount with Idx==%d: %s", idxs[i], err.Error()))
tracerr.Wrap(fmt.Errorf("GetTokenIDsFromIdxs error on GetAccount with Idx==%d: %s",
idxs[i], err.Error()))
} }
m[a.TokenID] = idxs[i] m[a.TokenID] = idxs[i]
} }

View File

@@ -18,7 +18,7 @@ func TestGetIdx(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir)) defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0}) sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
assert.NoError(t, err) assert.NoError(t, err)
var sk babyjub.PrivateKey var sk babyjub.PrivateKey

View File

@@ -13,9 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate" migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler" "github.com/russross/meddler"
"golang.org/x/sync/semaphore"
) )
var migrations *migrate.PackrMigrationSource var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
// APIConnectionController is used to limit the SQL open connections used by the API // APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct { type APIConnectionController struct {
smphr *semaphore.Weighted smphr semaphore.Semaphore
timeout time.Duration timeout time.Duration
} }
// NewAPIConnectionController initialize APIConnectionController // NewAPICnnectionController initialize APIConnectionController
func NewAPIConnectionController(maxConnections int, timeout time.Duration) *APIConnectionController { func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{ return &APIConnectionController{
smphr: semaphore.NewWeighted(int64(maxConnections)), smphr: semaphore.New(maxConnections),
timeout: timeout, timeout: timeout,
} }
} }
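
A minimal sketch of how such a controller can bound concurrent API queries, assuming the `golang.org/x/sync/semaphore` variant from one side of this diff; the `acquire`/`release` helper names below are illustrative and not taken from the repository.

```
// Sketch: limit concurrent SQL queries from the API with a weighted
// semaphore and a per-request acquisition timeout.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

type apiConnectionController struct {
	smphr   *semaphore.Weighted
	timeout time.Duration
}

func newAPIConnectionController(maxConnections int, timeout time.Duration) *apiConnectionController {
	return &apiConnectionController{
		smphr:   semaphore.NewWeighted(int64(maxConnections)),
		timeout: timeout,
	}
}

// acquire blocks until a slot is free or the timeout expires, and returns
// a release function to be deferred by the caller.
func (c *apiConnectionController) acquire() (func(), error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	defer cancel()
	if err := c.smphr.Acquire(ctx, 1); err != nil {
		return nil, err // timed out: too many concurrent API queries
	}
	return func() { c.smphr.Release(1) }, nil
}

func main() {
	c := newAPIConnectionController(2, 500*time.Millisecond)
	release, err := c.acquire()
	if err != nil {
		fmt.Println("busy:", err)
		return
	}
	defer release()
	fmt.Println("got a SQL connection slot")
}
```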

View File

@@ -70,8 +70,7 @@ type AuctionEventInitialize struct {
} }
// AuctionVariables returns the AuctionVariables from the initialize event // AuctionVariables returns the AuctionVariables from the initialize event
func (ei *AuctionEventInitialize) AuctionVariables( func (ei *AuctionEventInitialize) AuctionVariables(InitialMinimalBidding *big.Int) *common.AuctionVariables {
InitialMinimalBidding *big.Int) *common.AuctionVariables {
return &common.AuctionVariables{ return &common.AuctionVariables{
EthBlockNum: 0, EthBlockNum: 0,
DonationAddress: ei.DonationAddress, DonationAddress: ei.DonationAddress,
@@ -223,15 +222,12 @@ type AuctionInterface interface {
AuctionGetAllocationRatio() ([3]uint16, error) AuctionGetAllocationRatio() ([3]uint16, error)
AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (*types.Transaction, error) AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (*types.Transaction, error)
AuctionGetDonationAddress() (*ethCommon.Address, error) AuctionGetDonationAddress() (*ethCommon.Address, error)
AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, newBootCoordinatorURL string) (*types.Transaction, error)
newBootCoordinatorURL string) (*types.Transaction, error)
AuctionGetBootCoordinator() (*ethCommon.Address, error) AuctionGetBootCoordinator() (*ethCommon.Address, error)
AuctionChangeDefaultSlotSetBid(slotSet int64, AuctionChangeDefaultSlotSetBid(slotSet int64, newInitialMinBid *big.Int) (*types.Transaction, error)
newInitialMinBid *big.Int) (*types.Transaction, error)
// Coordinator Management // Coordinator Management
AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (*types.Transaction, AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (*types.Transaction, error)
error)
// Slot Info // Slot Info
AuctionGetSlotNumber(blockNum int64) (int64, error) AuctionGetSlotNumber(blockNum int64) (int64, error)
@@ -241,8 +237,7 @@ type AuctionInterface interface {
AuctionGetSlotSet(slot int64) (*big.Int, error) AuctionGetSlotSet(slot int64) (*big.Int, error)
// Bidding // Bidding
AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) ( AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) (tx *types.Transaction, err error)
tx *types.Transaction, err error)
AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, slotSets [6]bool, AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, slotSets [6]bool,
maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error) maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error)
@@ -259,7 +254,7 @@ type AuctionInterface interface {
// //
AuctionConstants() (*common.AuctionConstants, error) AuctionConstants() (*common.AuctionConstants, error)
AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error)
AuctionEventInit() (*AuctionEventInitialize, int64, error) AuctionEventInit() (*AuctionEventInitialize, int64, error)
} }
@@ -280,10 +275,8 @@ type AuctionClient struct {
} }
// NewAuctionClient creates a new AuctionClient. `tokenAddress` is the address of the HEZ tokens. // NewAuctionClient creates a new AuctionClient. `tokenAddress` is the address of the HEZ tokens.
func NewAuctionClient(client *EthereumClient, address ethCommon.Address, func NewAuctionClient(client *EthereumClient, address ethCommon.Address, tokenHEZCfg TokenConfig) (*AuctionClient, error) {
tokenHEZCfg TokenConfig) (*AuctionClient, error) { contractAbi, err := abi.JSON(strings.NewReader(string(HermezAuctionProtocol.HermezAuctionProtocolABI)))
contractAbi, err :=
abi.JSON(strings.NewReader(string(HermezAuctionProtocol.HermezAuctionProtocolABI)))
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -338,8 +331,7 @@ func (c *AuctionClient) AuctionGetSlotDeadline() (slotDeadline uint8, err error)
} }
// AuctionSetOpenAuctionSlots is the interface to call the smart contract function // AuctionSetOpenAuctionSlots is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetOpenAuctionSlots( func (c *AuctionClient) AuctionSetOpenAuctionSlots(newOpenAuctionSlots uint16) (tx *types.Transaction, err error) {
newOpenAuctionSlots uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -363,8 +355,7 @@ func (c *AuctionClient) AuctionGetOpenAuctionSlots() (openAuctionSlots uint16, e
} }
// AuctionSetClosedAuctionSlots is the interface to call the smart contract function // AuctionSetClosedAuctionSlots is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetClosedAuctionSlots( func (c *AuctionClient) AuctionSetClosedAuctionSlots(newClosedAuctionSlots uint16) (tx *types.Transaction, err error) {
newClosedAuctionSlots uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -388,8 +379,7 @@ func (c *AuctionClient) AuctionGetClosedAuctionSlots() (closedAuctionSlots uint1
} }
// AuctionSetOutbidding is the interface to call the smart contract function // AuctionSetOutbidding is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetOutbidding(newOutbidding uint16) (tx *types.Transaction, func (c *AuctionClient) AuctionSetOutbidding(newOutbidding uint16) (tx *types.Transaction, err error) {
err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
12500000, //nolint:gomnd 12500000, //nolint:gomnd
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -413,8 +403,7 @@ func (c *AuctionClient) AuctionGetOutbidding() (outbidding uint16, err error) {
} }
// AuctionSetAllocationRatio is the interface to call the smart contract function // AuctionSetAllocationRatio is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetAllocationRatio( func (c *AuctionClient) AuctionSetAllocationRatio(newAllocationRatio [3]uint16) (tx *types.Transaction, err error) {
newAllocationRatio [3]uint16) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -438,8 +427,7 @@ func (c *AuctionClient) AuctionGetAllocationRatio() (allocationRation [3]uint16,
} }
// AuctionSetDonationAddress is the interface to call the smart contract function // AuctionSetDonationAddress is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetDonationAddress( func (c *AuctionClient) AuctionSetDonationAddress(newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
newDonationAddress ethCommon.Address) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -452,8 +440,7 @@ func (c *AuctionClient) AuctionSetDonationAddress(
} }
// AuctionGetDonationAddress is the interface to call the smart contract function // AuctionGetDonationAddress is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.Address, func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.Address, err error) {
err error) {
var _donationAddress ethCommon.Address var _donationAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
_donationAddress, err = c.auction.GetDonationAddress(c.opts) _donationAddress, err = c.auction.GetDonationAddress(c.opts)
@@ -465,13 +452,11 @@ func (c *AuctionClient) AuctionGetDonationAddress() (donationAddress *ethCommon.
} }
// AuctionSetBootCoordinator is the interface to call the smart contract function // AuctionSetBootCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.Address, newBootCoordinatorURL string) (tx *types.Transaction, err error) {
newBootCoordinatorURL string) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
return c.auction.SetBootCoordinator(auth, newBootCoordinator, return c.auction.SetBootCoordinator(auth, newBootCoordinator, newBootCoordinatorURL)
newBootCoordinatorURL)
}, },
); err != nil { ); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed setting bootCoordinator: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed setting bootCoordinator: %w", err))
@@ -480,8 +465,7 @@ func (c *AuctionClient) AuctionSetBootCoordinator(newBootCoordinator ethCommon.A
} }
// AuctionGetBootCoordinator is the interface to call the smart contract function // AuctionGetBootCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.Address, func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.Address, err error) {
err error) {
var _bootCoordinator ethCommon.Address var _bootCoordinator ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
_bootCoordinator, err = c.auction.GetBootCoordinator(c.opts) _bootCoordinator, err = c.auction.GetBootCoordinator(c.opts)
@@ -493,8 +477,7 @@ func (c *AuctionClient) AuctionGetBootCoordinator() (bootCoordinator *ethCommon.
} }
// AuctionChangeDefaultSlotSetBid is the interface to call the smart contract function // AuctionChangeDefaultSlotSetBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64, func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64, newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
newInitialMinBid *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -508,8 +491,7 @@ func (c *AuctionClient) AuctionChangeDefaultSlotSetBid(slotSet int64,
} }
// AuctionGetClaimableHEZ is the interface to call the smart contract function // AuctionGetClaimableHEZ is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetClaimableHEZ( func (c *AuctionClient) AuctionGetClaimableHEZ(claimAddress ethCommon.Address) (claimableHEZ *big.Int, err error) {
claimAddress ethCommon.Address) (claimableHEZ *big.Int, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
claimableHEZ, err = c.auction.GetClaimableHEZ(c.opts, claimAddress) claimableHEZ, err = c.auction.GetClaimableHEZ(c.opts, claimAddress)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -520,8 +502,7 @@ func (c *AuctionClient) AuctionGetClaimableHEZ(
} }
// AuctionSetCoordinator is the interface to call the smart contract function // AuctionSetCoordinator is the interface to call the smart contract function
func (c *AuctionClient) AuctionSetCoordinator(forger ethCommon.Address, func (c *AuctionClient) AuctionSetCoordinator(forger ethCommon.Address, coordinatorURL string) (tx *types.Transaction, err error) {
coordinatorURL string) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -570,8 +551,7 @@ func (c *AuctionClient) AuctionGetSlotSet(slot int64) (slotSet *big.Int, err err
} }
// AuctionGetDefaultSlotSetBid is the interface to call the smart contract function // AuctionGetDefaultSlotSetBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionGetDefaultSlotSetBid(slotSet uint8) (minBidSlotSet *big.Int, func (c *AuctionClient) AuctionGetDefaultSlotSetBid(slotSet uint8) (minBidSlotSet *big.Int, err error) {
err error) {
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
minBidSlotSet, err = c.auction.GetDefaultSlotSetBid(c.opts, slotSet) minBidSlotSet, err = c.auction.GetDefaultSlotSetBid(c.opts, slotSet)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -594,8 +574,7 @@ func (c *AuctionClient) AuctionGetSlotNumber(blockNum int64) (slot int64, err er
} }
// AuctionBid is the interface to call the smart contract function // AuctionBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.Int, deadline *big.Int) (tx *types.Transaction, err error) {
deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -607,8 +586,7 @@ func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.I
} }
tokenName := c.tokenHEZCfg.Name tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest) signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature) permit := createPermit(owner, spender, amount, deadline, digest, signature)
_slot := big.NewInt(slot) _slot := big.NewInt(slot)
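
The bid path above builds a permit digest, signs it with the node's keystore, and packs the signature into the permit bytes sent with the bid. A self-contained sketch of the signing step only, using a throwaway keystore and a stand-in digest rather than the real `createPermitDigest` output:

```
// Sketch: sign a 32-byte digest with an unlocked keystore account, which is
// what c.client.ks.SignHash(*c.client.account, digest) does above. The digest
// here is a stand-in Keccak hash, not a real EIP-2612 permit digest.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	dir, _ := ioutil.TempDir("", "ks")
	ks := keystore.NewKeyStore(dir, keystore.LightScryptN, keystore.LightScryptP)
	acc, err := ks.NewAccount("password")
	if err != nil {
		panic(err)
	}
	if err := ks.Unlock(acc, "password"); err != nil {
		panic(err)
	}

	// Stand-in digest: 32 bytes, as SignHash expects.
	digest := crypto.Keccak256([]byte("permit(owner,spender,amount,nonce,deadline)"))

	sig, err := ks.SignHash(acc, digest)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %d bytes (r||s||v)\n", len(sig))
}
```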
@@ -621,8 +599,8 @@ func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.I
} }
// AuctionMultiBid is the interface to call the smart contract function // AuctionMultiBid is the interface to call the smart contract function
func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlot int64, slotSets [6]bool,
slotSets [6]bool, maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error) { maxBid, minBid, deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
1000000, //nolint:gomnd 1000000, //nolint:gomnd
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -635,14 +613,12 @@ func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlo
tokenName := c.tokenHEZCfg.Name tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest) signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature) permit := createPermit(owner, spender, amount, deadline, digest, signature)
_startingSlot := big.NewInt(startingSlot) _startingSlot := big.NewInt(startingSlot)
_endingSlot := big.NewInt(endingSlot) _endingSlot := big.NewInt(endingSlot)
return c.auction.ProcessMultiBid(auth, amount, _startingSlot, _endingSlot, return c.auction.ProcessMultiBid(auth, amount, _startingSlot, _endingSlot, slotSets, maxBid, minBid, permit)
slotSets, maxBid, minBid, permit)
}, },
); err != nil { ); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed multibid: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed multibid: %w", err))
@@ -651,8 +627,7 @@ func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlo
} }
// AuctionCanForge is the interface to call the smart contract function // AuctionCanForge is the interface to call the smart contract function
func (c *AuctionClient) AuctionCanForge(forger ethCommon.Address, blockNum int64) (canForge bool, func (c *AuctionClient) AuctionCanForge(forger ethCommon.Address, blockNum int64) (canForge bool, err error) {
err error) {
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
canForge, err = c.auction.CanForge(c.opts, forger, big.NewInt(blockNum)) canForge, err = c.auction.CanForge(c.opts, forger, big.NewInt(blockNum))
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -705,8 +680,7 @@ func (c *AuctionClient) AuctionConstants() (auctionConstants *common.AuctionCons
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
auctionConstants.InitialMinimalBidding, err = auctionConstants.InitialMinimalBidding, err = c.auction.INITIALMINIMALBIDDING(c.opts)
c.auction.INITIALMINIMALBIDDING(c.opts)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -777,35 +751,21 @@ func (c *AuctionClient) AuctionVariables() (auctionVariables *common.AuctionVari
} }
var ( var (
logAuctionNewBid = crypto.Keccak256Hash([]byte( logAuctionNewBid = crypto.Keccak256Hash([]byte("NewBid(uint128,uint128,address)"))
"NewBid(uint128,uint128,address)")) logAuctionNewSlotDeadline = crypto.Keccak256Hash([]byte("NewSlotDeadline(uint8)"))
logAuctionNewSlotDeadline = crypto.Keccak256Hash([]byte( logAuctionNewClosedAuctionSlots = crypto.Keccak256Hash([]byte("NewClosedAuctionSlots(uint16)"))
"NewSlotDeadline(uint8)")) logAuctionNewOutbidding = crypto.Keccak256Hash([]byte("NewOutbidding(uint16)"))
logAuctionNewClosedAuctionSlots = crypto.Keccak256Hash([]byte( logAuctionNewDonationAddress = crypto.Keccak256Hash([]byte("NewDonationAddress(address)"))
"NewClosedAuctionSlots(uint16)")) logAuctionNewBootCoordinator = crypto.Keccak256Hash([]byte("NewBootCoordinator(address,string)"))
logAuctionNewOutbidding = crypto.Keccak256Hash([]byte( logAuctionNewOpenAuctionSlots = crypto.Keccak256Hash([]byte("NewOpenAuctionSlots(uint16)"))
"NewOutbidding(uint16)")) logAuctionNewAllocationRatio = crypto.Keccak256Hash([]byte("NewAllocationRatio(uint16[3])"))
logAuctionNewDonationAddress = crypto.Keccak256Hash([]byte( logAuctionSetCoordinator = crypto.Keccak256Hash([]byte("SetCoordinator(address,address,string)"))
"NewDonationAddress(address)")) logAuctionNewForgeAllocated = crypto.Keccak256Hash([]byte("NewForgeAllocated(address,address,uint128,uint128,uint128,uint128)"))
logAuctionNewBootCoordinator = crypto.Keccak256Hash([]byte( logAuctionNewDefaultSlotSetBid = crypto.Keccak256Hash([]byte("NewDefaultSlotSetBid(uint128,uint128)"))
"NewBootCoordinator(address,string)")) logAuctionNewForge = crypto.Keccak256Hash([]byte("NewForge(address,uint128)"))
logAuctionNewOpenAuctionSlots = crypto.Keccak256Hash([]byte( logAuctionHEZClaimed = crypto.Keccak256Hash([]byte("HEZClaimed(address,uint128)"))
"NewOpenAuctionSlots(uint16)")) logAuctionInitialize = crypto.Keccak256Hash([]byte(
logAuctionNewAllocationRatio = crypto.Keccak256Hash([]byte( "InitializeHermezAuctionProtocolEvent(address,address,string,uint16,uint8,uint16,uint16,uint16[3])"))
"NewAllocationRatio(uint16[3])"))
logAuctionSetCoordinator = crypto.Keccak256Hash([]byte(
"SetCoordinator(address,address,string)"))
logAuctionNewForgeAllocated = crypto.Keccak256Hash([]byte(
"NewForgeAllocated(address,address,uint128,uint128,uint128,uint128)"))
logAuctionNewDefaultSlotSetBid = crypto.Keccak256Hash([]byte(
"NewDefaultSlotSetBid(uint128,uint128)"))
logAuctionNewForge = crypto.Keccak256Hash([]byte(
"NewForge(address,uint128)"))
logAuctionHEZClaimed = crypto.Keccak256Hash([]byte(
"HEZClaimed(address,uint128)"))
logAuctionInitialize = crypto.Keccak256Hash([]byte(
"InitializeHermezAuctionProtocolEvent(address,address,string," +
"uint16,uint8,uint16,uint16,uint16[3])"))
) )
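
Each of these topic constants is the Keccak-256 hash of the event's canonical signature string, which is what the log dispatch switch below compares against `vLog.Topics[0]`. A small sketch:

```
// Sketch: an event's topic[0] is the Keccak-256 hash of its canonical
// signature, which is how the switch over vLog.Topics[0] matches events.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	logAuctionNewBid := crypto.Keccak256Hash([]byte("NewBid(uint128,uint128,address)"))
	fmt.Println("NewBid topic:", logAuctionNewBid.Hex())

	// A receiver compares the first topic of each log against this hash:
	// if vLog.Topics[0] == logAuctionNewBid { ... unpack NewBid ... }
}
```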
// AuctionEventInit returns the initialize event with its corresponding block number // AuctionEventInit returns the initialize event with its corresponding block number
@@ -821,8 +781,7 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(logs) != 1 { if len(logs) != 1 {
return nil, 0, return nil, 0, tracerr.Wrap(fmt.Errorf("no event of type InitializeHermezAuctionProtocolEvent found"))
tracerr.Wrap(fmt.Errorf("no event of type InitializeHermezAuctionProtocolEvent found"))
} }
vLog := logs[0] vLog := logs[0]
if vLog.Topics[0] != logAuctionInitialize { if vLog.Topics[0] != logAuctionInitialize {
@@ -838,22 +797,15 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
} }
// AuctionEventsByBlock returns the events in a block that happened in the // AuctionEventsByBlock returns the events in a block that happened in the
// Auction Smart Contract. // Auction Smart Contract and the blockHash where the events happened. If there
// To query by blockNum, set blockNum >= 0 and blockHash == nil. // are no events in that block, blockHash is nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored. func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error) {
// If there are no events in that block the result is nil.
func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*AuctionEvents, error) {
var auctionEvents AuctionEvents var auctionEvents AuctionEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
BlockHash: blockHash, FromBlock: big.NewInt(blockNum),
FromBlock: blockNumBigInt, ToBlock: big.NewInt(blockNum),
ToBlock: blockNumBigInt,
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
@@ -862,17 +814,15 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
logs, err := c.client.client.FilterLogs(context.TODO(), query) logs, err := c.client.client.FilterLogs(context.TODO(), query)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
if len(logs) == 0 { if len(logs) > 0 {
return nil, nil blockHash = &logs[0].BlockHash
} }
for _, vLog := range logs { for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash { if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
vLog.BlockHash.String()) return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
} }
switch vLog.Topics[0] { switch vLog.Topics[0] {
case logAuctionNewBid: case logAuctionNewBid:
@@ -882,9 +832,8 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
Address ethCommon.Address Address ethCommon.Address
} }
var newBid AuctionEventNewBid var newBid AuctionEventNewBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", vLog.Data); err != nil {
vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
newBid.BidAmount = auxNewBid.BidAmount newBid.BidAmount = auxNewBid.BidAmount
newBid.Slot = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64() newBid.Slot = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
@@ -892,90 +841,74 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
auctionEvents.NewBid = append(auctionEvents.NewBid, newBid) auctionEvents.NewBid = append(auctionEvents.NewBid, newBid)
case logAuctionNewSlotDeadline: case logAuctionNewSlotDeadline:
var newSlotDeadline AuctionEventNewSlotDeadline var newSlotDeadline AuctionEventNewSlotDeadline
if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, "NewSlotDeadline", vLog.Data); err != nil {
"NewSlotDeadline", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline) auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline)
case logAuctionNewClosedAuctionSlots: case logAuctionNewClosedAuctionSlots:
var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, "NewClosedAuctionSlots", vLog.Data); err != nil {
"NewClosedAuctionSlots", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
auctionEvents.NewClosedAuctionSlots = auctionEvents.NewClosedAuctionSlots = append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
case logAuctionNewOutbidding: case logAuctionNewOutbidding:
var newOutbidding AuctionEventNewOutbidding var newOutbidding AuctionEventNewOutbidding
if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", vLog.Data); err != nil {
vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding) auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding)
case logAuctionNewDonationAddress: case logAuctionNewDonationAddress:
var newDonationAddress AuctionEventNewDonationAddress var newDonationAddress AuctionEventNewDonationAddress
newDonationAddress.NewDonationAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) newDonationAddress.NewDonationAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewDonationAddress = append(auctionEvents.NewDonationAddress, auctionEvents.NewDonationAddress = append(auctionEvents.NewDonationAddress, newDonationAddress)
newDonationAddress)
case logAuctionNewBootCoordinator: case logAuctionNewBootCoordinator:
var newBootCoordinator AuctionEventNewBootCoordinator var newBootCoordinator AuctionEventNewBootCoordinator
if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, "NewBootCoordinator", vLog.Data); err != nil {
"NewBootCoordinator", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, newBootCoordinator)
newBootCoordinator)
case logAuctionNewOpenAuctionSlots: case logAuctionNewOpenAuctionSlots:
var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, "NewOpenAuctionSlots", vLog.Data); err != nil {
"NewOpenAuctionSlots", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
auctionEvents.NewOpenAuctionSlots = auctionEvents.NewOpenAuctionSlots = append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
case logAuctionNewAllocationRatio: case logAuctionNewAllocationRatio:
var newAllocationRatio AuctionEventNewAllocationRatio var newAllocationRatio AuctionEventNewAllocationRatio
if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, "NewAllocationRatio", vLog.Data); err != nil {
"NewAllocationRatio", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, newAllocationRatio)
newAllocationRatio)
case logAuctionSetCoordinator: case logAuctionSetCoordinator:
var setCoordinator AuctionEventSetCoordinator var setCoordinator AuctionEventSetCoordinator
if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, "SetCoordinator", vLog.Data); err != nil {
"SetCoordinator", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
setCoordinator.ForgerAddress = ethCommon.BytesToAddress(vLog.Topics[2].Bytes()) setCoordinator.ForgerAddress = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
auctionEvents.SetCoordinator = append(auctionEvents.SetCoordinator, setCoordinator) auctionEvents.SetCoordinator = append(auctionEvents.SetCoordinator, setCoordinator)
case logAuctionNewForgeAllocated: case logAuctionNewForgeAllocated:
var newForgeAllocated AuctionEventNewForgeAllocated var newForgeAllocated AuctionEventNewForgeAllocated
if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, "NewForgeAllocated", vLog.Data); err != nil {
"NewForgeAllocated", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes()) newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
newForgeAllocated.SlotToForge = new(big.Int).SetBytes(vLog.Topics[3][:]).Int64() newForgeAllocated.SlotToForge = new(big.Int).SetBytes(vLog.Topics[3][:]).Int64()
auctionEvents.NewForgeAllocated = append(auctionEvents.NewForgeAllocated, auctionEvents.NewForgeAllocated = append(auctionEvents.NewForgeAllocated, newForgeAllocated)
newForgeAllocated)
case logAuctionNewDefaultSlotSetBid: case logAuctionNewDefaultSlotSetBid:
var auxNewDefaultSlotSetBid struct { var auxNewDefaultSlotSetBid struct {
SlotSet *big.Int SlotSet *big.Int
NewInitialMinBid *big.Int NewInitialMinBid *big.Int
} }
var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, "NewDefaultSlotSetBid", vLog.Data); err != nil {
"NewDefaultSlotSetBid", vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid
newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64() newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64()
auctionEvents.NewDefaultSlotSetBid = auctionEvents.NewDefaultSlotSetBid = append(auctionEvents.NewDefaultSlotSetBid, newDefaultSlotSetBid)
append(auctionEvents.NewDefaultSlotSetBid, newDefaultSlotSetBid)
case logAuctionNewForge: case logAuctionNewForge:
var newForge AuctionEventNewForge var newForge AuctionEventNewForge
newForge.Forger = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) newForge.Forger = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
@@ -983,13 +916,12 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
auctionEvents.NewForge = append(auctionEvents.NewForge, newForge) auctionEvents.NewForge = append(auctionEvents.NewForge, newForge)
case logAuctionHEZClaimed: case logAuctionHEZClaimed:
var HEZClaimed AuctionEventHEZClaimed var HEZClaimed AuctionEventHEZClaimed
if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", vLog.Data); err != nil {
vLog.Data); err != nil { return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
} }
HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.HEZClaimed = append(auctionEvents.HEZClaimed, HEZClaimed) auctionEvents.HEZClaimed = append(auctionEvents.HEZClaimed, HEZClaimed)
} }
} }
return &auctionEvents, nil return &auctionEvents, blockHash, nil
} }
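
Both variants of `AuctionEventsByBlock` boil down to a single-block `FilterLogs` query against the auction contract address, followed by a block-hash consistency check. A sketch of that raw query, with a placeholder RPC endpoint, block number, and contract address:

```
// Sketch: filter one block's logs for a contract address with go-ethereum.
// The endpoint, block number and address below are placeholders, not values
// from this repository.
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum"
	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	blockNum := int64(1234) // placeholder block
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(blockNum),
		ToBlock:   big.NewInt(blockNum),
		Addresses: []ethCommon.Address{
			ethCommon.HexToAddress("0x0000000000000000000000000000000000000000"), // placeholder
		},
	}
	logs, err := client.FilterLogs(context.TODO(), query)
	if err != nil {
		log.Fatal(err)
	}
	for _, vLog := range logs {
		if len(vLog.Topics) == 0 {
			continue
		}
		// Every log of the same block shares one BlockHash; the code above
		// uses this to detect reorg-induced mismatches.
		fmt.Println(vLog.BlockHash.Hex(), vLog.Topics[0].Hex())
	}
}
```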

View File

@@ -58,8 +58,7 @@ func TestAuctionConstants(t *testing.T) {
func TestAuctionVariables(t *testing.T) { func TestAuctionVariables(t *testing.T) {
INITMINBID := new(big.Int) INITMINBID := new(big.Int)
INITMINBID.SetString(minBidStr, 10) INITMINBID.SetString(minBidStr, 10)
defaultSlotSetBid := [6]*big.Int{INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID, defaultSlotSetBid := [6]*big.Int{INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID, INITMINBID}
INITMINBID}
auctionVariables, err := auctionClientTest.AuctionVariables() auctionVariables, err := auctionClientTest.AuctionVariables()
require.Nil(t, err) require.Nil(t, err)
@@ -89,7 +88,7 @@ func TestAuctionSetSlotDeadline(t *testing.T) {
assert.Equal(t, newSlotDeadline, slotDeadline) assert.Equal(t, newSlotDeadline, slotDeadline)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline) assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline)
} }
@@ -110,7 +109,7 @@ func TestAuctionSetOpenAuctionSlots(t *testing.T) {
assert.Equal(t, newOpenAuctionSlots, openAuctionSlots) assert.Equal(t, newOpenAuctionSlots, openAuctionSlots)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots) assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots)
} }
@@ -131,10 +130,9 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots) assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newClosedAuctionSlots, assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
_, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots) _, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
require.Nil(t, err) require.Nil(t, err)
} }
@@ -155,7 +153,7 @@ func TestAuctionSetOutbidding(t *testing.T) {
assert.Equal(t, newOutbidding, outbidding) assert.Equal(t, newOutbidding, outbidding)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding) assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding)
_, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst) _, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst)
@@ -178,7 +176,7 @@ func TestAuctionSetAllocationRatio(t *testing.T) {
assert.Equal(t, newAllocationRatio, allocationRatio) assert.Equal(t, newAllocationRatio, allocationRatio)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio) assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio)
_, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst) _, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst)
@@ -207,7 +205,7 @@ func TestAuctionSetDonationAddress(t *testing.T) {
assert.Equal(t, &newDonationAddress, donationAddress) assert.Equal(t, &newDonationAddress, donationAddress)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress) assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress)
_, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst) _, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst)
@@ -226,12 +224,11 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
assert.Equal(t, &newBootCoordinator, bootCoordinator) assert.Equal(t, &newBootCoordinator, bootCoordinator)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator) assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL) assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL)
_, err = auctionClientTest.AuctionSetBootCoordinator(bootCoordinatorAddressConst, _, err = auctionClientTest.AuctionSetBootCoordinator(bootCoordinatorAddressConst, bootCoordinatorURL)
bootCoordinatorURL)
require.Nil(t, err) require.Nil(t, err)
} }
@@ -264,7 +261,7 @@ func TestAuctionChangeDefaultSlotSetBid(t *testing.T) {
assert.Equal(t, minBid, newInitialMinBid) assert.Equal(t, minBid, newInitialMinBid)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet) assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet)
assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid) assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid)
@@ -290,7 +287,7 @@ func TestAuctionRegisterCoordinator(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress) assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress)
assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress) assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress)
@@ -309,7 +306,7 @@ func TestAuctionBid(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount) assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder) assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
@@ -345,12 +342,11 @@ func TestAuctionMultiBid(t *testing.T) {
budget := new(big.Int) budget := new(big.Int)
budget.SetString("45200000000000000000", 10) budget.SetString("45200000000000000000", 10)
bidderAddress := governanceAddressConst bidderAddress := governanceAddressConst
_, err = auctionClientTest.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, _, err = auctionClientTest.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, maxBid, minBid, deadline)
maxBid, minBid, deadline)
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder) assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot) assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot)
@@ -380,15 +376,14 @@ func TestAuctionClaimHEZ(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock() currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil) auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount) assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount)
assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner) assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner)
} }
func TestAuctionForge(t *testing.T) { func TestAuctionForge(t *testing.T) {
auctionClientTestHermez, err := NewAuctionClient(ethereumClientHermez, auctionClientTestHermez, err := NewAuctionClient(ethereumClientHermez, auctionTestAddressConst, tokenHEZ)
auctionTestAddressConst, tokenHEZ)
require.Nil(t, err) require.Nil(t, err)
slotConst := 4 slotConst := 4
blockNum := int64(int(blocksPerSlot)*slotConst + int(genesisBlock)) blockNum := int64(int(blocksPerSlot)*slotConst + int(genesisBlock))

View File

@@ -64,19 +64,16 @@ type ClientConfig struct {
} }
// NewClient creates a new Client to interact with Ethereum and the Hermez smart contracts. // NewClient creates a new Client to interact with Ethereum and the Hermez smart contracts.
func NewClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore, func NewClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore, cfg *ClientConfig) (*Client, error) {
cfg *ClientConfig) (*Client, error) {
ethereumClient, err := NewEthereumClient(client, account, ks, &cfg.Ethereum) ethereumClient, err := NewEthereumClient(client, account, ks, &cfg.Ethereum)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
auctionClient, err := NewAuctionClient(ethereumClient, cfg.Auction.Address, auctionClient, err := NewAuctionClient(ethereumClient, cfg.Auction.Address, cfg.Auction.TokenHEZ)
cfg.Auction.TokenHEZ)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
rollupClient, err := NewRollupClient(ethereumClient, cfg.Rollup.Address, rollupClient, err := NewRollupClient(ethereumClient, cfg.Rollup.Address, cfg.Auction.TokenHEZ)
cfg.Auction.TokenHEZ)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }

View File

@@ -64,8 +64,7 @@ type EthereumConfig struct {
GasPriceDiv uint64 GasPriceDiv uint64
} }
// EthereumClient is an ethereum client to call Smart Contract methods and check blockchain // EthereumClient is an ethereum client to call Smart Contract methods and check blockchain information.
// information.
type EthereumClient struct { type EthereumClient struct {
client *ethclient.Client client *ethclient.Client
chainID *big.Int chainID *big.Int
@@ -77,8 +76,7 @@ type EthereumClient struct {
// NewEthereumClient creates an EthereumClient instance. The account is not mandatory (it can // NewEthereumClient creates an EthereumClient instance. The account is not mandatory (it can
// be nil). If the account is nil, CallAuth will fail with ErrAccountNil. // be nil). If the account is nil, CallAuth will fail with ErrAccountNil.
func NewEthereumClient(client *ethclient.Client, account *accounts.Account, func NewEthereumClient(client *ethclient.Client, account *accounts.Account, ks *ethKeystore.KeyStore, config *EthereumConfig) (*EthereumClient, error) {
ks *ethKeystore.KeyStore, config *EthereumConfig) (*EthereumClient, error) {
if config == nil { if config == nil {
config = &EthereumConfig{ config = &EthereumConfig{
CallGasLimit: defaultCallGasLimit, CallGasLimit: defaultCallGasLimit,
@@ -168,8 +166,7 @@ func (c *EthereumClient) NewAuth() (*bind.TransactOpts, error) {
// This call requires a valid account with Ether that can be spent during the // This call requires a valid account with Ether that can be spent during the
// call. // call.
func (c *EthereumClient) CallAuth(gasLimit uint64, func (c *EthereumClient) CallAuth(gasLimit uint64,
fn func(*ethclient.Client, *bind.TransactOpts) (*types.Transaction, error)) (*types.Transaction, fn func(*ethclient.Client, *bind.TransactOpts) (*types.Transaction, error)) (*types.Transaction, error) {
error) {
if c.account == nil { if c.account == nil {
return nil, tracerr.Wrap(ErrAccountNil) return nil, tracerr.Wrap(ErrAccountNil)
} }
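For reference, a minimal caller sketch (not part of this diff) for the one-line CallAuth signature shown above: the callback receives a ready-to-use TransactOpts for the keystore account and returns the transaction it sent. The forgeTimeoutUpdater interface and updateForgeTimeout helper are hypothetical; only CallAuth and the c.hermez.UpdateForgeL1L2BatchTimeout call they mirror appear in this diff.

```
package eth

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/hermeznetwork/tracerr"
)

// forgeTimeoutUpdater is an assumed one-method view of the abigen Hermez binding,
// matching the c.hermez.UpdateForgeL1L2BatchTimeout(auth, ...) call later in this diff.
type forgeTimeoutUpdater interface {
	UpdateForgeL1L2BatchTimeout(opts *bind.TransactOpts, newTimeout uint8) (*types.Transaction, error)
}

// updateForgeTimeout is a hypothetical caller: CallAuth builds the TransactOpts,
// the callback performs the signed contract call and returns the resulting tx.
func updateForgeTimeout(c *EthereumClient, hermez forgeTimeoutUpdater, newTimeout uint8) (*types.Transaction, error) {
	tx, err := c.CallAuth(
		0, // 0 keeps the client's default gas limit, as in the other callers in this diff
		func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
			return hermez.UpdateForgeL1L2BatchTimeout(auth, newTimeout)
		},
	)
	if err != nil {
		return nil, tracerr.Wrap(fmt.Errorf("Failed update ForgeL1L2BatchTimeout: %w", err))
	}
	return tx, nil
}
```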
@@ -215,8 +212,7 @@ func (c *EthereumClient) Call(fn func(*ethclient.Client) error) error {
} }
// EthTransactionReceipt returns the transaction receipt of the given txHash // EthTransactionReceipt returns the transaction receipt of the given txHash
func (c *EthereumClient) EthTransactionReceipt(ctx context.Context, func (c *EthereumClient) EthTransactionReceipt(ctx context.Context, txHash ethCommon.Hash) (*types.Receipt, error) {
txHash ethCommon.Hash) (*types.Receipt, error) {
return c.client.TransactionReceipt(ctx, txHash) return c.client.TransactionReceipt(ctx, txHash)
} }
@@ -232,15 +228,13 @@ func (c *EthereumClient) EthLastBlock() (int64, error) {
} }
// EthHeaderByNumber internally calls ethclient.Client HeaderByNumber // EthHeaderByNumber internally calls ethclient.Client HeaderByNumber
// func (c *EthereumClient) EthHeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, // func (c *EthereumClient) EthHeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
// error) {
// return c.client.HeaderByNumber(ctx, number) // return c.client.HeaderByNumber(ctx, number)
// } // }
// EthBlockByNumber internally calls ethclient.Client BlockByNumber and returns // EthBlockByNumber internally calls ethclient.Client BlockByNumber and returns
// *common.Block. If number == -1, the latest known block is returned. // *common.Block. If number == -1, the latest known block is returned.
func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*common.Block, func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*common.Block, error) {
error) {
blockNum := big.NewInt(number) blockNum := big.NewInt(number)
if number == -1 { if number == -1 {
blockNum = nil blockNum = nil
@@ -330,6 +324,5 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
Value: tx.Value(), Value: tx.Value(),
Data: tx.Data(), Data: tx.Data(),
} }
result, err := c.client.CallContract(ctx, msg, blockNum) return c.client.CallContract(ctx, msg, blockNum)
return result, tracerr.Wrap(err)
} }

View File

@@ -14,8 +14,7 @@ import (
func addBlock(url string) { func addBlock(url string) {
method := "POST" method := "POST"
payload := strings.NewReader( payload := strings.NewReader("{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_mine\",\n \"params\":[],\n \"id\":1\n}")
"{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_mine\",\n \"params\":[],\n \"id\":1\n}")
client := &http.Client{} client := &http.Client{}
req, err := http.NewRequest(method, url, payload) req, err := http.NewRequest(method, url, payload)
@@ -46,9 +45,7 @@ func addTime(seconds float64, url string) {
secondsStr := strconv.FormatFloat(seconds, 'E', -1, 32) secondsStr := strconv.FormatFloat(seconds, 'E', -1, 32)
method := "POST" method := "POST"
payload := strings.NewReader( payload := strings.NewReader("{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_increaseTime\",\n \"params\":[" + secondsStr + "],\n \"id\":1\n}")
"{\n \"jsonrpc\":\"2.0\",\n \"method\":\"evm_increaseTime\",\n \"params\":[" +
secondsStr + "],\n \"id\":1\n}")
client := &http.Client{} client := &http.Client{}
req, err := http.NewRequest(method, url, payload) req, err := http.NewRequest(method, url, payload)
@@ -69,16 +66,13 @@ func addTime(seconds float64, url string) {
}() }()
} }
func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, value, nonce, func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, value, nonce, deadline *big.Int, tokenName string) ([]byte, error) {
deadline *big.Int, tokenName string) ([]byte, error) {
// NOTE: We ignore hash.Write errors because we are writing to a memory // NOTE: We ignore hash.Write errors because we are writing to a memory
// buffer and don't expect any errors to occur. // buffer and don't expect any errors to occur.
abiPermit := abiPermit := []byte("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)")
[]byte("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)")
hashPermit := sha3.NewLegacyKeccak256() hashPermit := sha3.NewLegacyKeccak256()
hashPermit.Write(abiPermit) //nolint:errcheck,gosec hashPermit.Write(abiPermit) //nolint:errcheck,gosec
abiEIP712Domain := abiEIP712Domain := []byte("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)")
[]byte("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)")
hashEIP712Domain := sha3.NewLegacyKeccak256() hashEIP712Domain := sha3.NewLegacyKeccak256()
hashEIP712Domain.Write(abiEIP712Domain) //nolint:errcheck,gosec hashEIP712Domain.Write(abiEIP712Domain) //nolint:errcheck,gosec
var encodeBytes []byte var encodeBytes []byte
@@ -130,8 +124,7 @@ func createPermitDigest(tokenAddr, owner, spender ethCommon.Address, chainID, va
return hashBytes2.Sum(nil), nil return hashBytes2.Sum(nil), nil
} }
func createPermit(owner, spender ethCommon.Address, amount, deadline *big.Int, digest, func createPermit(owner, spender ethCommon.Address, amount, deadline *big.Int, digest, signature []byte) []byte {
signature []byte) []byte {
r := signature[0:32] r := signature[0:32]
s := signature[32:64] s := signature[32:64]
v := signature[64] + byte(27) //nolint:gomnd v := signature[64] + byte(27) //nolint:gomnd
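As a side note on the helpers above: the two strings hashed with sha3.NewLegacyKeccak256 are canonical EIP-712 type strings, so their keccak256 digests are the PERMIT_TYPEHASH and EIP712DOMAIN_TYPEHASH constants the HEZ token contract is assumed to use (standard EIP-2612 layout). A tiny standalone sketch to print them:

```
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// typeHash returns keccak256 of an EIP-712 type string, exactly as createPermitDigest does above.
func typeHash(typeString string) string {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(typeString)) //nolint:errcheck // writing to an in-memory hash cannot fail
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println("PERMIT_TYPEHASH:      ", typeHash("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)"))
	fmt.Println("EIP712DOMAIN_TYPEHASH:", typeHash("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"))
}
```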

View File

@@ -26,8 +26,7 @@ var (
mnemonic = "explain tackle mirror kit van hammer degree position ginger unfair soup bonus" mnemonic = "explain tackle mirror kit van hammer degree position ginger unfair soup bonus"
) )
func genAcc(w *hdwallet.Wallet, ks *keystore.KeyStore, i int) (*accounts.Account, func genAcc(w *hdwallet.Wallet, ks *keystore.KeyStore, i int) (*accounts.Account, ethCommon.Address) {
ethCommon.Address) {
path := hdwallet.MustParseDerivationPath(fmt.Sprintf("m/44'/60'/0'/0/%d", i)) path := hdwallet.MustParseDerivationPath(fmt.Sprintf("m/44'/60'/0'/0/%d", i))
account, err := w.Derive(path, false) account, err := w.Derive(path, false)
if err != nil { if err != nil {
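The genAcc helper above derives the test accounts from a fixed mnemonic. A standalone sketch of the same derivation, assuming the go-ethereum-hdwallet package (github.com/miguelmota/go-ethereum-hdwallet) these tests appear to use:

```
package main

import (
	"fmt"
	"log"

	hdwallet "github.com/miguelmota/go-ethereum-hdwallet"
)

func main() {
	// Same mnemonic as the test setup above; never use it outside local testing.
	mnemonic := "explain tackle mirror kit van hammer degree position ginger unfair soup bonus"
	w, err := hdwallet.NewFromMnemonic(mnemonic)
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 3; i++ {
		// Standard Ethereum derivation path with account index i, as in genAcc.
		path := hdwallet.MustParseDerivationPath(fmt.Sprintf("m/44'/60'/0'/0/%d", i))
		account, err := w.Derive(path, false)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("account %d: %s\n", i, account.Address.Hex())
	}
}
```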
@@ -112,9 +111,7 @@ func getEnvVariables() {
if err != nil { if err != nil {
log.Fatal(errEnvVar) log.Fatal(errEnvVar)
} }
if auctionAddressStr == "" || auctionTestAddressStr == "" || tokenHEZAddressStr == "" || if auctionAddressStr == "" || auctionTestAddressStr == "" || tokenHEZAddressStr == "" || hermezRollupAddressStr == "" || wdelayerAddressStr == "" || wdelayerTestAddressStr == "" || genesisBlockEnv == "" {
hermezRollupAddressStr == "" || wdelayerAddressStr == "" || wdelayerTestAddressStr == "" ||
genesisBlockEnv == "" {
log.Fatal(errEnvVar) log.Fatal(errEnvVar)
} }
@@ -192,8 +189,7 @@ func TestMain(m *testing.M) {
log.Fatal(err) log.Fatal(err)
} }
ethereumClientEmergencyCouncil, err = NewEthereumClient(ethClient, ethereumClientEmergencyCouncil, err = NewEthereumClient(ethClient, emergencyCouncilAccount, ks, nil)
emergencyCouncilAccount, ks, nil)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }

View File

@@ -243,20 +243,13 @@ type RollupInterface interface {
// Public Functions // Public Functions
RollupForgeBatch(*RollupForgeBatchArgs, *bind.TransactOpts) (*types.Transaction, error) RollupForgeBatch(*RollupForgeBatchArgs, *bind.TransactOpts) (*types.Transaction, error)
RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, deadline *big.Int) (*types.Transaction, error)
deadline *big.Int) (*types.Transaction, error)
RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp, tokenID uint32, numExitRoot, RollupWithdrawMerkleProof(babyPubKey babyjub.PublicKeyComp, tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (*types.Transaction, error)
idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (*types.Transaction, RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error)
error)
RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32,
numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error)
RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (*types.Transaction, error)
amount *big.Int, tokenID uint32, toIdx int64) (*types.Transaction, error) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64, deadline *big.Int) (tx *types.Transaction, err error)
RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64,
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64,
deadline *big.Int) (tx *types.Transaction, err error)
// Governance Public Functions // Governance Public Functions
RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout int64) (*types.Transaction, error) RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout int64) (*types.Transaction, error)
@@ -271,7 +264,7 @@ type RollupInterface interface {
// //
RollupConstants() (*common.RollupConstants, error) RollupConstants() (*common.RollupConstants, error)
RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error)
RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
RollupEventInit() (*RollupEventInitialize, int64, error) RollupEventInit() (*RollupEventInitialize, int64, error)
} }
@@ -294,8 +287,7 @@ type RollupClient struct {
} }
// NewRollupClient creates a new RollupClient // NewRollupClient creates a new RollupClient
func NewRollupClient(client *EthereumClient, address ethCommon.Address, func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZCfg TokenConfig) (*RollupClient, error) {
tokenHEZCfg TokenConfig) (*RollupClient, error) {
contractAbi, err := abi.JSON(strings.NewReader(string(Hermez.HermezABI))) contractAbi, err := abi.JSON(strings.NewReader(string(Hermez.HermezABI)))
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -324,19 +316,18 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address,
} }
consts, err := c.RollupConstants() consts, err := c.RollupConstants()
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err)) return nil, tracerr.Wrap(err)
} }
c.consts = consts c.consts = consts
return c, nil return c, nil
} }
// RollupForgeBatch is the interface to call the smart contract function // RollupForgeBatch is the interface to call the smart contract function
func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.TransactOpts) (tx *types.Transaction, err error) {
auth *bind.TransactOpts) (tx *types.Transaction, err error) {
if auth == nil { if auth == nil {
auth, err = c.client.NewAuth() auth, err = c.client.NewAuth()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, err
} }
auth.GasLimit = 1000000 auth.GasLimit = 1000000
} }
@@ -402,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs,
l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch, l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
args.ProofA, args.ProofB, args.ProofC) args.ProofA, args.ProofB, args.ProofC)
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err))
} }
return tx, nil return tx, nil
} }
@@ -410,8 +401,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs,
// RollupAddToken is the interface to call the smart contract function. // RollupAddToken is the interface to call the smart contract function.
// `feeAddToken` is the amount of HEZ tokens that will be paid to add the // `feeAddToken` is the amount of HEZ tokens that will be paid to add the
// token. `feeAddToken` must match the public value of the smart contract. // token. `feeAddToken` must match the public value of the smart contract.
func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToken, deadline *big.Int) (tx *types.Transaction, err error) {
deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -423,11 +413,9 @@ func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToke
} }
tokenName := c.tokenHEZCfg.Name tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, feeAddToken, nonce, deadline, tokenName)
feeAddToken, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest) signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, feeAddToken, deadline, digest, permit := createPermit(owner, spender, feeAddToken, deadline, digest, signature)
signature)
return c.hermez.AddToken(auth, tokenAddress, permit) return c.hermez.AddToken(auth, tokenAddress, permit)
}, },
@@ -438,9 +426,7 @@ func (c *RollupClient) RollupAddToken(tokenAddress ethCommon.Address, feeAddToke
} }
// RollupWithdrawMerkleProof is the interface to call the smart contract function // RollupWithdrawMerkleProof is the interface to call the smart contract function
func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp, tokenID uint32, func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp, tokenID uint32, numExitRoot, idx int64, amount *big.Int, siblings []*big.Int, instantWithdraw bool) (tx *types.Transaction, err error) {
numExitRoot, idx int64, amount *big.Int, siblings []*big.Int,
instantWithdraw bool) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -448,8 +434,7 @@ func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp,
babyPubKey := new(big.Int).SetBytes(pkCompB) babyPubKey := new(big.Int).SetBytes(pkCompB)
numExitRootB := uint32(numExitRoot) numExitRootB := uint32(numExitRoot)
idxBig := big.NewInt(idx) idxBig := big.NewInt(idx)
return c.hermez.WithdrawMerkleProof(auth, tokenID, amount, babyPubKey, return c.hermez.WithdrawMerkleProof(auth, tokenID, amount, babyPubKey, numExitRootB, siblings, idxBig, instantWithdraw)
numExitRootB, siblings, idxBig, instantWithdraw)
}, },
); err != nil { ); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed update WithdrawMerkleProof: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed update WithdrawMerkleProof: %w", err))
@@ -458,17 +443,13 @@ func (c *RollupClient) RollupWithdrawMerkleProof(fromBJJ babyjub.PublicKeyComp,
} }
// RollupWithdrawCircuit is the interface to call the smart contract function // RollupWithdrawCircuit is the interface to call the smart contract function
func (c *RollupClient) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, func (c *RollupClient) RollupWithdrawCircuit(proofA, proofC [2]*big.Int, proofB [2][2]*big.Int, tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction, error) {
tokenID uint32, numExitRoot, idx int64, amount *big.Int, instantWithdraw bool) (*types.Transaction,
error) {
log.Error("TODO") log.Error("TODO")
return nil, tracerr.Wrap(errTODO) return nil, tracerr.Wrap(errTODO)
} }
// RollupL1UserTxERC20ETH is the interface to call the smart contract function // RollupL1UserTxERC20ETH is the interface to call the smart contract function
func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (tx *types.Transaction, err error) {
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64) (tx *types.Transaction,
err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -481,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
} }
fromIdxBig := big.NewInt(fromIdx) fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx) toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount) depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountF, err := common.NewFloat40(amount) amountF, err := common.NewFloat16(amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
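The Float40/Float16 switch above concerns how L1 amounts are compressed before being packed into calldata; both sides then pass the encoded value on as a fixed-width integer (uint16 on the right-hand side). A hedged sketch of the right-hand encoding, assuming common.NewFloat16 keeps the (*big.Int) -> (Float16, error) shape implied by the calls above and that it rejects values it cannot represent exactly:

```
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// 45.2 HEZ in wei, the same budget value used by the multi-bid tests in this diff.
	amount, ok := new(big.Int).SetString("45200000000000000000", 10)
	if !ok {
		log.Fatal("bad amount literal")
	}
	// Assumption: NewFloat16 returns an error for amounts that lose precision,
	// which is why the callers above check it.
	amountF, err := common.NewFloat16(amount)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("amount %s encodes as float16 0x%04x\n", amount, uint16(amountF))
}
```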
@@ -503,9 +484,7 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
} }
// RollupL1UserTxERC20Permit is the interface to call the smart contract function // RollupL1UserTxERC20Permit is the interface to call the smart contract function
func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp, fromIdx int64, depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64, deadline *big.Int) (tx *types.Transaction, err error) {
depositAmount *big.Int, amount *big.Int, tokenID uint32, toIdx int64,
deadline *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -518,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
} }
fromIdxBig := big.NewInt(fromIdx) fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx) toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount) depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountF, err := common.NewFloat40(amount) amountF, err := common.NewFloat16(amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -537,12 +516,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
} }
tokenName := c.tokenHEZCfg.Name tokenName := c.tokenHEZCfg.Name
tokenAddr := c.tokenHEZCfg.Address tokenAddr := c.tokenHEZCfg.Address
digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
amount, nonce, deadline, tokenName)
signature, _ := c.client.ks.SignHash(*c.client.account, digest) signature, _ := c.client.ks.SignHash(*c.client.account, digest)
permit := createPermit(owner, spender, amount, deadline, digest, signature) permit := createPermit(owner, spender, amount, deadline, digest, signature)
return c.hermez.AddL1Transaction(auth, babyPubKey, fromIdxBig, return c.hermez.AddL1Transaction(auth, babyPubKey, fromIdxBig, uint16(depositAmountF),
uint16(depositAmountF), uint16(amountF), tokenID, toIdxBig, permit) uint16(amountF), tokenID, toIdxBig, permit)
}, },
); err != nil { ); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed add L1 Tx ERC20Permit: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed add L1 Tx ERC20Permit: %w", err))
@@ -574,13 +552,11 @@ func (c *RollupClient) RollupLastForgedBatch() (lastForgedBatch int64, err error
} }
// RollupUpdateForgeL1L2BatchTimeout is the interface to call the smart contract function // RollupUpdateForgeL1L2BatchTimeout is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout( func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout(newForgeL1L2BatchTimeout int64) (tx *types.Transaction, err error) {
newForgeL1L2BatchTimeout int64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
return c.hermez.UpdateForgeL1L2BatchTimeout(auth, return c.hermez.UpdateForgeL1L2BatchTimeout(auth, uint8(newForgeL1L2BatchTimeout))
uint8(newForgeL1L2BatchTimeout))
}, },
); err != nil { ); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed update ForgeL1L2BatchTimeout: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed update ForgeL1L2BatchTimeout: %w", err))
@@ -589,8 +565,7 @@ func (c *RollupClient) RollupUpdateForgeL1L2BatchTimeout(
} }
// RollupUpdateFeeAddToken is the interface to call the smart contract function // RollupUpdateFeeAddToken is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction, func (c *RollupClient) RollupUpdateFeeAddToken(newFeeAddToken *big.Int) (tx *types.Transaction, err error) {
err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -625,8 +600,7 @@ func (c *RollupClient) RollupUpdateBucketsParameters(
} }
// RollupUpdateTokenExchange is the interface to call the smart contract function // RollupUpdateTokenExchange is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Address, func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Address, valueArray []uint64) (tx *types.Transaction, err error) {
valueArray []uint64) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -639,8 +613,7 @@ func (c *RollupClient) RollupUpdateTokenExchange(addressArray []ethCommon.Addres
} }
// RollupUpdateWithdrawalDelay is the interface to call the smart contract function // RollupUpdateWithdrawalDelay is the interface to call the smart contract function
func (c *RollupClient) RollupUpdateWithdrawalDelay(newWithdrawalDelay int64) (tx *types.Transaction, func (c *RollupClient) RollupUpdateWithdrawalDelay(newWithdrawalDelay int64) (tx *types.Transaction, err error) {
err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -666,8 +639,7 @@ func (c *RollupClient) RollupSafeMode() (tx *types.Transaction, err error) {
} }
// RollupInstantWithdrawalViewer is the interface to call the smart contract function // RollupInstantWithdrawalViewer is the interface to call the smart contract function
func (c *RollupClient) RollupInstantWithdrawalViewer(tokenAddress ethCommon.Address, func (c *RollupClient) RollupInstantWithdrawalViewer(tokenAddress ethCommon.Address, amount *big.Int) (instantAllowed bool, err error) {
amount *big.Int) (instantAllowed bool, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
instantAllowed, err = c.hermez.InstantWithdrawalViewer(c.opts, tokenAddress, amount) instantAllowed, err = c.hermez.InstantWithdrawalViewer(c.opts, tokenAddress, amount)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -702,8 +674,7 @@ func (c *RollupClient) RollupConstants() (rollupConstants *common.RollupConstant
} }
newRollupVerifier.MaxTx = rollupVerifier.MaxTx.Int64() newRollupVerifier.MaxTx = rollupVerifier.MaxTx.Int64()
newRollupVerifier.NLevels = rollupVerifier.NLevels.Int64() newRollupVerifier.NLevels = rollupVerifier.NLevels.Int64()
rollupConstants.Verifiers = append(rollupConstants.Verifiers, rollupConstants.Verifiers = append(rollupConstants.Verifiers, newRollupVerifier)
newRollupVerifier)
} }
rollupConstants.HermezAuctionContract, err = c.hermez.HermezAuctionContract(c.opts) rollupConstants.HermezAuctionContract, err = c.hermez.HermezAuctionContract(c.opts)
if err != nil { if err != nil {
@@ -722,30 +693,19 @@ func (c *RollupClient) RollupConstants() (rollupConstants *common.RollupConstant
} }
var ( var (
logHermezL1UserTxEvent = crypto.Keccak256Hash([]byte( logHermezL1UserTxEvent = crypto.Keccak256Hash([]byte("L1UserTxEvent(uint32,uint8,bytes)"))
"L1UserTxEvent(uint32,uint8,bytes)")) logHermezAddToken = crypto.Keccak256Hash([]byte("AddToken(address,uint32)"))
logHermezAddToken = crypto.Keccak256Hash([]byte( logHermezForgeBatch = crypto.Keccak256Hash([]byte("ForgeBatch(uint32,uint16)"))
"AddToken(address,uint32)")) logHermezUpdateForgeL1L2BatchTimeout = crypto.Keccak256Hash([]byte("UpdateForgeL1L2BatchTimeout(uint8)"))
logHermezForgeBatch = crypto.Keccak256Hash([]byte( logHermezUpdateFeeAddToken = crypto.Keccak256Hash([]byte("UpdateFeeAddToken(uint256)"))
"ForgeBatch(uint32,uint16)")) logHermezWithdrawEvent = crypto.Keccak256Hash([]byte("WithdrawEvent(uint48,uint32,bool)"))
logHermezUpdateForgeL1L2BatchTimeout = crypto.Keccak256Hash([]byte( logHermezUpdateBucketWithdraw = crypto.Keccak256Hash([]byte("UpdateBucketWithdraw(uint8,uint256,uint256)"))
"UpdateForgeL1L2BatchTimeout(uint8)")) logHermezUpdateWithdrawalDelay = crypto.Keccak256Hash([]byte("UpdateWithdrawalDelay(uint64)"))
logHermezUpdateFeeAddToken = crypto.Keccak256Hash([]byte( logHermezUpdateBucketsParameters = crypto.Keccak256Hash([]byte("UpdateBucketsParameters(uint256[4][" +
"UpdateFeeAddToken(uint256)")) strconv.Itoa(common.RollupConstNumBuckets) + "])"))
logHermezWithdrawEvent = crypto.Keccak256Hash([]byte( logHermezUpdateTokenExchange = crypto.Keccak256Hash([]byte("UpdateTokenExchange(address[],uint64[])"))
"WithdrawEvent(uint48,uint32,bool)")) logHermezSafeMode = crypto.Keccak256Hash([]byte("SafeMode()"))
logHermezUpdateBucketWithdraw = crypto.Keccak256Hash([]byte( logHermezInitialize = crypto.Keccak256Hash([]byte("InitializeHermezEvent(uint8,uint256,uint64)"))
"UpdateBucketWithdraw(uint8,uint256,uint256)"))
logHermezUpdateWithdrawalDelay = crypto.Keccak256Hash([]byte(
"UpdateWithdrawalDelay(uint64)"))
logHermezUpdateBucketsParameters = crypto.Keccak256Hash([]byte(
"UpdateBucketsParameters(uint256[4][" + strconv.Itoa(common.RollupConstNumBuckets) + "])"))
logHermezUpdateTokenExchange = crypto.Keccak256Hash([]byte(
"UpdateTokenExchange(address[],uint64[])"))
logHermezSafeMode = crypto.Keccak256Hash([]byte(
"SafeMode()"))
logHermezInitialize = crypto.Keccak256Hash([]byte(
"InitializeHermezEvent(uint8,uint256,uint64)"))
) )
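The variables above are the keccak256 hashes of the canonical event signatures, which is what shows up as vLog.Topics[0] in the filtered logs handled below. A minimal standalone sketch of how one such topic is derived:

```
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// keccak256 of the canonical signature (no spaces, no parameter names)
	// is the value matched against vLog.Topics[0] when decoding logs.
	topic := crypto.Keccak256Hash([]byte("ForgeBatch(uint32,uint16)"))
	fmt.Println("ForgeBatch topic:", topic.Hex())
}
```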
// RollupEventInit returns the initialize event with its corresponding block number // RollupEventInit returns the initialize event with its corresponding block number
@@ -769,47 +729,37 @@ func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error)
} }
var rollupInit RollupEventInitialize var rollupInit RollupEventInitialize
if err := c.contractAbi.UnpackIntoInterface(&rollupInit, "InitializeHermezEvent", if err := c.contractAbi.UnpackIntoInterface(&rollupInit, "InitializeHermezEvent", vLog.Data); err != nil {
vLog.Data); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err) return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err)
} }
// RollupEventsByBlock returns the events in a block that happened in the // RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
// Rollup Smart Contract. func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error) {
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *RollupClient) RollupEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*RollupEvents, error) {
var rollupEvents RollupEvents var rollupEvents RollupEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
BlockHash: blockHash, FromBlock: big.NewInt(blockNum),
FromBlock: blockNumBigInt, ToBlock: big.NewInt(blockNum),
ToBlock: blockNumBigInt,
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
Topics: [][]ethCommon.Hash{}, BlockHash: nil,
Topics: [][]ethCommon.Hash{},
} }
logs, err := c.client.client.FilterLogs(context.Background(), query) logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
if len(logs) == 0 { if len(logs) > 0 {
return nil, nil blockHash = &logs[0].BlockHash
} }
for _, vLog := range logs { for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash { if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String()) log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent) return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
} }
switch vLog.Topics[0] { switch vLog.Topics[0] {
case logHermezL1UserTxEvent: case logHermezL1UserTxEvent:
@@ -817,11 +767,11 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var L1UserTx RollupEventL1UserTx var L1UserTx RollupEventL1UserTx
err := c.contractAbi.UnpackIntoInterface(&L1UserTxAux, "L1UserTxEvent", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&L1UserTxAux, "L1UserTxEvent", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
L1Tx, err := common.L1UserTxFromBytes(L1UserTxAux.L1UserTx) L1Tx, err := common.L1UserTxFromBytes(L1UserTxAux.L1UserTx)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
toForgeL1TxsNum := new(big.Int).SetBytes(vLog.Topics[1][:]).Int64() toForgeL1TxsNum := new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
L1Tx.ToForgeL1TxsNum = &toForgeL1TxsNum L1Tx.ToForgeL1TxsNum = &toForgeL1TxsNum
@@ -833,7 +783,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var addToken RollupEventAddToken var addToken RollupEventAddToken
err := c.contractAbi.UnpackIntoInterface(&addToken, "AddToken", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&addToken, "AddToken", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
addToken.TokenAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) addToken.TokenAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
rollupEvents.AddToken = append(rollupEvents.AddToken, addToken) rollupEvents.AddToken = append(rollupEvents.AddToken, addToken)
@@ -841,7 +791,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var forgeBatch RollupEventForgeBatch var forgeBatch RollupEventForgeBatch
err := c.contractAbi.UnpackIntoInterface(&forgeBatch, "ForgeBatch", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&forgeBatch, "ForgeBatch", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
forgeBatch.BatchNum = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64() forgeBatch.BatchNum = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
forgeBatch.EthTxHash = vLog.TxHash forgeBatch.EthTxHash = vLog.TxHash
@@ -851,10 +801,9 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var updateForgeL1L2BatchTimeout struct { var updateForgeL1L2BatchTimeout struct {
NewForgeL1L2BatchTimeout uint8 NewForgeL1L2BatchTimeout uint8
} }
err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, "UpdateForgeL1L2BatchTimeout", vLog.Data)
"UpdateForgeL1L2BatchTimeout", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupEvents.UpdateForgeL1L2BatchTimeout = append(rollupEvents.UpdateForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout = append(rollupEvents.UpdateForgeL1L2BatchTimeout,
RollupEventUpdateForgeL1L2BatchTimeout{ RollupEventUpdateForgeL1L2BatchTimeout{
@@ -864,7 +813,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var updateFeeAddToken RollupEventUpdateFeeAddToken var updateFeeAddToken RollupEventUpdateFeeAddToken
err := c.contractAbi.UnpackIntoInterface(&updateFeeAddToken, "UpdateFeeAddToken", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&updateFeeAddToken, "UpdateFeeAddToken", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupEvents.UpdateFeeAddToken = append(rollupEvents.UpdateFeeAddToken, updateFeeAddToken) rollupEvents.UpdateFeeAddToken = append(rollupEvents.UpdateFeeAddToken, updateFeeAddToken)
case logHermezWithdrawEvent: case logHermezWithdrawEvent:
@@ -880,31 +829,28 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
case logHermezUpdateBucketWithdraw: case logHermezUpdateBucketWithdraw:
var updateBucketWithdrawAux rollupEventUpdateBucketWithdrawAux var updateBucketWithdrawAux rollupEventUpdateBucketWithdrawAux
var updateBucketWithdraw RollupEventUpdateBucketWithdraw var updateBucketWithdraw RollupEventUpdateBucketWithdraw
err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, "UpdateBucketWithdraw", vLog.Data)
"UpdateBucketWithdraw", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals
updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()) updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64())
updateBucketWithdraw.BlockStamp = new(big.Int).SetBytes(vLog.Topics[2][:]).Int64() updateBucketWithdraw.BlockStamp = new(big.Int).SetBytes(vLog.Topics[2][:]).Int64()
rollupEvents.UpdateBucketWithdraw = rollupEvents.UpdateBucketWithdraw = append(rollupEvents.UpdateBucketWithdraw, updateBucketWithdraw)
append(rollupEvents.UpdateBucketWithdraw, updateBucketWithdraw)
case logHermezUpdateWithdrawalDelay: case logHermezUpdateWithdrawalDelay:
var withdrawalDelay RollupEventUpdateWithdrawalDelay var withdrawalDelay RollupEventUpdateWithdrawalDelay
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "UpdateWithdrawalDelay", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "UpdateWithdrawalDelay", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupEvents.UpdateWithdrawalDelay = append(rollupEvents.UpdateWithdrawalDelay, withdrawalDelay) rollupEvents.UpdateWithdrawalDelay = append(rollupEvents.UpdateWithdrawalDelay, withdrawalDelay)
case logHermezUpdateBucketsParameters: case logHermezUpdateBucketsParameters:
var bucketsParametersAux rollupEventUpdateBucketsParametersAux var bucketsParametersAux rollupEventUpdateBucketsParametersAux
var bucketsParameters RollupEventUpdateBucketsParameters var bucketsParameters RollupEventUpdateBucketsParameters
err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, "UpdateBucketsParameters", vLog.Data)
"UpdateBucketsParameters", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
for i, bucket := range bucketsParametersAux.ArrayBuckets { for i, bucket := range bucketsParametersAux.ArrayBuckets {
bucketsParameters.ArrayBuckets[i].CeilUSD = bucket[0] bucketsParameters.ArrayBuckets[i].CeilUSD = bucket[0]
@@ -912,13 +858,12 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
bucketsParameters.ArrayBuckets[i].BlockWithdrawalRate = bucket[2] bucketsParameters.ArrayBuckets[i].BlockWithdrawalRate = bucket[2]
bucketsParameters.ArrayBuckets[i].MaxWithdrawals = bucket[3] bucketsParameters.ArrayBuckets[i].MaxWithdrawals = bucket[3]
} }
rollupEvents.UpdateBucketsParameters = rollupEvents.UpdateBucketsParameters = append(rollupEvents.UpdateBucketsParameters, bucketsParameters)
append(rollupEvents.UpdateBucketsParameters, bucketsParameters)
case logHermezUpdateTokenExchange: case logHermezUpdateTokenExchange:
var tokensExchange RollupEventUpdateTokenExchange var tokensExchange RollupEventUpdateTokenExchange
err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupEvents.UpdateTokenExchange = append(rollupEvents.UpdateTokenExchange, tokensExchange) rollupEvents.UpdateTokenExchange = append(rollupEvents.UpdateTokenExchange, tokensExchange)
case logHermezSafeMode: case logHermezSafeMode:
@@ -940,16 +885,15 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
bucketsParameters) bucketsParameters)
} }
} }
return &rollupEvents, nil return &rollupEvents, blockHash, nil
} }
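With the single-argument signature on the right-hand side, callers receive the block hash of the matched logs alongside the events and can use it for reorg detection instead of passing a hash in. A hypothetical caller sketch (the synchronizer itself is not shown in this diff; the interface and helper below are assumptions):

```
package sketch

import (
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/eth"
	"github.com/hermeznetwork/tracerr"
)

// rollupEventsGetter is an assumed narrow view of *eth.RollupClient; only the
// method changed in this diff is needed for the sketch.
type rollupEventsGetter interface {
	RollupEventsByBlock(blockNum int64) (*eth.RollupEvents, *ethCommon.Hash, error)
}

// syncRollupBlock fetches the events of one block and checks the returned block
// hash against the hash the caller expected, to spot reorgs.
func syncRollupBlock(c rollupEventsGetter, blockNum int64, expectedHash ethCommon.Hash) (*eth.RollupEvents, error) {
	events, blockHash, err := c.RollupEventsByBlock(blockNum)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	// Per the implementation above, blockHash is nil when the block has no Hermez logs.
	if blockHash != nil && *blockHash != expectedHash {
		return nil, tracerr.Wrap(fmt.Errorf("reorg detected at block %d", blockNum))
	}
	return events, nil
}
```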
// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the // RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the
// Rollup Smart Contract in the given transaction, and the sender address. // Rollup Smart Contract in the given transaction, and the sender address.
func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash) tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("TransactionByHash: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
txData := tx.Data() txData := tx.Data()
@@ -961,8 +905,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
sender, err := c.client.client.TransactionSender(context.Background(), tx, sender, err := c.client.client.TransactionSender(context.Background(), tx, receipt.Logs[0].BlockHash, receipt.Logs[0].Index)
receipt.Logs[0].BlockHash, receipt.Logs[0].Index)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
@@ -987,9 +930,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
FeeIdxCoordinator: []common.Idx{}, FeeIdxCoordinator: []common.Idx{},
} }
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1) //nolint:gomnd lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
l1UserTxsData := []byte{} l1UserTxsData := []byte{}
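As a quick check of the per-transaction size computed above with the right-hand expression (nLevels/8)*2 + 2 + 1: two nLevels-bit account indexes, a 2-byte compressed amount and 1 fee byte. The field breakdown is an inference from the surrounding parsing code, not stated in the diff.

```
package main

import "fmt"

func main() {
	for _, nLevels := range []int64{16, 32} {
		lenL1L2TxsBytes := (nLevels/8)*2 + 2 + 1
		fmt.Printf("nLevels=%d -> %d bytes per tx in the data-availability encoding\n", nLevels, lenL1L2TxsBytes)
	}
}
```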
@@ -997,9 +940,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
l1UserTxsData = aux.L1L2TxsData[:numBytesL1TxUser] l1UserTxsData = aux.L1L2TxsData[:numBytesL1TxUser]
} }
for i := 0; i < int(l1UserTxsLen); i++ { for i := 0; i < int(l1UserTxsLen); i++ {
l1Tx, err := l1Tx, err := common.L1TxFromDataAvailability(l1UserTxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes], uint32(nLevels))
common.L1TxFromDataAvailability(l1UserTxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes],
uint32(nLevels))
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
@@ -1011,17 +952,14 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
} }
numTxsL2 := len(l2TxsData) / lenL1L2TxsBytes numTxsL2 := len(l2TxsData) / lenL1L2TxsBytes
for i := 0; i < numTxsL2; i++ { for i := 0; i < numTxsL2; i++ {
l2Tx, err := l2Tx, err := common.L2TxFromBytesDataAvailability(l2TxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes], int(nLevels))
common.L2TxFromBytesDataAvailability(l2TxsData[i*lenL1L2TxsBytes:(i+1)*lenL1L2TxsBytes],
int(nLevels))
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx) rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
} }
for i := 0; i < numTxsL1Coord; i++ { for i := 0; i < numTxsL1Coord; i++ {
bytesL1Coordinator := bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes] //nolint:lll
var signature []byte var signature []byte
v := bytesL1Coordinator[0] v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33] s := bytesL1Coordinator[1:33]
@@ -1034,29 +972,24 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash,
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
rollupForgeBatchArgs.L1CoordinatorTxs = append(rollupForgeBatchArgs.L1CoordinatorTxs, *l1Tx) rollupForgeBatchArgs.L1CoordinatorTxs = append(rollupForgeBatchArgs.L1CoordinatorTxs, *l1Tx)
rollupForgeBatchArgs.L1CoordinatorTxsAuths = rollupForgeBatchArgs.L1CoordinatorTxsAuths = append(rollupForgeBatchArgs.L1CoordinatorTxsAuths, signature)
append(rollupForgeBatchArgs.L1CoordinatorTxsAuths, signature)
} }
lenFeeIdxCoordinatorBytes := int(nLevels / 8) //nolint:gomnd lenFeeIdxCoordinatorBytes := int(nLevels / 8) //nolint:gomnd
numFeeIdxCoordinator := len(aux.FeeIdxCoordinator) / lenFeeIdxCoordinatorBytes numFeeIdxCoordinator := len(aux.FeeIdxCoordinator) / lenFeeIdxCoordinatorBytes
for i := 0; i < numFeeIdxCoordinator; i++ { for i := 0; i < numFeeIdxCoordinator; i++ {
var paddedFeeIdx [6]byte var paddedFeeIdx [6]byte
// TODO: This check is not necessary: the first case will always work. Test it // TODO: This check is not necessary: the first case will always work. Test it before removing the if.
// before removing the if.
if lenFeeIdxCoordinatorBytes < common.IdxBytesLen { if lenFeeIdxCoordinatorBytes < common.IdxBytesLen {
copy(paddedFeeIdx[6-lenFeeIdxCoordinatorBytes:], copy(paddedFeeIdx[6-lenFeeIdxCoordinatorBytes:], aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
} else { } else {
copy(paddedFeeIdx[:], copy(paddedFeeIdx[:], aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
aux.FeeIdxCoordinator[i*lenFeeIdxCoordinatorBytes:(i+1)*lenFeeIdxCoordinatorBytes])
} }
feeIdxCoordinator, err := common.IdxFromBytes(paddedFeeIdx[:]) feeIdxCoordinator, err := common.IdxFromBytes(paddedFeeIdx[:])
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
if feeIdxCoordinator != common.Idx(0) { if feeIdxCoordinator != common.Idx(0) {
rollupForgeBatchArgs.FeeIdxCoordinator = rollupForgeBatchArgs.FeeIdxCoordinator = append(rollupForgeBatchArgs.FeeIdxCoordinator, feeIdxCoordinator)
append(rollupForgeBatchArgs.FeeIdxCoordinator, feeIdxCoordinator)
} }
} }
return &rollupForgeBatchArgs, &sender, nil return &rollupForgeBatchArgs, &sender, nil

View File

@@ -91,7 +91,7 @@ func TestRollupAddToken(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress) assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress)
@@ -116,8 +116,7 @@ func TestRollupForgeBatch(t *testing.T) {
minBid.SetString("11000000000000000000", 10) minBid.SetString("11000000000000000000", 10)
budget := new(big.Int) budget := new(big.Int)
budget.SetString("45200000000000000000", 10) budget.SetString("45200000000000000000", 10)
_, err = auctionClient.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, _, err = auctionClient.AuctionMultiBid(budget, currentSlot+4, currentSlot+10, slotSet, maxBid, minBid, deadline)
maxBid, minBid, deadline)
require.NoError(t, err) require.NoError(t, err)
// Add Blocks // Add Blocks
@@ -129,18 +128,12 @@ func TestRollupForgeBatch(t *testing.T) {
// Forge Batch 1 // Forge Batch 1
args := new(RollupForgeBatchArgs) args := new(RollupForgeBatchArgs)
// When encoded, 64 times the 0 idx means that no idx to collect fees is specified. args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
args.FeeIdxCoordinator = []common.Idx{} l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
l1CoordinatorBytes, err := hex.DecodeString(
"1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf" +
"42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230" +
"de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
require.NoError(t, err) require.NoError(t, err)
numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
for i := 0; i < numTxsL1; i++ { for i := 0; i < numTxsL1; i++ {
bytesL1Coordinator := bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*
common.RollupConstL1CoordinatorTotalBytes]
var signature []byte var signature []byte
v := bytesL1Coordinator[0] v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33] s := bytesL1Coordinator[1:33]
@@ -156,12 +149,9 @@ func TestRollupForgeBatch(t *testing.T) {
args.L1UserTxs = []common.L1Tx{} args.L1UserTxs = []common.L1Tx{}
args.L2TxsData = []common.L2Tx{} args.L2TxsData = []common.L2Tx{}
newStateRoot := new(big.Int) newStateRoot := new(big.Int)
newStateRoot.SetString( newStateRoot.SetString("18317824016047294649053625209337295956588174734569560016974612130063629505228", 10)
"18317824016047294649053625209337295956588174734569560016974612130063629505228",
10)
newExitRoot := new(big.Int) newExitRoot := new(big.Int)
bytesNumExitRoot, err := hex.DecodeString( bytesNumExitRoot, err := hex.DecodeString("10a89d5fe8d488eda1ba371d633515739933c706c210c604f5bd209180daa43b")
"10a89d5fe8d488eda1ba371d633515739933c706c210c604f5bd209180daa43b")
require.NoError(t, err) require.NoError(t, err)
newExitRoot.SetBytes(bytesNumExitRoot) newExitRoot.SetBytes(bytesNumExitRoot)
args.NewLastIdx = int64(300) args.NewLastIdx = int64(300)
@@ -184,7 +174,7 @@ func TestRollupForgeBatch(t *testing.T) {
currentBlockNum, err = rollupClient.client.EthLastBlock() currentBlockNum, err = rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum) assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum)
@@ -213,11 +203,10 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, newForgeL1L2BatchTimeout, assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
} }
func TestRollupUpdateFeeAddToken(t *testing.T) { func TestRollupUpdateFeeAddToken(t *testing.T) {
@@ -227,7 +216,7 @@ func TestRollupUpdateFeeAddToken(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken) assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken)
@@ -246,7 +235,7 @@ func TestRollupUpdateBucketsParameters(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
blockStampBucket = currentBlockNum blockStampBucket = currentBlockNum
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, bucketsParameters, rollupEvents.UpdateBucketsParameters[0].ArrayBuckets) assert.Equal(t, bucketsParameters, rollupEvents.UpdateBucketsParameters[0].ArrayBuckets)
} }
@@ -257,10 +246,9 @@ func TestRollupUpdateWithdrawalDelay(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, newWithdrawalDelay, assert.Equal(t, newWithdrawalDelay, int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
} }
func TestRollupUpdateTokenExchange(t *testing.T) { func TestRollupUpdateTokenExchange(t *testing.T) {
@@ -275,7 +263,7 @@ func TestRollupUpdateTokenExchange(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, addressArray, rollupEvents.UpdateTokenExchange[0].AddressArray) assert.Equal(t, addressArray, rollupEvents.UpdateTokenExchange[0].AddressArray)
assert.Equal(t, valueArray, rollupEvents.UpdateTokenExchange[0].ValueArray) assert.Equal(t, valueArray, rollupEvents.UpdateTokenExchange[0].ValueArray)
@@ -299,26 +287,23 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ) assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) { func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
key := genKeysBjj(1) key := genKeysBjj(1)
fromIdxInt64 := int64(0) fromIdxInt64 := int64(0)
@@ -334,26 +319,23 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ) assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) { func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
key := genKeysBjj(3) key := genKeysBjj(3)
fromIdxInt64 := int64(0) fromIdxInt64 := int64(0)
@@ -369,26 +351,23 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ) assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxETHDeposit(t *testing.T) { func TestRollupL1UserTxETHDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(256) fromIdxInt64 := int64(256)
toIdxInt64 := int64(0) toIdxInt64 := int64(0)
@@ -404,25 +383,22 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20Deposit(t *testing.T) { func TestRollupL1UserTxERC20Deposit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(257) fromIdxInt64 := int64(257)
toIdxInt64 := int64(0) toIdxInt64 := int64(0)
@@ -437,25 +413,22 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) { func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(258) fromIdxInt64 := int64(258)
toIdxInt64 := int64(0) toIdxInt64 := int64(0)
@@ -469,25 +442,22 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxETHDepositTransfer(t *testing.T) { func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(256) fromIdxInt64 := int64(256)
toIdxInt64 := int64(257) toIdxInt64 := int64(257)
@@ -503,25 +473,22 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) { func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(257) fromIdxInt64 := int64(257)
toIdxInt64 := int64(258) toIdxInt64 := int64(258)
@@ -536,25 +503,22 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) { func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(258) fromIdxInt64 := int64(258)
toIdxInt64 := int64(259) toIdxInt64 := int64(259)
@@ -569,25 +533,22 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) { func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(256) fromIdxInt64 := int64(256)
toIdxInt64 := int64(257) toIdxInt64 := int64(257)
@@ -603,25 +564,22 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) { func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(257) fromIdxInt64 := int64(257)
toIdxInt64 := int64(258) toIdxInt64 := int64(258)
@@ -636,25 +594,22 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) { func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(258) fromIdxInt64 := int64(258)
toIdxInt64 := int64(259) toIdxInt64 := int64(259)
@@ -669,25 +624,22 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxETHForceTransfer(t *testing.T) { func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(256) fromIdxInt64 := int64(256)
toIdxInt64 := int64(257) toIdxInt64 := int64(257)
@@ -702,25 +654,22 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) { func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(257) fromIdxInt64 := int64(257)
toIdxInt64 := int64(258) toIdxInt64 := int64(258)
@@ -734,25 +683,22 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) { func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(259) fromIdxInt64 := int64(259)
toIdxInt64 := int64(260) toIdxInt64 := int64(260)
@@ -766,25 +712,22 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxETHForceExit(t *testing.T) { func TestRollupL1UserTxETHForceExit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(256) fromIdxInt64 := int64(256)
toIdxInt64 := int64(1) toIdxInt64 := int64(1)
@@ -799,25 +742,22 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDUint32, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20ForceExit(t *testing.T) { func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, rollupClientAux2, err := NewRollupClient(ethereumClientAux2, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(257) fromIdxInt64 := int64(257)
toIdxInt64 := int64(1) toIdxInt64 := int64(1)
@@ -831,25 +771,22 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux2.RollupL1UserTxERC20ETH(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
l1Tx.DepositAmount, l1Tx.Amount, tokenHEZID, toIdxInt64)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux2.client.account.Address, assert.Equal(t, rollupClientAux2.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) { func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, rollupClientAux, err := NewRollupClient(ethereumClientAux, hermezRollupAddressConst, tokenHEZ)
tokenHEZ)
require.NoError(t, err) require.NoError(t, err)
fromIdxInt64 := int64(258) fromIdxInt64 := int64(258)
toIdxInt64 := int64(1) toIdxInt64 := int64(1)
@@ -865,20 +802,18 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
} }
L1UserTxs = append(L1UserTxs, l1Tx) L1UserTxs = append(L1UserTxs, l1Tx)
_, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, _, err = rollupClientAux.RollupL1UserTxERC20Permit(l1Tx.FromBJJ, fromIdxInt64, l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
l1Tx.DepositAmount, l1Tx.Amount, tokenIDERC777, toIdxInt64, deadline)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx) assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount) assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID) assert.Equal(t, l1Tx.TokenID, rollupEvents.L1UserTx[0].L1UserTx.TokenID)
assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount) assert.Equal(t, l1Tx.Amount, rollupEvents.L1UserTx[0].L1UserTx.Amount)
assert.Equal(t, rollupClientAux.client.account.Address, assert.Equal(t, rollupClientAux.client.account.Address, rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
rollupEvents.L1UserTx[0].L1UserTx.FromEthAddr)
} }
func TestRollupForgeBatch2(t *testing.T) { func TestRollupForgeBatch2(t *testing.T) {
@@ -887,15 +822,14 @@ func TestRollupForgeBatch2(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum) assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum)
// Forge Batch 3 // Forge Batch 3
args := new(RollupForgeBatchArgs) args := new(RollupForgeBatchArgs)
// When encoded, 64 times the 0 idx means that no idx to collect fees is specified. args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
args.FeeIdxCoordinator = []common.Idx{}
args.L1CoordinatorTxs = argsForge.L1CoordinatorTxs args.L1CoordinatorTxs = argsForge.L1CoordinatorTxs
args.L1CoordinatorTxsAuths = argsForge.L1CoordinatorTxsAuths args.L1CoordinatorTxsAuths = argsForge.L1CoordinatorTxsAuths
for i := 0; i < len(L1UserTxs); i++ { for i := 0; i < len(L1UserTxs); i++ {
@@ -903,19 +837,14 @@ func TestRollupForgeBatch2(t *testing.T) {
l1UserTx.EffectiveAmount = l1UserTx.Amount l1UserTx.EffectiveAmount = l1UserTx.Amount
l1Bytes, err := l1UserTx.BytesDataAvailability(uint32(nLevels)) l1Bytes, err := l1UserTx.BytesDataAvailability(uint32(nLevels))
require.NoError(t, err) require.NoError(t, err)
l1UserTxDataAvailability, err := common.L1TxFromDataAvailability(l1Bytes, l1UserTxDataAvailability, err := common.L1TxFromDataAvailability(l1Bytes, uint32(nLevels))
uint32(nLevels))
require.NoError(t, err) require.NoError(t, err)
args.L1UserTxs = append(args.L1UserTxs, *l1UserTxDataAvailability) args.L1UserTxs = append(args.L1UserTxs, *l1UserTxDataAvailability)
} }
newStateRoot := new(big.Int) newStateRoot := new(big.Int)
newStateRoot.SetString( newStateRoot.SetString("18317824016047294649053625209337295956588174734569560016974612130063629505228", 10)
"18317824016047294649053625209337295956588174734569560016974612130063629505228",
10)
newExitRoot := new(big.Int) newExitRoot := new(big.Int)
newExitRoot.SetString( newExitRoot.SetString("1114281409737474688393837964161044726766678436313681099613347372031079422302", 10)
"1114281409737474688393837964161044726766678436313681099613347372031079422302",
10)
amount := new(big.Int) amount := new(big.Int)
amount.SetString("79000000", 10) amount.SetString("79000000", 10)
l2Tx := common.L2Tx{ l2Tx := common.L2Tx{
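
The loop in this hunk round-trips each L1 user tx through its data-availability encoding. A hedged standalone sketch of the same round trip using the two helpers named in the diff (`nLevels` and the field values are illustrative, not taken from the test):

```
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	nLevels := uint32(32) // assumed value; must match the rollup's nLevels

	tx := common.L1Tx{
		FromIdx:         256,
		ToIdx:           257,
		Amount:          big.NewInt(79000000),
		EffectiveAmount: big.NewInt(79000000),
	}
	// Encode only the data-availability fields of the tx...
	daBytes, err := tx.BytesDataAvailability(nLevels)
	if err != nil {
		log.Fatal(err)
	}
	// ...and rebuild a tx from them, as the loop above does for each
	// forged L1 user tx.
	decoded, err := common.L1TxFromDataAvailability(daBytes, nLevels)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.FromIdx, decoded.ToIdx)
}
```
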
@@ -947,7 +876,7 @@ func TestRollupForgeBatch2(t *testing.T) {
currentBlockNum, err = rollupClient.client.EthLastBlock() currentBlockNum, err = rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err = rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err = rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(3), rollupEvents.ForgeBatch[0].BatchNum) assert.Equal(t, int64(3), rollupEvents.ForgeBatch[0].BatchNum)
@@ -975,8 +904,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
var pkComp babyjub.PublicKeyComp var pkComp babyjub.PublicKeyComp
pkCompBE, err := pkCompBE, err := hex.DecodeString("adc3b754f8da621967b073a787bef8eec7052f2ba712b23af57d98f65beea8b2")
hex.DecodeString("adc3b754f8da621967b073a787bef8eec7052f2ba712b23af57d98f65beea8b2")
require.NoError(t, err) require.NoError(t, err)
pkCompLE := common.SwapEndianness(pkCompBE) pkCompLE := common.SwapEndianness(pkCompBE)
copy(pkComp[:], pkCompLE) copy(pkComp[:], pkCompLE)
@@ -986,25 +914,21 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
numExitRoot := int64(3) numExitRoot := int64(3)
fromIdx := int64(256) fromIdx := int64(256)
amount, _ := new(big.Int).SetString("20000000000000000000", 10) amount, _ := new(big.Int).SetString("20000000000000000000", 10)
// siblingBytes0, err := new(big.Int).SetString( // siblingBytes0, err := new(big.Int).SetString("19508838618377323910556678335932426220272947530531646682154552299216398748115", 10)
// "19508838618377323910556678335932426220272947530531646682154552299216398748115",
// 10)
// require.NoError(t, err) // require.NoError(t, err)
// siblingBytes1, err := new(big.Int).SetString( // siblingBytes1, err := new(big.Int).SetString("15198806719713909654457742294233381653226080862567104272457668857208564789571", 10)
// "15198806719713909654457742294233381653226080862567104272457668857208564789571", 10)
// require.NoError(t, err) // require.NoError(t, err)
var siblings []*big.Int var siblings []*big.Int
// siblings = append(siblings, siblingBytes0) // siblings = append(siblings, siblingBytes0)
// siblings = append(siblings, siblingBytes1) // siblings = append(siblings, siblingBytes1)
instantWithdraw := true instantWithdraw := true
_, err = rollupClientAux.RollupWithdrawMerkleProof(pkComp, tokenID, numExitRoot, fromIdx, _, err = rollupClientAux.RollupWithdrawMerkleProof(pkComp, tokenID, numExitRoot, fromIdx, amount, siblings, instantWithdraw)
amount, siblings, instantWithdraw)
require.NoError(t, err) require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx) assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx)
@@ -1027,7 +951,7 @@ func TestRollupSafeMode(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock() currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err) require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil) rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err) require.NoError(t, err)
auxEvent := new(RollupEventSafeMode) auxEvent := new(RollupEventSafeMode)
assert.Equal(t, auxEvent, &rollupEvents.SafeMode[0]) assert.Equal(t, auxEvent, &rollupEvents.SafeMode[0])

View File

@@ -132,10 +132,9 @@ type WDelayerInterface interface {
WDelayerDepositInfo(owner, token ethCommon.Address) (depositInfo DepositState, err error) WDelayerDepositInfo(owner, token ethCommon.Address) (depositInfo DepositState, err error)
WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (*types.Transaction, error) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error) WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error)
WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
amount *big.Int) (*types.Transaction, error)
WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error)
WDelayerConstants() (*common.WDelayerConstants, error) WDelayerConstants() (*common.WDelayerConstants, error)
WDelayerEventInit() (*WDelayerEventInitialize, int64, error) WDelayerEventInit() (*WDelayerEventInitialize, int64, error)
} }
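
The interface now returns the block hash alongside the events, mirroring the rollup client change above. A hedged sketch of how a consumer might use that hash, assuming it sits in the same package so `WDelayerInterface` and the `ethCommon` alias are in scope (the reorg-check use is an assumption, not something this diff states):

```
// Illustrative only: fetch per-block WDelayer events and use the returned
// hash as a consistency check against hashes the caller already trusts.
func syncWDelayerEvents(w WDelayerInterface, from, to int64, trusted map[int64]ethCommon.Hash) error {
	for n := from; n <= to; n++ {
		events, blockHash, err := w.WDelayerEventsByBlock(n)
		if err != nil {
			return err
		}
		// blockHash is nil when the block contains no WDelayer events.
		if blockHash != nil && *blockHash != trusted[n] {
			return fmt.Errorf("block %d hash mismatch, possible reorg", n)
		}
		_ = events // hand the events to whatever consumes WDelayerEvents
	}
	return nil
}
```
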
@@ -144,8 +143,7 @@ type WDelayerInterface interface {
// Implementation // Implementation
// //
// WDelayerClient is the implementation of the interface to the WithdrawDelayer // WDelayerClient is the implementation of the interface to the WithdrawDelayer Smart Contract in ethereum.
// Smart Contract in ethereum.
type WDelayerClient struct { type WDelayerClient struct {
client *EthereumClient client *EthereumClient
address ethCommon.Address address ethCommon.Address
@@ -174,8 +172,7 @@ func NewWDelayerClient(client *EthereumClient, address ethCommon.Address) (*WDel
} }
// WDelayerGetHermezGovernanceAddress is the interface to call the smart contract function // WDelayerGetHermezGovernanceAddress is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() ( func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() (hermezGovernanceAddress *ethCommon.Address, err error) {
hermezGovernanceAddress *ethCommon.Address, err error) {
var _hermezGovernanceAddress ethCommon.Address var _hermezGovernanceAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
_hermezGovernanceAddress, err = c.wdelayer.GetHermezGovernanceAddress(c.opts) _hermezGovernanceAddress, err = c.wdelayer.GetHermezGovernanceAddress(c.opts)
@@ -187,8 +184,7 @@ func (c *WDelayerClient) WDelayerGetHermezGovernanceAddress() (
} }
// WDelayerTransferGovernance is the interface to call the smart contract function // WDelayerTransferGovernance is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerTransferGovernance(newAddress ethCommon.Address) ( func (c *WDelayerClient) WDelayerTransferGovernance(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -214,8 +210,7 @@ func (c *WDelayerClient) WDelayerClaimGovernance() (tx *types.Transaction, err e
} }
// WDelayerGetEmergencyCouncil is the interface to call the smart contract function // WDelayerGetEmergencyCouncil is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress *ethCommon.Address, func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress *ethCommon.Address, err error) {
err error) {
var _emergencyCouncilAddress ethCommon.Address var _emergencyCouncilAddress ethCommon.Address
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
_emergencyCouncilAddress, err = c.wdelayer.GetEmergencyCouncil(c.opts) _emergencyCouncilAddress, err = c.wdelayer.GetEmergencyCouncil(c.opts)
@@ -227,8 +222,7 @@ func (c *WDelayerClient) WDelayerGetEmergencyCouncil() (emergencyCouncilAddress
} }
// WDelayerTransferEmergencyCouncil is the interface to call the smart contract function // WDelayerTransferEmergencyCouncil is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) ( func (c *WDelayerClient) WDelayerTransferEmergencyCouncil(newAddress ethCommon.Address) (tx *types.Transaction, err error) {
tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -277,8 +271,7 @@ func (c *WDelayerClient) WDelayerGetWithdrawalDelay() (withdrawalDelay int64, er
} }
// WDelayerGetEmergencyModeStartingTime is the interface to call the smart contract function // WDelayerGetEmergencyModeStartingTime is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerGetEmergencyModeStartingTime() (emergencyModeStartingTime int64, func (c *WDelayerClient) WDelayerGetEmergencyModeStartingTime() (emergencyModeStartingTime int64, err error) {
err error) {
var _emergencyModeStartingTime uint64 var _emergencyModeStartingTime uint64
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
_emergencyModeStartingTime, err = c.wdelayer.GetEmergencyModeStartingTime(c.opts) _emergencyModeStartingTime, err = c.wdelayer.GetEmergencyModeStartingTime(c.opts)
@@ -303,8 +296,7 @@ func (c *WDelayerClient) WDelayerEnableEmergencyMode() (tx *types.Transaction, e
} }
// WDelayerChangeWithdrawalDelay is the interface to call the smart contract function // WDelayerChangeWithdrawalDelay is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) ( func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64) (tx *types.Transaction, err error) {
tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -317,8 +309,7 @@ func (c *WDelayerClient) WDelayerChangeWithdrawalDelay(newWithdrawalDelay uint64
} }
// WDelayerDepositInfo is the interface to call the smart contract function // WDelayerDepositInfo is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) ( func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) (depositInfo DepositState, err error) {
depositInfo DepositState, err error) {
if err := c.client.Call(func(ec *ethclient.Client) error { if err := c.client.Call(func(ec *ethclient.Client) error {
amount, depositTimestamp, err := c.wdelayer.DepositInfo(c.opts, owner, token) amount, depositTimestamp, err := c.wdelayer.DepositInfo(c.opts, owner, token)
depositInfo.Amount = amount depositInfo.Amount = amount
@@ -331,8 +322,7 @@ func (c *WDelayerClient) WDelayerDepositInfo(owner, token ethCommon.Address) (
} }
// WDelayerDeposit is the interface to call the smart contract function // WDelayerDeposit is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) ( func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
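
For context, a hedged sketch of how client code could read back a delayed deposit through these wrappers, assuming the same `eth` package scope; `owner` and `token` are illustrative:

```
// Illustrative only: query the WDelayer deposit slot for (owner, token).
func printPendingDeposit(wd *WDelayerClient, owner, token ethCommon.Address) error {
	info, err := wd.WDelayerDepositInfo(owner, token)
	if err != nil {
		return err
	}
	// DepositState carries the deposited amount (and, per the code above,
	// the deposit timestamp) still waiting out the withdrawal delay.
	fmt.Printf("pending amount: %v\n", info.Amount)
	return nil
}
```
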
@@ -345,8 +335,7 @@ func (c *WDelayerClient) WDelayerDeposit(owner, token ethCommon.Address, amount
} }
// WDelayerWithdrawal is the interface to call the smart contract function // WDelayerWithdrawal is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction, func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx *types.Transaction, err error) {
err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -359,8 +348,7 @@ func (c *WDelayerClient) WDelayerWithdrawal(owner, token ethCommon.Address) (tx
} }
// WDelayerEscapeHatchWithdrawal is the interface to call the smart contract function // WDelayerEscapeHatchWithdrawal is the interface to call the smart contract function
func (c *WDelayerClient) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, func (c *WDelayerClient) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (tx *types.Transaction, err error) {
amount *big.Int) (tx *types.Transaction, err error) {
if tx, err = c.client.CallAuth( if tx, err = c.client.CallAuth(
0, 0,
func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) { func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
@@ -396,21 +384,14 @@ func (c *WDelayerClient) WDelayerConstants() (constants *common.WDelayerConstant
} }
var ( var (
logWDelayerDeposit = crypto.Keccak256Hash([]byte( logWDelayerDeposit = crypto.Keccak256Hash([]byte("Deposit(address,address,uint192,uint64)"))
"Deposit(address,address,uint192,uint64)")) logWDelayerWithdraw = crypto.Keccak256Hash([]byte("Withdraw(address,address,uint192)"))
logWDelayerWithdraw = crypto.Keccak256Hash([]byte( logWDelayerEmergencyModeEnabled = crypto.Keccak256Hash([]byte("EmergencyModeEnabled()"))
"Withdraw(address,address,uint192)")) logWDelayerNewWithdrawalDelay = crypto.Keccak256Hash([]byte("NewWithdrawalDelay(uint64)"))
logWDelayerEmergencyModeEnabled = crypto.Keccak256Hash([]byte( logWDelayerEscapeHatchWithdrawal = crypto.Keccak256Hash([]byte("EscapeHatchWithdrawal(address,address,address,uint256)"))
"EmergencyModeEnabled()")) logWDelayerNewEmergencyCouncil = crypto.Keccak256Hash([]byte("NewEmergencyCouncil(address)"))
logWDelayerNewWithdrawalDelay = crypto.Keccak256Hash([]byte( logWDelayerNewHermezGovernanceAddress = crypto.Keccak256Hash([]byte("NewHermezGovernanceAddress(address)"))
"NewWithdrawalDelay(uint64)")) logWDelayerInitialize = crypto.Keccak256Hash([]byte(
logWDelayerEscapeHatchWithdrawal = crypto.Keccak256Hash([]byte(
"EscapeHatchWithdrawal(address,address,address,uint256)"))
logWDelayerNewEmergencyCouncil = crypto.Keccak256Hash([]byte(
"NewEmergencyCouncil(address)"))
logWDelayerNewHermezGovernanceAddress = crypto.Keccak256Hash([]byte(
"NewHermezGovernanceAddress(address)"))
logWDelayerInitialize = crypto.Keccak256Hash([]byte(
"InitializeWithdrawalDelayerEvent(uint64,address,address)")) "InitializeWithdrawalDelayerEvent(uint64,address,address)"))
) )
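
Side note on the constants above: topic0 of an EVM log entry is the Keccak-256 hash of the event's canonical signature, so precomputing these hashes lets the event loop below dispatch on `vLog.Topics[0]`. A minimal standalone check using only go-ethereum's `crypto` package (illustrative, not part of the diff):

```
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// topic0 of an EVM log is the Keccak-256 hash of the event's canonical
	// signature (name plus argument types, no spaces, no parameter names).
	topic := crypto.Keccak256Hash([]byte("Deposit(address,address,uint192,uint64)"))
	// WDelayerEventsByBlock compares this value against vLog.Topics[0]
	// to decide which event struct to unpack.
	fmt.Println(topic.Hex())
}
```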
@@ -443,47 +424,40 @@ func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, e
} }
// WDelayerEventsByBlock returns the events in a block that happened in the // WDelayerEventsByBlock returns the events in a block that happened in the
// WDelayer Smart Contract. // WDelayer Smart Contract and the blockHash where the events happened. If
// To query by blockNum, set blockNum >= 0 and blockHash == nil. // there are no events in that block, blockHash is nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored. func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error) {
// If there are no events in that block the result is nil.
func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*WDelayerEvents, error) {
var wdelayerEvents WDelayerEvents var wdelayerEvents WDelayerEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
BlockHash: blockHash, FromBlock: big.NewInt(blockNum),
FromBlock: blockNumBigInt, ToBlock: big.NewInt(blockNum),
ToBlock: blockNumBigInt,
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
Topics: [][]ethCommon.Hash{}, BlockHash: nil,
Topics: [][]ethCommon.Hash{},
} }
logs, err := c.client.client.FilterLogs(context.Background(), query) logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
if len(logs) == 0 { if len(logs) > 0 {
return nil, nil blockHash = &logs[0].BlockHash
} }
for _, vLog := range logs { for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash { if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String()) log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent) return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
} }
switch vLog.Topics[0] { switch vLog.Topics[0] {
case logWDelayerDeposit: case logWDelayerDeposit:
var deposit WDelayerEventDeposit var deposit WDelayerEventDeposit
err := c.contractAbi.UnpackIntoInterface(&deposit, "Deposit", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&deposit, "Deposit", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
deposit.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) deposit.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
deposit.Token = ethCommon.BytesToAddress(vLog.Topics[2].Bytes()) deposit.Token = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -494,7 +468,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var withdraw WDelayerEventWithdraw var withdraw WDelayerEventWithdraw
err := c.contractAbi.UnpackIntoInterface(&withdraw, "Withdraw", vLog.Data) err := c.contractAbi.UnpackIntoInterface(&withdraw, "Withdraw", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
withdraw.Token = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) withdraw.Token = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
withdraw.Owner = ethCommon.BytesToAddress(vLog.Topics[2].Bytes()) withdraw.Owner = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -502,52 +476,43 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
case logWDelayerEmergencyModeEnabled: case logWDelayerEmergencyModeEnabled:
var emergencyModeEnabled WDelayerEventEmergencyModeEnabled var emergencyModeEnabled WDelayerEventEmergencyModeEnabled
wdelayerEvents.EmergencyModeEnabled = wdelayerEvents.EmergencyModeEnabled = append(wdelayerEvents.EmergencyModeEnabled, emergencyModeEnabled)
append(wdelayerEvents.EmergencyModeEnabled, emergencyModeEnabled)
case logWDelayerNewWithdrawalDelay: case logWDelayerNewWithdrawalDelay:
var withdrawalDelay WDelayerEventNewWithdrawalDelay var withdrawalDelay WDelayerEventNewWithdrawalDelay
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "NewWithdrawalDelay", vLog.Data)
"NewWithdrawalDelay", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
wdelayerEvents.NewWithdrawalDelay = wdelayerEvents.NewWithdrawalDelay = append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
case logWDelayerEscapeHatchWithdrawal: case logWDelayerEscapeHatchWithdrawal:
var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal
err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, "EscapeHatchWithdrawal", vLog.Data)
"EscapeHatchWithdrawal", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes()) escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes()) escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
escapeHatchWithdrawal.Token = ethCommon.BytesToAddress(vLog.Topics[3].Bytes()) escapeHatchWithdrawal.Token = ethCommon.BytesToAddress(vLog.Topics[3].Bytes())
wdelayerEvents.EscapeHatchWithdrawal = wdelayerEvents.EscapeHatchWithdrawal = append(wdelayerEvents.EscapeHatchWithdrawal, escapeHatchWithdrawal)
append(wdelayerEvents.EscapeHatchWithdrawal, escapeHatchWithdrawal)
case logWDelayerNewEmergencyCouncil: case logWDelayerNewEmergencyCouncil:
var emergencyCouncil WDelayerEventNewEmergencyCouncil var emergencyCouncil WDelayerEventNewEmergencyCouncil
err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, "NewEmergencyCouncil", vLog.Data)
"NewEmergencyCouncil", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
wdelayerEvents.NewEmergencyCouncil = wdelayerEvents.NewEmergencyCouncil = append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
case logWDelayerNewHermezGovernanceAddress: case logWDelayerNewHermezGovernanceAddress:
var governanceAddress WDelayerEventNewHermezGovernanceAddress var governanceAddress WDelayerEventNewHermezGovernanceAddress
err := c.contractAbi.UnpackIntoInterface(&governanceAddress, err := c.contractAbi.UnpackIntoInterface(&governanceAddress, "NewHermezGovernanceAddress", vLog.Data)
"NewHermezGovernanceAddress", vLog.Data)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
wdelayerEvents.NewHermezGovernanceAddress = wdelayerEvents.NewHermezGovernanceAddress = append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
} }
} }
return &wdelayerEvents, nil return &wdelayerEvents, blockHash, nil
} }
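
With the simplified signature, callers query one block at a time and get back the block hash of the logs (or nil when the block has no WDelayer events), which they can use to detect reorgs. A hedged usage sketch; `wdelayerEventsChecked` is a hypothetical helper in the same `eth` package, not part of the diff:

```
// Sketch: fetch WDelayer events for one block and verify the returned
// block hash against the hash the caller expects for that block number.
func wdelayerEventsChecked(c *WDelayerClient, blockNum int64,
	expectedHash ethCommon.Hash) (*WDelayerEvents, error) {
	events, blockHash, err := c.WDelayerEventsByBlock(blockNum)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	// blockHash is nil when the block emitted no WDelayer events.
	if blockHash != nil && *blockHash != expectedHash {
		return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
	}
	return events, nil
}
```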

View File

@@ -52,10 +52,9 @@ func TestWDelayerSetHermezGovernanceAddress(t *testing.T) {
assert.Equal(t, &auxAddressConst, auxAddress) assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, auxAddressConst, assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
_, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst) _, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst)
require.Nil(t, err) require.Nil(t, err)
_, err = wdelayerClientTest.WDelayerClaimGovernance() _, err = wdelayerClientTest.WDelayerClaimGovernance()
@@ -69,8 +68,7 @@ func TestWDelayerGetEmergencyCouncil(t *testing.T) {
} }
func TestWDelayerSetEmergencyCouncil(t *testing.T) { func TestWDelayerSetEmergencyCouncil(t *testing.T) {
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerTestAddressConst)
wdelayerTestAddressConst)
require.Nil(t, err) require.Nil(t, err)
wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst) wdelayerClientAux, err := NewWDelayerClient(ethereumClientAux, wdelayerTestAddressConst)
require.Nil(t, err) require.Nil(t, err)
@@ -83,7 +81,7 @@ func TestWDelayerSetEmergencyCouncil(t *testing.T) {
assert.Equal(t, &auxAddressConst, auxAddress) assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewEmergencyCouncil[0].NewEmergencyCouncil) assert.Equal(t, auxAddressConst, wdelayerEvents.NewEmergencyCouncil[0].NewEmergencyCouncil)
_, err = wdelayerClientAux.WDelayerTransferEmergencyCouncil(emergencyCouncilAddressConst) _, err = wdelayerClientAux.WDelayerTransferEmergencyCouncil(emergencyCouncilAddressConst)
@@ -112,7 +110,7 @@ func TestWDelayerChangeWithdrawalDelay(t *testing.T) {
assert.Equal(t, newWithdrawalDelay, withdrawalDelay) assert.Equal(t, newWithdrawalDelay, withdrawalDelay)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, uint64(newWithdrawalDelay), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay) assert.Equal(t, uint64(newWithdrawalDelay), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay)
} }
@@ -126,7 +124,7 @@ func TestWDelayerDeposit(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount) assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner) assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -152,7 +150,7 @@ func TestWDelayerWithdrawal(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount) assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner) assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner)
@@ -168,7 +166,7 @@ func TestWDelayerSecondDeposit(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount) assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner) assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -183,7 +181,7 @@ func TestWDelayerEnableEmergencyMode(t *testing.T) {
assert.Equal(t, true, emergencyMode) assert.Equal(t, true, emergencyMode)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
auxEvent := new(WDelayerEventEmergencyModeEnabled) auxEvent := new(WDelayerEventEmergencyModeEnabled)
assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0]) assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0])
@@ -202,22 +200,17 @@ func TestWDelayerGetEmergencyModeStartingTime(t *testing.T) {
func TestWDelayerEscapeHatchWithdrawal(t *testing.T) { func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
amount := new(big.Int) amount := new(big.Int)
amount.SetString("10000000000000000", 10) amount.SetString("10000000000000000", 10)
wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerClientEmergencyCouncil, err := NewWDelayerClient(ethereumClientEmergencyCouncil, wdelayerTestAddressConst)
wdelayerTestAddressConst)
require.Nil(t, err) require.Nil(t, err)
_, err = _, err = wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst, tokenHEZAddressConst, amount)
wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst,
tokenHEZAddressConst, amount)
require.Contains(t, err.Error(), "NO_MAX_EMERGENCY_MODE_TIME") require.Contains(t, err.Error(), "NO_MAX_EMERGENCY_MODE_TIME")
seconds := maxEmergencyModeTime.Seconds() seconds := maxEmergencyModeTime.Seconds()
addTime(seconds, ethClientDialURL) addTime(seconds, ethClientDialURL)
_, err = _, err = wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst, tokenHEZAddressConst, amount)
wdelayerClientEmergencyCouncil.WDelayerEscapeHatchWithdrawal(governanceAddressConst,
tokenHEZAddressConst, amount)
require.Nil(t, err) require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock() currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err) require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil) wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err) require.Nil(t, err)
assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token) assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token)
assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To) assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To)

7
go.mod
View File

@@ -11,17 +11,17 @@ require (
github.com/gin-gonic/gin v1.5.0 github.com/gin-gonic/gin v1.5.0
github.com/gobuffalo/packr/v2 v2.8.1 github.com/gobuffalo/packr/v2 v2.8.1
github.com/hermeznetwork/tracerr v0.3.1-0.20210120162744-5da60b576169 github.com/hermeznetwork/tracerr v0.3.1-0.20210120162744-5da60b576169
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189 github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12
github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0 github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0 github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/mapstructure v1.3.0 github.com/mitchellh/mapstructure v1.3.0
github.com/prometheus/client_golang v1.3.0
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/russross/meddler v1.0.0 github.com/russross/meddler v1.0.0
github.com/stretchr/testify v1.6.1 github.com/stretchr/testify v1.6.1
@@ -29,6 +29,5 @@ require (
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
gopkg.in/go-playground/validator.v9 v9.29.1 gopkg.in/go-playground/validator.v9 v9.29.1
) )
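
The `github.com/marusama/semaphore/v2` dependency added here backs the API connection controller that throttles API-originated SQL queries (the "Add semaphore for API queries to SQL" commit). The controller's implementation is not part of this excerpt, so the following is only a minimal sketch of the idea, reusing the `APIConnectionController` name seen elsewhere in the diff: acquire a slot before querying, give up after `SQLConnectionTimeout`, release when done. The real hermez-node code may differ in detail.

```
package db

import (
	"context"
	"time"

	"github.com/marusama/semaphore/v2"
)

// APIConnectionController limits how many API handlers may hit the SQL
// database concurrently (sketch only).
type APIConnectionController struct {
	sem     semaphore.Semaphore
	timeout time.Duration
}

// NewAPIConnectionController builds a controller allowing maxConns
// concurrent acquisitions, each waiting at most timeout for a free slot.
func NewAPIConnectionController(maxConns int, timeout time.Duration) *APIConnectionController {
	return &APIConnectionController{sem: semaphore.New(maxConns), timeout: timeout}
}

// Acquire blocks until a slot is free or the timeout expires; the returned
// cancel func must be called (e.g. deferred) after Release.
func (c *APIConnectionController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	if err := c.sem.Acquire(ctx, 1); err != nil {
		cancel()
		return nil, err
	}
	return cancel, nil
}

// Release frees the slot taken by a previous successful Acquire.
func (c *APIConnectionController) Release() {
	c.sem.Release(1)
}
```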

18
go.sum
View File

@@ -66,7 +66,6 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
@@ -330,10 +329,11 @@ github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef h1:72PG9b2eDlLqKszJVLrsoJbpt4CtgJLhKOjH1MJqCVY= github.com/iden3/go-iden3-crypto v0.0.6-0.20201218111145-a2015adb2f1b/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg=
github.com/iden3/go-iden3-crypto v0.0.6-0.20210308142348-8f85683b2cef/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg= github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c h1:D2u8FFYey6iFXLsqqJZ8R7ch8gZum+/b98whvoSDbyg=
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189 h1:hoarWk/SwNwnMXE0kiskcZULW0XBLQIUGYK4C39ozfs= github.com/iden3/go-iden3-crypto v0.0.6-0.20201221160344-58e589b6eb4c/go.mod h1:oBgthFLboAWi9feaBUFy7OxEcyn9vA1khHSL/WwWFyg=
github.com/iden3/go-merkletree v0.0.0-20210308143313-8b63ca866189/go.mod h1:56abMeBKD4BIFe346rk+yuJ4MQgfMHe28sRx4o2gOpk= github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12 h1:DXWT0BLCSm7cJmTMQy7+iOlxkA1/5ADglufhLK52e10=
github.com/iden3/go-merkletree v0.0.0-20210119155851-bb53e6ad1a12/go.mod h1:FdUFTW2qJiwHyy5R70uErwq7Kaq1uskyFdTfodcUJqA=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
@@ -415,6 +415,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -438,7 +441,6 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
@@ -539,27 +541,23 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=

View File

@@ -67,11 +67,6 @@ func Init(levelStr string, outputs []string) {
func sprintStackTrace(st []tracerr.Frame) string { func sprintStackTrace(st []tracerr.Frame) string {
builder := strings.Builder{} builder := strings.Builder{}
// Skip deepest frame because it belongs to the go runtime and we don't
// care about it.
if len(st) > 0 {
st = st[:len(st)-1]
}
for _, f := range st { for _, f := range st {
builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func)) builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
} }

View File

@@ -2,9 +2,7 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -63,57 +61,37 @@ type Node struct {
sync *synchronizer.Synchronizer sync *synchronizer.Synchronizer
// General // General
cfg *config.Node cfg *config.Node
mode Mode mode Mode
sqlConnRead *sqlx.DB sqlConn *sqlx.DB
sqlConnWrite *sqlx.DB ctx context.Context
ctx context.Context wg sync.WaitGroup
wg sync.WaitGroup cancel context.CancelFunc
cancel context.CancelFunc
} }
// NewNode creates a Node // NewNode creates a Node
func NewNode(mode Mode, cfg *config.Node) (*Node, error) { func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
meddler.Debug = cfg.Debug.MeddlerLogs meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection // Establish DB connection
dbWrite, err := dbUtils.InitSQLDB( db, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite, cfg.PostgreSQL.Port,
cfg.PostgreSQL.HostWrite, cfg.PostgreSQL.Host,
cfg.PostgreSQL.UserWrite, cfg.PostgreSQL.User,
cfg.PostgreSQL.PasswordWrite, cfg.PostgreSQL.Password,
cfg.PostgreSQL.NameWrite, cfg.PostgreSQL.Name,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err)) return nil, tracerr.Wrap(err)
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
} }
var apiConnCon *dbUtils.APIConnectionController var apiConnCon *dbUtils.APIConnectionController
if cfg.API.Explorer || mode == ModeCoordinator { if cfg.API.Explorer || mode == ModeCoordinator {
apiConnCon = dbUtils.NewAPIConnectionController( apiConnCon = dbUtils.NewAPIConnectionController(
cfg.API.MaxSQLConnections, cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration, cfg.API.SQLConnectionTimeout.Duration,
) )
} }
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon) historyDB := historydb.NewHistoryDB(db, apiConnCon)
ethClient, err := ethclient.Dial(cfg.Web3.URL) ethClient, err := ethclient.Dial(cfg.Web3.URL)
if err != nil { if err != nil {
@@ -124,8 +102,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
var keyStore *ethKeystore.KeyStore var keyStore *ethKeystore.KeyStore
if mode == ModeCoordinator { if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{ ethCfg = eth.EthereumConfig{
CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit, CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv, GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv,
} }
scryptN := ethKeystore.StandardScryptN scryptN := ethKeystore.StandardScryptN
@@ -137,23 +115,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path, keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
scryptN, scryptP) scryptN, scryptP)
balance, err := ethClient.BalanceAt(context.TODO(), cfg.Coordinator.ForgerAddress, nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
minForgeBalance := cfg.Coordinator.MinimumForgeAddressBalance
if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
return nil, tracerr.Wrap(fmt.Errorf(
"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
balance, minForgeBalance))
}
log.Infow("forger ethereum account balance",
"addr", cfg.Coordinator.ForgerAddress,
"balance", balance,
"minForgeBalance", minForgeBalance,
)
// Unlock Coordinator ForgerAddr in the keystore to make calls // Unlock Coordinator ForgerAddr in the keystore to make calls
// to ForgeBatch in the smart contract // to ForgeBatch in the smart contract
if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) { if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
@@ -210,29 +171,13 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(fmt.Errorf("cfg.StateDB.Keep = %v < %v, which is unsafe", return nil, tracerr.Wrap(fmt.Errorf("cfg.StateDB.Keep = %v < %v, which is unsafe",
cfg.StateDB.Keep, safeStateDBKeep)) cfg.StateDB.Keep, safeStateDBKeep))
} }
stateDB, err := statedb.NewStateDB(statedb.Config{ stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, cfg.StateDB.Keep,
Path: cfg.StateDB.Path, statedb.TypeSynchronizer, 32)
Keep: cfg.StateDB.Keep,
Type: statedb.TypeSynchronizer,
NLevels: statedb.MaxNLevels,
})
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
var l2DB *l2db.L2DB sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
}
sync, err := synchronizer.NewSynchronizer(client, historyDB, l2DB, stateDB, synchronizer.Config{
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration, StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
ChainID: chainIDU16, ChainID: chainIDU16,
}) })
@@ -248,7 +193,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
var l2DB *l2db.L2DB
if mode == ModeCoordinator { if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
db,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
// Unlock FeeAccount EthAddr in the keystore to generate the // Unlock FeeAccount EthAddr in the keystore to generate the
// account creation authorization // account creation authorization
if !keyStore.HasAddress(cfg.Coordinator.FeeAccount.Address) { if !keyStore.HasAddress(cfg.Coordinator.FeeAccount.Address) {
@@ -270,15 +224,14 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err := auth.Sign(func(msg []byte) ([]byte, error) { if err := auth.Sign(func(msg []byte) ([]byte, error) {
return keyStore.SignHash(feeAccount, msg) return keyStore.SignHash(feeAccount, msg)
}, chainIDU16, cfg.SmartContracts.Rollup); err != nil { }, chainIDU16, cfg.SmartContracts.Rollup); err != nil {
return nil, tracerr.Wrap(err) return nil, err
} }
coordAccount := &txselector.CoordAccount{ coordAccount := &txselector.CoordAccount{
Addr: cfg.Coordinator.FeeAccount.Address, Addr: cfg.Coordinator.FeeAccount.Address,
BJJ: cfg.Coordinator.FeeAccount.BJJ, BJJ: cfg.Coordinator.FeeAccount.BJJ,
AccountCreationAuth: auth.Signature, AccountCreationAuth: auth.Signature,
} }
txSelector, err := txselector.NewTxSelector(coordAccount, txSelector, err := txselector.NewTxSelector(coordAccount, cfg.Coordinator.TxSelector.Path, stateDB, l2DB)
cfg.Coordinator.TxSelector.Path, stateDB, l2DB)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -287,6 +240,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if err != nil {
return nil, tracerr.Wrap(err)
}
serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs)) serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs))
for i, serverProofCfg := range cfg.Coordinator.ServerProofs { for i, serverProofCfg := range cfg.Coordinator.ServerProofs {
serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL, serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
@@ -300,37 +256,14 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator, MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
MaxL1Tx: common.RollupConstMaxL1Tx, MaxL1Tx: common.RollupConstMaxL1Tx,
} }
var verifierIdx int verifierIdx, err := scConsts.Rollup.FindVerifierIdx(
if cfg.Coordinator.Debug.RollupVerifierIndex == nil { cfg.Coordinator.Circuit.MaxTx,
verifierIdx, err = scConsts.Rollup.FindVerifierIdx( cfg.Coordinator.Circuit.NLevels,
cfg.Coordinator.Circuit.MaxTx, )
cfg.Coordinator.Circuit.NLevels, if err != nil {
) return nil, tracerr.Wrap(err)
if err != nil {
return nil, tracerr.Wrap(err)
}
log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
} else {
verifierIdx = *cfg.Coordinator.Debug.RollupVerifierIndex
log.Infow("Using debug verifier index from config", "verifierIdx", verifierIdx)
if verifierIdx >= len(scConsts.Rollup.Verifiers) {
return nil, tracerr.Wrap(
fmt.Errorf("verifierIdx (%v) >= "+
"len(scConsts.Rollup.Verifiers) (%v)",
verifierIdx, len(scConsts.Rollup.Verifiers)))
}
verifier := scConsts.Rollup.Verifiers[verifierIdx]
if verifier.MaxTx != cfg.Coordinator.Circuit.MaxTx ||
verifier.NLevels != cfg.Coordinator.Circuit.NLevels {
return nil, tracerr.Wrap(
fmt.Errorf("Circuit config and verifier params don't match. "+
"circuit.MaxTx = %v, circuit.NLevels = %v, "+
"verifier.MaxTx = %v, verifier.NLevels = %v",
cfg.Coordinator.Circuit.MaxTx, cfg.Coordinator.Circuit.NLevels,
verifier.MaxTx, verifier.NLevels,
))
}
} }
log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
coord, err = coordinator.NewCoordinator( coord, err = coordinator.NewCoordinator(
coordinator.Config{ coordinator.Config{
@@ -338,16 +271,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ConfirmBlocks: cfg.Coordinator.ConfirmBlocks, ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc, L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration, ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration, SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
EthClientAttempts: cfg.Coordinator.EthClient.Attempts, EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration, EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration, TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
DebugBatchPath: cfg.Coordinator.Debug.BatchPath, DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
Purger: coordinator.PurgerCfg{ Purger: coordinator.PurgerCfg{
@@ -356,7 +282,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay, PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay, InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
}, },
ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
VerifierIdx: uint8(verifierIdx), VerifierIdx: uint8(verifierIdx),
TxProcessorConfig: txProcessorCfg, TxProcessorConfig: txProcessorCfg,
}, },
@@ -379,11 +304,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
var nodeAPI *NodeAPI var nodeAPI *NodeAPI
if cfg.API.Address != "" { if cfg.API.Address != "" {
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
if cfg.API.UpdateMetricsInterval.Duration == 0 { if cfg.API.UpdateMetricsInterval.Duration == 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v", return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
cfg.API.UpdateMetricsInterval.Duration)) cfg.API.UpdateMetricsInterval.Duration))
@@ -412,7 +332,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ChainID: chainIDU16, ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup, HermezAddress: cfg.SmartContracts.Rollup,
}, },
cfg.Coordinator.ForgeDelay.Duration,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -439,8 +358,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
sync: sync, sync: sync,
cfg: cfg, cfg: cfg,
mode: mode, mode: mode,
sqlConnRead: dbRead, sqlConn: db,
sqlConnWrite: dbWrite,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
}, nil }, nil
@@ -468,7 +386,6 @@ func NewNodeAPI(
sdb *statedb.StateDB, sdb *statedb.StateDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *api.Config, config *api.Config,
forgeDelay time.Duration,
) (*NodeAPI, error) { ) (*NodeAPI, error) {
engine := gin.Default() engine := gin.Default()
engine.NoRoute(handleNoRoute) engine.NoRoute(handleNoRoute)
@@ -477,11 +394,9 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints, coordinatorEndpoints, explorerEndpoints,
engine, engine,
hdb, hdb,
sdb,
l2db, l2db,
config, config,
&api.NodeConfig{
ForgeDelay: forgeDelay.Seconds(),
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -493,24 +408,20 @@ func NewNodeAPI(
}, nil }, nil
} }
// Run starts the http server of the NodeAPI. To stop it, pass a context // Run starts the http server of the NodeAPI. To stop it, pass a context with
// with cancellation. // cancellation.
func (a *NodeAPI) Run(ctx context.Context) error { func (a *NodeAPI) Run(ctx context.Context) error {
server := &http.Server{ server := &http.Server{
Addr: a.addr,
Handler: a.engine, Handler: a.engine,
// TODO: Figure out best parameters for production // TODO: Figure out best parameters for production
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("NodeAPI is ready at %v", a.addr)
go func() { go func() {
if err := server.Serve(listener); err != nil && log.Infof("NodeAPI is ready at %v", a.addr)
tracerr.Unwrap(err) != http.ErrServerClosed { if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
@@ -526,8 +437,8 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil return nil
} }
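
`Run` blocks until the supplied context is cancelled and then shuts the HTTP server down, so the caller drives its lifetime with a cancellable context. A minimal usage sketch; `runAPIUntilShutdown` is a hypothetical helper in the same `node` package, not part of the diff:

```
// Sketch: run a *NodeAPI until shutdown is requested.
func runAPIUntilShutdown(nodeAPI *NodeAPI) {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		defer close(done)
		if err := nodeAPI.Run(ctx); err != nil {
			log.Errorw("NodeAPI.Run", "err", err)
		}
	}()
	// ...decide when to stop (signal handler, fatal error, etc.), then:
	cancel()
	<-done // Run returns once the HTTP server has shut down
}
```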
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
vars synchronizer.SCVariablesPtr, batches []common.BatchData) { batches []common.BatchData) {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{ n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats, Stats: *stats,
@@ -549,21 +460,16 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats,
if stats.Synced() { if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo( if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum), common.BatchNum(stats.Eth.LastBatch),
stats.Sync.Auction.CurrentSlot.SlotNum, stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil { ); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err) log.Errorw("API.UpdateNetworkInfo", "err", err)
} }
} else {
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
} }
} }
} }
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
vars synchronizer.SCVariablesPtr) {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{ n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats, Stats: *stats,
@@ -583,9 +489,8 @@ func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we // TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around. // don't have to pass it around.
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
time.Duration, error) { blockData, discarded, err := n.sync.Sync2(ctx, lastBlock)
blockData, discarded, err := n.sync.Sync(ctx, lastBlock)
stats := n.sync.Stats() stats := n.sync.Stats()
if err != nil { if err != nil {
// case: error // case: error
@@ -641,13 +546,7 @@ func (n *Node) StartSynchronizer() {
if n.ctx.Err() != nil { if n.ctx.Err() != nil {
continue continue
} }
if errors.Is(err, eth.ErrBlockHashMismatchEvent) { log.Errorw("Synchronizer.Sync", "err", err)
log.Warnw("Synchronizer.Sync", "err", err)
} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
log.Warnw("Synchronizer.Sync", "err", err)
} else {
log.Errorw("Synchronizer.Sync", "err", err)
}
} }
} }
} }
@@ -708,10 +607,6 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
@@ -728,10 +623,6 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():

View File

@@ -4,12 +4,9 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"strings"
"time" "time"
"github.com/dghubble/sling" "github.com/dghubble/sling"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
@@ -26,16 +23,12 @@ type APIType string
const ( const (
// APITypeBitFinexV2 is the http API used by bitfinex V2 // APITypeBitFinexV2 is the http API used by bitfinex V2
APITypeBitFinexV2 APIType = "bitfinexV2" APITypeBitFinexV2 APIType = "bitfinexV2"
// APITypeCoingeckoV3 is the http API used by coingecko V3
APITypeCoingeckoV3 APIType = "coingeckoV3"
) )
func (t *APIType) valid() bool { func (t *APIType) valid() bool {
switch *t { switch *t {
case APITypeBitFinexV2: case APITypeBitFinexV2:
return true return true
case APITypeCoingeckoV3:
return true
default: default:
return false return false
} }
@@ -43,23 +36,23 @@ func (t *APIType) valid() bool {
// PriceUpdater definition // PriceUpdater definition
type PriceUpdater struct { type PriceUpdater struct {
db *historydb.HistoryDB db *historydb.HistoryDB
apiURL string apiURL string
apiType APIType apiType APIType
tokens []historydb.TokenSymbolAndAddr tokenSymbols []string
} }
// NewPriceUpdater is the constructor for the updater // NewPriceUpdater is the constructor for the updater
func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater, func NewPriceUpdater(apiURL string, apiType APIType, db *historydb.HistoryDB) (*PriceUpdater, error) {
error) { tokenSymbols := []string{}
if !apiType.valid() { if !apiType.valid() {
return nil, tracerr.Wrap(fmt.Errorf("Invalid apiType: %v", apiType)) return nil, tracerr.Wrap(fmt.Errorf("Invalid apiType: %v", apiType))
} }
return &PriceUpdater{ return &PriceUpdater{
db: db, db: db,
apiURL: apiURL, apiURL: apiURL,
apiType: apiType, apiType: apiType,
tokens: []historydb.TokenSymbolAndAddr{}, tokenSymbols: tokenSymbols,
}, nil }, nil
} }
@@ -80,39 +73,7 @@ func getTokenPriceBitfinex(ctx context.Context, client *sling.Sling,
return state[6], nil return state[6], nil
} }
func getTokenPriceCoingecko(ctx context.Context, client *sling.Sling, // UpdatePrices is triggered by the Coordinator, and internally will update the token prices in the db
tokenAddr ethCommon.Address) (float64, error) {
responseObject := make(map[string]map[string]float64)
var url string
var id string
if tokenAddr == common.EmptyAddr { // Special case for Ether
url = "simple/price?ids=ethereum&vs_currencies=usd"
id = "ethereum"
} else { // Common case (ERC20)
id = strings.ToLower(tokenAddr.String())
url = "simple/token_price/ethereum?contract_addresses=" +
id + "&vs_currencies=usd"
}
req, err := client.New().Get(url).Request()
if err != nil {
return 0, tracerr.Wrap(err)
}
res, err := client.Do(req.WithContext(ctx), &responseObject, nil)
if err != nil {
return 0, tracerr.Wrap(err)
}
if res.StatusCode != http.StatusOK {
return 0, tracerr.Wrap(fmt.Errorf("http response is not is %v", res.StatusCode))
}
price := responseObject[id]["usd"]
if price <= 0 {
return 0, tracerr.Wrap(fmt.Errorf("price not found for %v", id))
}
return price, nil
}
// UpdatePrices is triggered by the Coordinator, and internally will update the
// token prices in the db
func (p *PriceUpdater) UpdatePrices(ctx context.Context) { func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
tr := &http.Transport{ tr := &http.Transport{
MaxIdleConns: defaultMaxIdleConns, MaxIdleConns: defaultMaxIdleConns,
@@ -122,35 +83,33 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
httpClient := &http.Client{Transport: tr} httpClient := &http.Client{Transport: tr}
client := sling.New().Base(p.apiURL).Client(httpClient) client := sling.New().Base(p.apiURL).Client(httpClient)
for _, token := range p.tokens { for _, tokenSymbol := range p.tokenSymbols {
var tokenPrice float64 var tokenPrice float64
var err error var err error
switch p.apiType { switch p.apiType {
case APITypeBitFinexV2: case APITypeBitFinexV2:
tokenPrice, err = getTokenPriceBitfinex(ctx, client, token.Symbol) tokenPrice, err = getTokenPriceBitfinex(ctx, client, tokenSymbol)
case APITypeCoingeckoV3:
tokenPrice, err = getTokenPriceCoingecko(ctx, client, token.Addr)
} }
if ctx.Err() != nil { if ctx.Err() != nil {
return return
} }
if err != nil { if err != nil {
log.Warnw("token price not updated (get error)", log.Warnw("token price not updated (get error)",
"err", err, "token", token.Symbol, "apiType", p.apiType) "err", err, "token", tokenSymbol, "apiType", p.apiType)
} }
if err = p.db.UpdateTokenValue(token.Symbol, tokenPrice); err != nil { if err = p.db.UpdateTokenValue(tokenSymbol, tokenPrice); err != nil {
log.Errorw("token price not updated (db error)", log.Errorw("token price not updated (db error)",
"err", err, "token", token.Symbol, "apiType", p.apiType) "err", err, "token", tokenSymbol, "apiType", p.apiType)
} }
} }
} }
// UpdateTokenList gets the registered token symbols from HistoryDB // UpdateTokenList gets the registered token symbols from HistoryDB
func (p *PriceUpdater) UpdateTokenList() error { func (p *PriceUpdater) UpdateTokenList() error {
tokens, err := p.db.GetTokenSymbolsAndAddrs() tokenSymbols, err := p.db.GetTokenSymbols()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
p.tokens = tokens p.tokenSymbols = tokenSymbols
return nil return nil
} }
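
`UpdateTokenList` and `UpdatePrices` are meant to be called periodically by the node (the comment above says the Coordinator triggers the price update). A hedged sketch of such a loop; `runPriceUpdater` and its interval are illustrative, not part of the diff:

```
// Sketch: refresh the token list and prices every interval until ctx is done.
func runPriceUpdater(ctx context.Context, pu *PriceUpdater, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if err := pu.UpdateTokenList(); err != nil {
			log.Errorw("PriceUpdater.UpdateTokenList", "err", err)
		}
		pu.UpdatePrices(ctx) // logs failures internally; returns early if ctx is cancelled
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}
```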

View File

@@ -2,6 +2,7 @@ package priceupdater
import ( import (
"context" "context"
"math/big"
"os" "os"
"testing" "testing"
@@ -14,45 +15,29 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var historyDB *historydb.HistoryDB func TestPriceUpdater(t *testing.T) {
func TestMain(m *testing.M) {
// Init DB // Init DB
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
if err != nil { assert.NoError(t, err)
panic(err) historyDB := historydb.NewHistoryDB(db, nil)
}
historyDB = historydb.NewHistoryDB(db, db, nil)
// Clean DB // Clean DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
// Populate DB // Populate DB
// Gen blocks and add them to DB // Gen blocks and add them to DB
blocks := test.GenBlocks(1, 2) blocks := test.GenBlocks(1, 2)
err = historyDB.AddBlocks(blocks) assert.NoError(t, historyDB.AddBlocks(blocks))
if err != nil {
panic(err)
}
// Gen tokens and add them to DB // Gen tokens and add them to DB
tokens := []common.Token{} tokens := []common.Token{}
tokens = append(tokens, common.Token{ tokens = append(tokens, common.Token{
TokenID: 1, TokenID: 1,
EthBlockNum: blocks[0].Num, EthBlockNum: blocks[0].Num,
EthAddr: ethCommon.HexToAddress("0x6b175474e89094c44da98b954eedeac495271d0f"), EthAddr: ethCommon.BigToAddress(big.NewInt(2)),
Name: "DAI", Name: "DAI",
Symbol: "DAI", Symbol: "DAI",
Decimals: 18, Decimals: 18,
}) })
err = historyDB.AddTokens(tokens) assert.NoError(t, historyDB.AddTokens(tokens))
if err != nil {
panic(err)
}
result := m.Run()
os.Exit(result)
}
func TestPriceUpdaterBitfinex(t *testing.T) {
// Init price updater // Init price updater
pu, err := NewPriceUpdater("https://api-pub.bitfinex.com/v2/", APITypeBitFinexV2, historyDB) pu, err := NewPriceUpdater("https://api-pub.bitfinex.com/v2/", APITypeBitFinexV2, historyDB)
require.NoError(t, err) require.NoError(t, err)
@@ -60,29 +45,13 @@ func TestPriceUpdaterBitfinex(t *testing.T) {
assert.NoError(t, pu.UpdateTokenList()) assert.NoError(t, pu.UpdateTokenList())
// Update prices // Update prices
pu.UpdatePrices(context.Background()) pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
func TestPriceUpdaterCoingecko(t *testing.T) {
// Init price updater
pu, err := NewPriceUpdater("https://api.coingecko.com/api/v3/", APITypeCoingeckoV3, historyDB)
require.NoError(t, err)
// Update token list
assert.NoError(t, pu.UpdateTokenList())
// Update prices
pu.UpdatePrices(context.Background())
assertTokenHasPriceAndClean(t)
}
func assertTokenHasPriceAndClean(t *testing.T) {
// Check that prices have been updated // Check that prices have been updated
fetchedTokens, err := historyDB.GetTokensTest() fetchedTokens, err := historyDB.GetTokensTest()
require.NoError(t, err) require.NoError(t, err)
// TokenID 0 (ETH) is always on the DB // TokenID 0 (ETH) is always on the DB
assert.Equal(t, 2, len(fetchedTokens)) assert.Equal(t, 2, len(fetchedTokens))
for _, token := range fetchedTokens { for _, token := range fetchedTokens {
require.NotNil(t, token.USD) assert.NotNil(t, token.USD)
require.NotNil(t, token.USDUpdate) assert.NotNil(t, token.USDUpdate)
assert.Greater(t, *token.USD, 0.0)
} }
} }

View File

@@ -235,7 +235,7 @@ func (p *ProofServerClient) CalculateProof(ctx context.Context, zkInputs *common
return tracerr.Wrap(p.apiInput(ctx, zkInputs)) return tracerr.Wrap(p.apiInput(ctx, zkInputs))
} }
// GetProof retrieves the Proof and Public Data (public inputs) from the // GetProof retreives the Proof and Public Data (public inputs) from the
// ServerProof, blocking until the proof is ready. // ServerProof, blocking until the proof is ready.
func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) { func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
if err := p.WaitReady(ctx); err != nil { if err := p.WaitReady(ctx); err != nil {
@@ -256,8 +256,7 @@ func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, e
} }
return &proof, pubInputs, nil return &proof, pubInputs, nil
} }
return nil, nil, tracerr.Wrap(fmt.Errorf("status != %v, status = %v", StatusCodeSuccess, return nil, nil, tracerr.Wrap(fmt.Errorf("status != %v, status = %v", StatusCodeSuccess, status.Status))
status.Status))
} }
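`GetProof` only returns once `WaitReady` has seen the prover report a finished job, so the caller's context is the sole mechanism for bounding the wait. A minimal sketch of that kind of polling loop; `isReady` here is a hypothetical stand-in for whatever status request the real client performs:

```
package prover

import (
	"context"
	"time"
)

// waitReady polls a readiness probe until it reports true, an error occurs,
// or the context is cancelled.
func waitReady(ctx context.Context, isReady func(context.Context) (bool, error)) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		ready, err := isReady(ctx)
		if err != nil {
			return err
		}
		if ready {
			return nil
		}
		select {
		case <-ctx.Done():
			// Caller cancelled or timed out.
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```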
// Cancel cancels any current proof computation // Cancel cancels any current proof computation
@@ -298,7 +297,7 @@ func (p *MockClient) CalculateProof(ctx context.Context, zkInputs *common.ZKInpu
return nil return nil
} }
// GetProof retrieves the Proof from the ServerProof // GetProof retreives the Proof from the ServerProof
func (p *MockClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) { func (p *MockClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
// Simulate a delay // Simulate a delay
select { select {

View File

@@ -1,44 +0,0 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}
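These gauges only do something once they are registered and exposed over HTTP: elsewhere in this changeset the sync loop calls `Set` on them after each synced block and batch, and the DebugAPI serves them under `/debug/metrics` through `promhttp`. A small self-contained sketch of that pattern (port and `Help` text are illustrative only):

```
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var syncedLastBlockNum = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "sync_synced_last_block_num",
	Help: "last ethereum block synchronized into the node",
})

func main() {
	prometheus.MustRegister(syncedLastBlockNum)

	// The synchronizer would call this after every synced block.
	syncedLastBlockNum.Set(1234)

	// Scrape endpoint, analogous to the /debug/metrics route in the DebugAPI.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9100", nil)
}
```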

View File

@@ -11,7 +11,6 @@ import (
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
@@ -19,32 +18,19 @@ import (
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
const ( // Stats of the syncrhonizer
// errStrUnknownBlock is the string returned by geth when querying an
// unknown block
errStrUnknownBlock = "unknown block"
)
var (
// ErrUnknownBlock is the error returned by the Synchronizer when a
// block is queried by hash but the ethereum node doesn't find it due
// to it being discarded from a reorg.
ErrUnknownBlock = fmt.Errorf("unknown block")
)
// Stats of the synchronizer
type Stats struct { type Stats struct {
Eth struct { Eth struct {
RefreshPeriod time.Duration RefreshPeriod time.Duration
Updated time.Time Updated time.Time
FirstBlockNum int64 FirstBlockNum int64
LastBlock common.Block LastBlock common.Block
LastBatchNum int64 LastBatch int64
} }
Sync struct { Sync struct {
Updated time.Time Updated time.Time
LastBlock common.Block LastBlock common.Block
LastBatch common.Batch LastBatch int64
// LastL1BatchBlock is the last ethereum block in which an // LastL1BatchBlock is the last ethereum block in which an
// l1Batch was forged // l1Batch was forged
LastL1BatchBlock int64 LastL1BatchBlock int64
@@ -91,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
} }
// UpdateSync updates the synchronizer stats // UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch, func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) { lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
now := time.Now() now := time.Now()
s.rw.Lock() s.rw.Lock()
s.Sync.LastBlock = *lastBlock s.Sync.LastBlock = *lastBlock
if lastBatch != nil { if lastBatch != nil {
s.Sync.LastBatch = *lastBatch s.Sync.LastBatch = int64(*lastBatch)
} }
if lastL1BatchBlock != nil { if lastL1BatchBlock != nil {
s.Sync.LastL1BatchBlock = *lastL1BatchBlock s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -119,16 +105,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1) lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) return tracerr.Wrap(err)
} }
lastBatchNum, err := ethClient.RollupLastForgedBatch() lastBatch, err := ethClient.RollupLastForgedBatch()
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err)) return tracerr.Wrap(err)
} }
s.rw.Lock() s.rw.Lock()
s.Eth.Updated = now s.Eth.Updated = now
s.Eth.LastBlock = *lastBlock s.Eth.LastBlock = *lastBlock
s.Eth.LastBatchNum = lastBatchNum s.Eth.LastBatch = lastBatch
s.rw.Unlock() s.rw.Unlock()
return nil return nil
} }
@@ -153,10 +139,6 @@ func (s *StatsHolder) CopyStats() *Stats {
sCopy.Sync.Auction.NextSlot.DefaultSlotBid = sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid) common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
} }
if s.Sync.LastBatch.StateRoot != nil {
sCopy.Sync.LastBatch.StateRoot =
common.CopyBigInt(s.Sync.LastBatch.StateRoot)
}
s.rw.RUnlock() s.rw.RUnlock()
return &sCopy return &sCopy
} }
@@ -170,9 +152,9 @@ func (s *StatsHolder) blocksPerc() float64 {
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1)) float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
} }
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 { func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
return float64(batchNum) * 100.0 / return float64(batchNum) * 100.0 /
float64(s.Eth.LastBatchNum) float64(s.Eth.LastBatch)
} }
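As a worked example of `batchesPerc`: with `Eth.LastBatchNum` (or `Eth.LastBatch` in the variant on the right) at 200, a synced batch number of 50 reports 50·100/200 = 25% progress; `blocksPerc` does the analogous calculation over block numbers relative to `FirstBlockNum`. (Numbers are illustrative only.)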
// StartBlockNums sets the first block used to start tracking the smart // StartBlockNums sets the first block used to start tracking the smart
@@ -213,22 +195,20 @@ type Config struct {
// Synchronizer implements the Synchronizer type // Synchronizer implements the Synchronizer type
type Synchronizer struct { type Synchronizer struct {
ethClient eth.ClientInterface ethClient eth.ClientInterface
consts SCConsts consts SCConsts
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
l2DB *l2db.L2DB stateDB *statedb.StateDB
stateDB *statedb.StateDB cfg Config
cfg Config initVars SCVariables
initVars SCVariables startBlockNum int64
startBlockNum int64 vars SCVariables
vars SCVariables stats *StatsHolder
stats *StatsHolder
resetStateFailed bool
} }
// NewSynchronizer creates a new Synchronizer // NewSynchronizer creates a new Synchronizer
func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.HistoryDB, func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB, stateDB *statedb.StateDB, cfg Config) (*Synchronizer, error) { stateDB *statedb.StateDB, cfg Config) (*Synchronizer, error) {
auctionConstants, err := ethClient.AuctionConstants() auctionConstants, err := ethClient.AuctionConstants()
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.AuctionConstants(): %w", return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.AuctionConstants(): %w",
@@ -273,7 +253,6 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
ethClient: ethClient, ethClient: ethClient,
consts: consts, consts: consts,
historyDB: historyDB, historyDB: historyDB,
l2DB: l2DB,
stateDB: stateDB, stateDB: stateDB,
cfg: cfg, cfg: cfg,
initVars: *initVars, initVars: *initVars,
@@ -349,25 +328,23 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
return nil return nil
} }
// updateCurrentSlot updates the slot with information of the current slot. // firstBatchBlockNum is the blockNum of first batch in that block, if any
// The information about which coordinator is allowed to forge is only updated func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
// when we are Synced. slot := common.Slot{
// hasBatch is true when the last synced block contained at least one batch. SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error { ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
}
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) slotNum := s.consts.Auction.SlotNum(blockNum)
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
if reset { if reset {
// Using this query only to know if there
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum) dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
} else if tracerr.Unwrap(err) == sql.ErrNoRows { } else if tracerr.Unwrap(err) == sql.ErrNoRows {
hasBatch = false firstBatchBlockNum = nil
} else { } else {
hasBatch = true firstBatchBlockNum = &dbFirstBatchBlockNum
firstBatchBlockNum = dbFirstBatchBlockNum
} }
slot.ForgerCommitment = false slot.ForgerCommitment = false
} else if slotNum > slot.SlotNum { } else if slotNum > slot.SlotNum {
@@ -378,11 +355,11 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil { if err := s.setSlotCoordinator(&slot); err != nil {
return tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if hasBatch && if firstBatchBlockNum != nil &&
s.consts.Auction.RelativeBlock(firstBatchBlockNum) < s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
int64(s.vars.Auction.SlotDeadline) { int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true slot.ForgerCommitment = true
} }
@@ -391,61 +368,57 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err)) return nil, tracerr.Wrap(err)
} }
if !canForge { if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return nil return &slot, nil
} }
// updateNextSlot updates the slot with information of the next slot. func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) + 1 slotNum := s.consts.Auction.SlotNum(blockNum) + 1
slot.SlotNum = slotNum slot := common.Slot{
slot.ForgerCommitment = false SlotNum: slotNum,
ForgerCommitment: false,
}
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil { if err := s.setSlotCoordinator(&slot); err != nil {
return tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// TODO: Remove this SANITY CHECK once this code is tested enough // TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err)) return nil, tracerr.Wrap(err)
} }
if !canForge { if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return nil return &slot, nil
} }
// updateCurrentNextSlotIfSync updates the current and next slot. Information func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
// about forger address that is allowed to forge is only updated if we are current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
// Synced. if err != nil {
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
current := s.stats.Sync.Auction.CurrentSlot
next := s.stats.Sync.Auction.NextSlot
if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if err := s.updateNextSlot(&next); err != nil { next, err := s.getNextSlot()
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
s.stats.UpdateCurrentNextSlot(&current, &next) s.stats.UpdateCurrentNextSlot(current, next)
return nil return nil
} }
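The slot handling above relies on two pieces of auction arithmetic: `SlotNum` maps a block number to its slot, and `SlotBlocks` maps a slot back to its first and last block; `ForgerCommitment` is then set when the slot's first batch lands within `SlotDeadline` blocks of the slot start. A rough sketch of that arithmetic, assuming fixed-size slots starting at the auction genesis block (the parameter names are illustrative, not the actual constant fields):

```
package main

import "fmt"

// slotNum returns the slot a block falls into, assuming fixed-size slots
// starting at the auction genesis block.
func slotNum(blockNum, genesisBlock, blocksPerSlot int64) int64 {
	if blockNum < genesisBlock {
		return 0
	}
	return (blockNum - genesisBlock) / blocksPerSlot
}

// slotBlocks returns the first and last block of a slot under the same assumption.
func slotBlocks(slot, genesisBlock, blocksPerSlot int64) (first, last int64) {
	first = genesisBlock + slot*blocksPerSlot
	last = first + blocksPerSlot - 1
	return first, last
}

func main() {
	const genesis, perSlot = 100, 40
	s := slotNum(250, genesis, perSlot) // block 250 -> slot 3
	first, last := slotBlocks(s, genesis, perSlot)
	fmt.Println(s, first, last) // 3 220 259
}
```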
@@ -472,10 +445,8 @@ func (s *Synchronizer) init() error {
lastBlock = lastSavedBlock lastBlock = lastSavedBlock
} }
if err := s.resetState(lastBlock); err != nil { if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
s.resetStateFailed = false
log.Infow("Sync init block", log.Infow("Sync init block",
"syncLastBlock", s.stats.Sync.LastBlock, "syncLastBlock", s.stats.Sync.LastBlock,
@@ -484,44 +455,23 @@ func (s *Synchronizer) init() error {
"ethLastBlock", s.stats.Eth.LastBlock, "ethLastBlock", s.stats.Eth.LastBlock,
) )
log.Infow("Sync init batch", log.Infow("Sync init batch",
"syncLastBatch", s.stats.Sync.LastBatch.BatchNum, "syncLastBatch", s.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum), "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
"ethLastBatch", s.stats.Eth.LastBatchNum, "ethLastBatch", s.stats.Eth.LastBatch,
) )
return nil return nil
} }
func (s *Synchronizer) resetIntermediateState() error { // Sync2 attems to synchronize an ethereum block starting from lastSavedBlock.
lastBlock, err := s.historyDB.GetLastBlock()
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBlock = &common.Block{}
} else if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBlock: %w", err))
}
if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(fmt.Errorf("resetState at block %v: %w", lastBlock.Num, err))
}
s.resetStateFailed = false
return nil
}
// Sync attempts to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB. // If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB.
// If a block is synced, it will be returned and also stored in the DB. If a // If a block is synched, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no // reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made. // synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations // TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync(ctx context.Context, func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block) (*common.BlockData, *int64, error) {
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil {
return nil, nil, tracerr.Wrap(err)
}
}
var nextBlockNum int64 // next block number to sync var nextBlockNum int64 // next block number to sync
if lastSavedBlock == nil { if lastSavedBlock == nil {
var err error
// Get lastSavedBlock from History DB // Get lastSavedBlock from History DB
lastSavedBlock, err = s.historyDB.GetLastBlock() lastSavedBlock, err = s.historyDB.GetLastBlock()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -547,7 +497,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
if tracerr.Unwrap(err) == ethereum.NotFound { if tracerr.Unwrap(err) == ethereum.NotFound {
return nil, nil, nil return nil, nil, nil
} else if err != nil { } else if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
log.Debugf("ethBlock: num: %v, parent: %v, hash: %v", log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String()) ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -561,7 +511,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
"ethLastBlock", s.stats.Eth.LastBlock, "ethLastBlock", s.stats.Eth.LastBlock,
) )
// Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg! // Check that the obtianed ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
if lastSavedBlock != nil { if lastSavedBlock != nil {
if lastSavedBlock.Hash != ethBlock.ParentHash { if lastSavedBlock.Hash != ethBlock.ParentHash {
// Reorg detected // Reorg detected
@@ -577,20 +527,6 @@ func (s *Synchronizer) Sync(ctx context.Context,
} }
} }
defer func() {
// If there was an error during sync, reset to the last block
// in the historyDB because the historyDB is written last in
// the Sync method and is the source of consistency. This
// allows resetting the stateDB in the case a batch was
// processed but the historyDB block was not committed due to an
// error.
if err != nil {
if err2 := s.resetIntermediateState(); err2 != nil {
log.Errorw("sync revert", "err", err2)
}
}
}()
// Get data from the rollup contract // Get data from the rollup contract
rollupData, err := s.rollupSync(ethBlock) rollupData, err := s.rollupSync(ethBlock)
if err != nil { if err != nil {
@@ -628,14 +564,14 @@ func (s *Synchronizer) Sync(ctx context.Context,
} }
// Group all the block data into the structs to save into HistoryDB // Group all the block data into the structs to save into HistoryDB
blockData = &common.BlockData{ blockData := common.BlockData{
Block: *ethBlock, Block: *ethBlock,
Rollup: *rollupData, Rollup: *rollupData,
Auction: *auctionData, Auction: *auctionData,
WDelayer: *wDelayerData, WDelayer: *wDelayerData,
} }
err = s.historyDB.AddBlockSCData(blockData) err = s.historyDB.AddBlockSCData(&blockData)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
@@ -653,35 +589,31 @@ func (s *Synchronizer) Sync(ctx context.Context,
} }
} }
s.stats.UpdateSync(ethBlock, s.stats.UpdateSync(ethBlock,
&rollupData.Batches[batchesLen-1].Batch, &rollupData.Batches[batchesLen-1].Batch.BatchNum,
lastL1BatchBlock, lastForgeL1TxsNum) lastL1BatchBlock, lastForgeL1TxsNum)
} }
hasBatch := false var firstBatchBlockNum *int64
if len(rollupData.Batches) > 0 { if len(rollupData.Batches) > 0 {
hasBatch = true firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
} }
if err = s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil { if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
for _, batchData := range rollupData.Batches {
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
)
}
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
log.Debugw("Synced block", log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num, "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(), "syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num, "ethLastBlockNum", s.stats.Eth.LastBlock.Num,
) )
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
"ethLastBatch", s.stats.Eth.LastBatch,
)
}
return blockData, nil, nil return &blockData, nil, nil
} }
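Per the comment on `Sync` (`Sync2` in the variant on the right), each call returns either a synced block, a count of discarded blocks when a reorg was detected, or nothing when the node is already at the ethereum head. A hedged sketch of how a caller might drive it; the loop, sleep interval and logging are illustrative, not the node's actual coordinator loop:

```
package node

import (
	"context"
	"time"

	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/hermez-node/synchronizer"
)

// syncLoop keeps the synchronizer at the ethereum head until ctx is cancelled.
func syncLoop(ctx context.Context, s *synchronizer.Synchronizer) {
	for ctx.Err() == nil {
		blockData, discarded, err := s.Sync(ctx, nil)
		switch {
		case err != nil:
			log.Errorw("Sync", "err", err)
		case discarded != nil:
			log.Infow("reorg detected", "discardedBlocks", *discarded)
		case blockData == nil:
			// Already at the head: wait before polling again.
			time.Sleep(2 * time.Second)
		default:
			log.Debugw("synced block", "blockNum", blockData.Block.Num)
		}
	}
}
```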
// reorg manages a reorg, updating History and State DB as needed. Keeps // reorg manages a reorg, updating History and State DB as needed. Keeps
@@ -713,15 +645,14 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1) log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1)
// Set History DB and State DB to the correct state // Set History DB and State DB to the correct state
if err := s.historyDB.Reorg(block.Num); err != nil { err := s.historyDB.Reorg(block.Num)
if err != nil {
return 0, tracerr.Wrap(err) return 0, tracerr.Wrap(err)
} }
if err := s.resetState(block); err != nil { if err := s.resetState(block); err != nil {
s.resetStateFailed = true
return 0, tracerr.Wrap(err) return 0, tracerr.Wrap(err)
} }
s.resetStateFailed = false
return block.Num, nil return block.Num, nil
} }
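The reorg path above discards blocks from the uncle block back to the last block whose hash still matches the chain, then rewinds HistoryDB and StateDB to that height. A rough sketch of the ancestor search only, with `localHash` and `remoteHash` as hypothetical stand-ins for the HistoryDB and ethereum-client lookups:

```
package node

import (
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
)

// findCommonAncestor walks back from the first mismatching block until the
// locally stored hash matches the chain again, returning that block number.
func findCommonAncestor(uncleNum, firstNum int64,
	localHash, remoteHash func(num int64) (ethCommon.Hash, error)) (int64, error) {
	for num := uncleNum - 1; num >= firstNum; num-- {
		local, err := localHash(num)
		if err != nil {
			return 0, err
		}
		remote, err := remoteHash(num)
		if err != nil {
			return 0, err
		}
		if local == remote {
			return num, nil
		}
	}
	return 0, fmt.Errorf("no common ancestor found above block %d", firstNum)
}
```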
@@ -730,15 +661,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) { consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit() auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit() wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
rollupVars := rollupInit.RollupVariables() rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding) auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
@@ -783,17 +714,12 @@ func (s *Synchronizer) resetState(block *common.Block) error {
s.vars.WDelayer = *wDelayer s.vars.WDelayer = *wDelayer
} }
batch, err := s.historyDB.GetLastBatch() batchNum, err := s.historyDB.GetLastBatchNum()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err)) return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
} }
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
batch = &common.Batch{} batchNum = 0
}
err = s.stateDB.Reset(batch.BatchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
} }
lastL1BatchBlockNum, err := s.historyDB.GetLastL1BatchBlockNum() lastL1BatchBlockNum, err := s.historyDB.GetLastL1BatchBlockNum()
@@ -813,15 +739,20 @@ func (s *Synchronizer) resetState(block *common.Block) error {
lastForgeL1TxsNum = &n lastForgeL1TxsNum = &n
} }
s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum) err = s.stateDB.Reset(batchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
}
if err := s.updateCurrentNextSlotIfSync(true, false); err != nil { s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
} }
// rollupSync retrieves all the Rollup Smart Contract Data that happened at // rollupSync retreives all the Rollup Smart Contract Data that happened at
// ethBlock.blockNum with ethBlock.Hash. // ethBlock.blockNum with ethBlock.Hash.
func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, error) { func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, error) {
blockNum := ethBlock.Num blockNum := ethBlock.Num
@@ -830,16 +761,19 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
// Get rollup events in the block, and make sure the block hash matches // Get rollup events in the block, and make sure the block hash matches
// the expected one. // the expected one.
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash) rollupEvents, blockHash, err := s.ethClient.RollupEventsByBlock(blockNum)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if rollupEvents == nil { if blockHash == nil {
return &rollupData, nil return &rollupData, nil
} }
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Rollup events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
var nextForgeL1TxsNum int64 // forgeL1TxsNum for the next L1Batch var nextForgeL1TxsNum int64 // forgeL1TxsNum for the next L1Batch
nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum() nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum()
@@ -867,7 +801,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
forgeBatchArgs, sender, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash, forgeBatchArgs, sender, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash,
evtForgeBatch.L1UserTxsLen) evtForgeBatch.L1UserTxsLen)
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupForgeBatchArgs: %w", err)) return nil, tracerr.Wrap(err)
} }
batchNum := common.BatchNum(evtForgeBatch.BatchNum) batchNum := common.BatchNum(evtForgeBatch.BatchNum)
@@ -898,9 +832,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
position = len(l1UserTxs) position = len(l1UserTxs)
} }
l1TxsAuth := make([]common.AccountCreationAuth,
0, len(forgeBatchArgs.L1CoordinatorTxsAuths))
// Get L1 Coordinator Txs // Get L1 Coordinator Txs
for i := range forgeBatchArgs.L1CoordinatorTxs { for i := range forgeBatchArgs.L1CoordinatorTxs {
l1CoordinatorTx := forgeBatchArgs.L1CoordinatorTxs[i] l1CoordinatorTx := forgeBatchArgs.L1CoordinatorTxs[i]
@@ -916,30 +847,9 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx) batchData.L1CoordinatorTxs = append(batchData.L1CoordinatorTxs, *l1Tx)
position++ position++
// Create a slice of account creation auth to be
// inserted later if not exists
if l1CoordinatorTx.FromEthAddr != common.RollupConstEthAddressInternalOnly {
l1CoordinatorTxAuth := forgeBatchArgs.L1CoordinatorTxsAuths[i]
l1TxsAuth = append(l1TxsAuth, common.AccountCreationAuth{
EthAddr: l1CoordinatorTx.FromEthAddr,
BJJ: l1CoordinatorTx.FromBJJ,
Signature: l1CoordinatorTxAuth,
})
}
// fmt.Println("DGB l1coordtx") // fmt.Println("DGB l1coordtx")
} }
// Insert the slice of account creation auth
// only if the node run as a coordinator
if s.l2DB != nil && len(l1TxsAuth) > 0 {
err = s.l2DB.AddManyAccountCreationAuth(l1TxsAuth)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Insert all the txs forged in this batch (l1UserTxs, // Insert all the txs forged in this batch (l1UserTxs,
// L1CoordinatorTxs, PoolL2Txs) into stateDB so that they are // L1CoordinatorTxs, PoolL2Txs) into stateDB so that they are
// processed. // processed.
@@ -952,8 +862,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
} }
// Transform L2 txs to PoolL2Txs // Transform L2 txs to PoolL2Txs
// NOTE: This is a big ugly, find a better way poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData) // NOTE: This is a big ugly, find a better way
poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData)
if int(forgeBatchArgs.VerifierIdx) >= len(s.consts.Rollup.Verifiers) { if int(forgeBatchArgs.VerifierIdx) >= len(s.consts.Rollup.Verifiers) {
return nil, tracerr.Wrap(fmt.Errorf("forgeBatchArgs.VerifierIdx (%v) >= "+ return nil, tracerr.Wrap(fmt.Errorf("forgeBatchArgs.VerifierIdx (%v) >= "+
@@ -975,16 +884,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if s.stateDB.CurrentBatch() != batchNum {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
"evtForgeBatch.BatchNum = (%v)",
s.stateDB.CurrentBatch(), batchNum))
}
if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
"forgeBatchArgs.NewStRoot (%v)",
s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
}
// Transform processed PoolL2 txs to L2 and store in BatchData // Transform processed PoolL2 txs to L2 and store in BatchData
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a big uggly, find a better way l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a big uggly, find a better way
@@ -1025,19 +924,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
} }
batchData.CreatedAccounts = processTxsOut.CreatedAccounts batchData.CreatedAccounts = processTxsOut.CreatedAccounts
batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
len(processTxsOut.UpdatedAccounts))
for _, acc := range processTxsOut.UpdatedAccounts {
batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
common.AccountUpdate{
EthBlockNum: blockNum,
BatchNum: batchNum,
Idx: acc.Idx,
Nonce: acc.Nonce,
Balance: acc.Balance,
})
}
slotNum := int64(0) slotNum := int64(0)
if ethBlock.Num >= s.consts.Auction.GenesisBlockNum { if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) / slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
@@ -1180,16 +1066,19 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
var auctionData = common.NewAuctionData() var auctionData = common.NewAuctionData()
// Get auction events in the block // Get auction events in the block
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash) auctionEvents, blockHash, err := s.ethClient.AuctionEventsByBlock(blockNum)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if auctionEvents == nil { if blockHash == nil {
return &auctionData, nil return &auctionData, nil
} }
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Auction events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
// Get bids // Get bids
for _, evt := range auctionEvents.NewBid { for _, evt := range auctionEvents.NewBid {
@@ -1279,16 +1168,19 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
wDelayerData := common.NewWDelayerData() wDelayerData := common.NewWDelayerData()
// Get wDelayer events in the block // Get wDelayer events in the block
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash) wDelayerEvents, blockHash, err := s.ethClient.WDelayerEventsByBlock(blockNum)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if wDelayerEvents == nil { if blockHash == nil {
return &wDelayerData, nil return &wDelayerData, nil
} }
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in WDelayer events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
for _, evt := range wDelayerEvents.Deposit { for _, evt := range wDelayerEvents.Deposit {
wDelayerData.Deposits = append(wDelayerData.Deposits, common.WDelayerTransfer{ wDelayerData.Deposits = append(wDelayerData.Deposits, common.WDelayerTransfer{

View File

@@ -15,9 +15,9 @@ import (
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
@@ -43,8 +43,7 @@ func accountsCmp(accounts []common.Account) func(i, j int) bool {
// Check Sync output and HistoryDB state against expected values generated by // Check Sync output and HistoryDB state against expected values generated by
// til // til
func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBlock *common.BlockData) {
syncBlock *common.BlockData) {
// Check Blocks // Check Blocks
dbBlocks, err := s.historyDB.GetAllBlocks() dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err) require.NoError(t, err)
@@ -173,8 +172,6 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block,
*exit = syncBatch.ExitTree[j] *exit = syncBatch.ExitTree[j]
} }
assert.Equal(t, batch.Batch, syncBatch.Batch) assert.Equal(t, batch.Batch, syncBatch.Batch)
// Ignore updated accounts
syncBatch.UpdatedAccounts = nil
assert.Equal(t, batch, syncBatch) assert.Equal(t, batch, syncBatch)
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
@@ -304,36 +301,24 @@ func TestMain(m *testing.M) {
os.Exit(exitVal) os.Exit(exitVal)
} }
func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db.L2DB) { func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
// Int State DB // Int State DB
dir, err := ioutil.TempDir("", "tmpdb") dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err) require.NoError(t, err)
deleteme = append(deleteme, dir) deleteme = append(deleteme, dir)
stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, stateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
Type: statedb.TypeSynchronizer, NLevels: 32})
require.NoError(t, err) require.NoError(t, err)
// Init History DB // Init History DB
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, nil)
// Clear DB // Clear DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
// Init L2 DB return stateDB, historyDB
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
return stateDB, historyDB, l2DB
}
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
} }
func TestSyncGeneral(t *testing.T) { func TestSyncGeneral(t *testing.T) {
@@ -341,7 +326,7 @@ func TestSyncGeneral(t *testing.T) {
// Setup // Setup
// //
stateDB, historyDB, l2DB := newTestModules(t) stateDB, historyDB := newTestModules(t)
// Init eth client // Init eth client
var timer timer var timer timer
@@ -351,9 +336,10 @@ func TestSyncGeneral(t *testing.T) {
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup) client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer // Create Synchronizer
s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
log.Error(err)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@@ -365,7 +351,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
// Test Sync for rollup genesis block // Test Sync for rollup genesis block
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -388,7 +374,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), dbBlocks[1].Num) assert.Equal(t, int64(1), dbBlocks[1].Num)
// Sync again and expect no new blocks // Sync again and expect no new blocks
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.Nil(t, syncBlock) require.Nil(t, syncBlock)
@@ -448,22 +434,12 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs)) require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11432094872416618651837327395264042968926668786266585816625577088890451620254")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("16914212635847451457076355431350059348585556180740555407203882688922702410093")
// blocks 1 (blockNum=3) // blocks 1 (blockNum=3)
i = 1 i = 1
require.Equal(t, 3, int(blocks[i].Block.Num)) require.Equal(t, 3, int(blocks[i].Block.Num))
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13535760140937349829640752733057594576151546047374619177689224612061148090678")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("19413739476363469870744893742469056615496274423228302914851564791727474664804")
// Generate extra required data // Generate extra required data
ethAddTokens(blocks, client) ethAddTokens(blocks, client)
@@ -485,7 +461,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 2 // Block 2
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -502,7 +478,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 3 // Block 3
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
@@ -520,15 +496,13 @@ func TestSyncGeneral(t *testing.T) {
// Block 4 // Block 4
// Generate 2 withdraws manually // Generate 2 withdraws manually
_, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256, _, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256, big.NewInt(100), []*big.Int{}, true)
big.NewInt(100), []*big.Int{}, true)
require.NoError(t, err) require.NoError(t, err)
_, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258, _, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258, big.NewInt(50), []*big.Int{}, false)
big.NewInt(50), []*big.Int{}, false)
require.NoError(t, err) require.NoError(t, err)
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -579,7 +553,7 @@ func TestSyncGeneral(t *testing.T) {
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -640,12 +614,6 @@ func TestSyncGeneral(t *testing.T) {
blocks, err = tc.GenerateBlocks(set2) blocks, err = tc.GenerateBlocks(set2)
require.NoError(t, err) require.NoError(t, err)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("14095767774967159269372103336737817266053275274769794195030162905513860477094")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("2095674348545184674850951945506660952512376416769035169971006930847780339914")
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
client.CtlRollback() client.CtlRollback()
} }
@@ -664,7 +632,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// First sync detects the reorg and discards 4 blocks // First sync detects the reorg and discards 4 blocks
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
expetedDiscards := int64(4) expetedDiscards := int64(4)
require.Equal(t, &expetedDiscards, discards) require.Equal(t, &expetedDiscards, discards)
@@ -692,7 +660,7 @@ func TestSyncGeneral(t *testing.T) {
// Sync blocks 2-6 // Sync blocks 2-6
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -731,7 +699,7 @@ func TestSyncGeneral(t *testing.T) {
} }
func TestSyncForgerCommitment(t *testing.T) { func TestSyncForgerCommitment(t *testing.T) {
stateDB, historyDB, l2DB := newTestModules(t) stateDB, historyDB := newTestModules(t)
// Init eth client // Init eth client
var timer timer var timer timer
@@ -744,7 +712,7 @@ func TestSyncForgerCommitment(t *testing.T) {
client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup) client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
// Create Synchronizer // Create Synchronizer
s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
require.NoError(t, err) require.NoError(t, err)
@@ -815,7 +783,7 @@ func TestSyncForgerCommitment(t *testing.T) {
// be in sync // be in sync
for { for {
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -827,14 +795,14 @@ func TestSyncForgerCommitment(t *testing.T) {
// Store ForgerComitmnent observed at every block by the live synchronizer // Store ForgerComitmnent observed at every block by the live synchronizer
syncCommitment := map[int64]bool{} syncCommitment := map[int64]bool{}
// Store ForgerComitmnent observed at every block by a synchronizer that is restarted // Store ForgerComitmnent observed at every block by a syncrhonizer that is restarted
syncRestartedCommitment := map[int64]bool{} syncRestartedCommitment := map[int64]bool{}
for _, block := range blocks { for _, block := range blocks {
// Add block data to the smart contracts // Add block data to the smart contracts
err = client.CtlAddBlocks([]common.BlockData{block}) err = client.CtlAddBlocks([]common.BlockData{block})
require.NoError(t, err) require.NoError(t, err)
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -844,7 +812,7 @@ func TestSyncForgerCommitment(t *testing.T) {
require.True(t, stats.Synced()) require.True(t, stats.Synced())
syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
s2, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{ s2, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
require.NoError(t, err) require.NoError(t, err)

View File

@@ -2,7 +2,6 @@ package debugapi
import ( import (
"context" "context"
"net"
"net/http" "net/http"
"time" "time"
@@ -13,7 +12,6 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
func handleNoRoute(c *gin.Context) { func handleNoRoute(c *gin.Context) {
@@ -101,16 +99,14 @@ func (a *DebugAPI) handleSyncStats(c *gin.Context) {
c.JSON(http.StatusOK, stats) c.JSON(http.StatusOK, stats)
} }
// Run starts the http server of the DebugAPI. To stop it, pass a context // Run starts the http server of the DebugAPI. To stop it, pass a context with
// with cancellation (see `debugapi_test.go` for an example). // cancelation (see `debugapi_test.go` for an example).
func (a *DebugAPI) Run(ctx context.Context) error { func (a *DebugAPI) Run(ctx context.Context) error {
api := gin.Default() api := gin.Default()
api.NoRoute(handleNoRoute) api.NoRoute(handleNoRoute)
api.Use(cors.Default()) api.Use(cors.Default())
debugAPI := api.Group("/debug") debugAPI := api.Group("/debug")
debugAPI.GET("/metrics", gin.WrapH(promhttp.Handler()))
debugAPI.GET("sdb/batchnum", a.handleCurrentBatch) debugAPI.GET("sdb/batchnum", a.handleCurrentBatch)
debugAPI.GET("sdb/mtroot", a.handleMTRoot) debugAPI.GET("sdb/mtroot", a.handleMTRoot)
// Accounts returned by these endpoints will always have BatchNum = 0, // Accounts returned by these endpoints will always have BatchNum = 0,
@@ -122,20 +118,16 @@ func (a *DebugAPI) Run(ctx context.Context) error {
debugAPI.GET("sync/stats", a.handleSyncStats) debugAPI.GET("sync/stats", a.handleSyncStats)
debugAPIServer := &http.Server{ debugAPIServer := &http.Server{
Addr: a.addr,
Handler: api, Handler: api,
// Use some hardcoded numbers that are suitable for testing // Use some hardcoded numberes that are suitable for testing
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("DebugAPI is ready at %v", a.addr)
go func() { go func() {
if err := debugAPIServer.Serve(listener); err != nil && log.Infof("DebugAPI is ready at %v", a.addr)
tracerr.Unwrap(err) != http.ErrServerClosed { if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
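Since `Run` is stopped through context cancellation rather than an explicit Close, a caller typically starts it in a goroutine and keeps the cancel function as the stop handle. A minimal sketch, assuming an already-constructed `*debugapi.DebugAPI`:

```
package node

import (
	"context"

	"github.com/hermeznetwork/hermez-node/debugapi"
	"github.com/hermeznetwork/hermez-node/log"
)

// startDebugAPI runs the DebugAPI in the background and returns a stop function.
func startDebugAPI(a *debugapi.DebugAPI) (stop func()) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		if err := a.Run(ctx); err != nil {
			log.Errorw("DebugAPI.Run", "err", err)
		}
	}()
	return cancel
}
```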

Some files were not shown because too many files have changed in this diff.