Compare commits

..

10 Commits

Author SHA1 Message Date
Eduard S
b2b86d0069 Add logs 2021-02-11 18:27:01 +01:00
Eduard S
60b7bef8ab WIP6 2021-02-11 18:27:01 +01:00
Eduard S
bea042ea52 Verify stateroot at sync 2021-02-11 18:27:01 +01:00
Eduard S
db46f61d64 WIP5 2021-02-11 18:26:59 +01:00
Eduard S
cdf110b8e7 WIP4 2021-02-11 18:22:54 +01:00
Eduard S
10edd5f2c2 WIP3 2021-02-11 18:22:54 +01:00
Eduard S
124d2e84f2 WIP 2021-02-11 18:22:54 +01:00
Eduard S
aa9367f7af Make TxManager more robust 2021-02-11 18:22:52 +01:00
Eduard S
7e6d820ac1 WIP 2021-02-11 18:21:36 +01:00
Eduard S
7982d9292a Update coordinator to work better on a real network
- cli / node
    - Update the SIGINT handler so that after 3 SIGINTs the process terminates
      unconditionally (see the first sketch after this message)
- coordinator
    - Store stats without pointer
    - In every function that sends a value over a channel, also select on
      context cancellation to avoid a deadlock when the node is stopped and
      nothing is left reading from the (unbuffered) channel; see the second
      sketch after this message.
    - Abstract `canForge` so that it can be used outside of the `Coordinator`
    - In `canForge`, check the blockNumber against both the current and the next slot.
    - Update tests due to smart contract changes in slot handling, and minimum
      bid defaults
    - TxManager
        - Add consts, vars and stats to allow evaluating `canForge`
        - Add `canForge` method (not used yet)
        - Store batch and nonce status (last successful and last pending)
        - Track nonces internally instead of querying the Ethereum node (this
          is required to work with ganache when there are pending txs); see
          the last sketch after this message
        - Handle the (common) case of the receipt not being found after the tx
          is sent.
        - Don't start the main loop until an initial message with the stats
          and vars is received (so that inside the loop the stats and vars are
          already set to synchronizer values)
- eth / ethereum client
    - Add the methods needed to build the transaction auth object manually so
      that the nonce, gas price, gas limit, etc. can be set explicitly
    - Update `RollupForgeBatch` to take an auth object as input (so that the
      coordinator can set parameters manually)
- synchronizer
    - In stats, add `NextSlot`
2021-02-11 18:21:36 +01:00
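A minimal sketch of the SIGINT behaviour described above: the first Ctrl-C requests a graceful shutdown, the third one terminates unconditionally. This is an illustration using only the Go standard library, not the node's actual handler; only the threshold of 3 comes from the commit message.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT)

	go func() {
		count := 0
		for range sigCh {
			count++
			if count == 1 {
				// First SIGINT: request a graceful shutdown.
				fmt.Println("SIGINT received, shutting down (press Ctrl-C 2 more times to force)")
				cancel()
			}
			if count >= 3 {
				// Third SIGINT: give up on graceful shutdown and exit.
				fmt.Println("3 SIGINTs received, terminating unconditionally")
				os.Exit(1)
			}
		}
	}()

	<-ctx.Done()
	// Simulate cleanup work that can be skipped by pressing Ctrl-C twice more.
	time.Sleep(5 * time.Second)
	fmt.Println("graceful shutdown complete")
}
```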
77 changed files with 2155 additions and 4558 deletions
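The deadlock mentioned in the coordinator bullet happens when a goroutine sends on an unbuffered channel whose reader has already exited. A hedged sketch of the pattern the message describes, guarding every send with a `select` on `ctx.Done()` (illustrative names, not the coordinator's code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sendStats delivers stats over an unbuffered channel, but aborts as soon as
// the context is cancelled, so the sender never blocks forever when the
// receiver has already stopped.
func sendStats(ctx context.Context, statsCh chan<- int, stats int) error {
	select {
	case statsCh <- stats:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	statsCh := make(chan int) // no queue: a send blocks until someone reads

	// Simulate the node being stopped before anything reads from statsCh.
	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel()
	}()

	if err := sendStats(ctx, statsCh, 42); err != nil {
		fmt.Println("send aborted:", err) // "context canceled" instead of a deadlock
	}
}
```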

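The TxManager and eth-client bullets (manual auth object, locally tracked nonce, tolerating a missing receipt) can be illustrated against the public go-ethereum API. The RPC endpoint, chain ID, gas limit and placeholder tx hash below are assumptions for illustration only, and the private key is the example key from the cli/node README further down; this is a sketch, not the node's TxManager.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx := context.Background()

	// Assumed local endpoint; replace with the real node URL.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Example key from the cli/node README (do not use in production).
	key, err := crypto.HexToECDSA("618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde")
	if err != nil {
		log.Fatal(err)
	}
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// Track the nonce locally: ask the node once, then increment after every
	// sent tx, instead of asking before each tx (which breaks on ganache when
	// there are pending txs).
	nonce, err := client.PendingNonceAt(ctx, addr)
	if err != nil {
		log.Fatal(err)
	}
	gasPrice, err := client.SuggestGasPrice(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Build the auth object manually so nonce, gas price and gas limit are
	// fully under the caller's control (chain ID 1337 is an assumption).
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		log.Fatal(err)
	}
	auth.Nonce = new(big.Int).SetUint64(nonce)
	auth.GasPrice = gasPrice
	auth.GasLimit = 1_000_000
	auth.Context = ctx

	// ...here the auth object would be passed to a contract binding call,
	// e.g. a forge-batch method; after sending, bump the local nonce.
	nonce++

	// Waiting for a receipt must tolerate "not found": right after sending,
	// the receipt commonly does not exist yet.
	txHash := common.Hash{} // placeholder; use the hash of the sent tx
	for i := 0; i < 10; i++ {
		receipt, err := client.TransactionReceipt(ctx, txHash)
		if errors.Is(err, ethereum.NotFound) {
			time.Sleep(time.Second) // not an error: tx not mined yet
			continue
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("tx mined in block", receipt.BlockNumber)
		break
	}
	_ = auth
}
```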

@@ -4,10 +4,6 @@ Go implementation of the Hermez node.
## Developing ## Developing
### Go version
The `hermez-node` has been tested with go version 1.14
### Unit testing ### Unit testing
Running the unit tests requires a connection to a PostgreSQL database. You can Running the unit tests requires a connection to a PostgreSQL database. You can
@@ -15,7 +11,7 @@ start PostgreSQL with docker easily this way (where `yourpasswordhere` should
be your password): be your password):
``` ```
POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres POSTGRES_PASS=yourpasswordhere sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
``` ```
Afterwards, run the tests with the password as env var: Afterwards, run the tests with the password as env var:


@@ -4,7 +4,10 @@ import (
"net/http" "net/http"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/tracerr"
) )
func (a *API) getAccount(c *gin.Context) { func (a *API) getAccount(c *gin.Context) {
@@ -20,6 +23,16 @@ func (a *API) getAccount(c *gin.Context) {
return return
} }
// Get balance from stateDB
account, err := a.s.LastGetAccount(*idx)
if err != nil {
retSQLErr(err, c)
return
}
apiAccount.Balance = apitypes.NewBigIntStr(account.Balance)
apiAccount.Nonce = account.Nonce
c.JSON(http.StatusOK, apiAccount) c.JSON(http.StatusOK, apiAccount)
} }
@@ -44,6 +57,26 @@ func (a *API) getAccounts(c *gin.Context) {
return return
} }
// Get balances from stateDB
if err := a.s.LastRead(func(sdb *statedb.Last) error {
for x, apiAccount := range apiAccounts {
idx, err := stringToIdx(string(apiAccount.Idx), "Account Idx")
if err != nil {
return tracerr.Wrap(err)
}
account, err := sdb.GetAccount(*idx)
if err != nil {
return tracerr.Wrap(err)
}
apiAccounts[x].Balance = apitypes.NewBigIntStr(account.Balance)
apiAccounts[x].Nonce = account.Nonce
}
return nil
}); err != nil {
retSQLErr(err, c)
return
}
// Build succesfull response // Build succesfull response
type accountResponse struct { type accountResponse struct {
Accounts []historydb.AccountAPI `json:"accounts"` Accounts []historydb.AccountAPI `json:"accounts"`


@@ -2,19 +2,41 @@ package api
import ( import (
"errors" "errors"
"sync"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
// TODO: Add correct values to constants
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Status define status of the network
type Status struct {
sync.RWMutex
Network Network `json:"network"`
Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// API serves HTTP requests to allow external interaction with the Hermez node // API serves HTTP requests to allow external interaction with the Hermez node
type API struct { type API struct {
h *historydb.HistoryDB h *historydb.HistoryDB
cg *configAPI cg *configAPI
s *statedb.StateDB
l2 *l2db.L2DB l2 *l2db.L2DB
status Status
chainID uint16 chainID uint16
hermezAddress ethCommon.Address hermezAddress ethCommon.Address
} }
@@ -24,7 +46,9 @@ func NewAPI(
coordinatorEndpoints, explorerEndpoints bool, coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine, server *gin.Engine,
hdb *historydb.HistoryDB, hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *Config,
) (*API, error) { ) (*API, error) {
// Check input // Check input
// TODO: is stateDB only needed for explorer endpoints or for both? // TODO: is stateDB only needed for explorer endpoints or for both?
@@ -34,20 +58,19 @@ func NewAPI(
if explorerEndpoints && hdb == nil { if explorerEndpoints && hdb == nil {
return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB")) return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
} }
consts, err := hdb.GetConstants()
if err != nil {
return nil, err
}
a := &API{ a := &API{
h: hdb, h: hdb,
cg: &configAPI{ cg: &configAPI{
RollupConstants: *newRollupConstants(consts.Rollup), RollupConstants: *newRollupConstants(config.RollupConstants),
AuctionConstants: consts.Auction, AuctionConstants: config.AuctionConstants,
WDelayerConstants: consts.WDelayer, WDelayerConstants: config.WDelayerConstants,
}, },
s: sdb,
l2: l2db, l2: l2db,
chainID: consts.ChainID, status: Status{},
hermezAddress: consts.HermezAddress, chainID: config.ChainID,
hermezAddress: config.HermezAddress,
} }
// Add coordinator endpoints // Add coordinator endpoints


@@ -8,7 +8,6 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"net"
"net/http" "net/http"
"os" "os"
"strconv" "strconv"
@@ -23,6 +22,7 @@ import (
"github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
@@ -39,8 +39,8 @@ type Pendinger interface {
New() Pendinger New() Pendinger
} }
const apiAddr = ":4010" const apiPort = ":4010"
const apiURL = "http://localhost" + apiAddr + "/" const apiURL = "http://localhost" + apiPort + "/"
var SetBlockchain = ` var SetBlockchain = `
Type: Blockchain Type: Blockchain
@@ -180,13 +180,12 @@ type testCommon struct {
auctionVars common.AuctionVariables auctionVars common.AuctionVariables
rollupVars common.RollupVariables rollupVars common.RollupVariables
wdelayerVars common.WDelayerVariables wdelayerVars common.WDelayerVariables
nextForgers []historydb.NextForgerAPI nextForgers []NextForger
} }
var tc testCommon var tc testCommon
var config configAPI var config configAPI
var api *API var api *API
var stateAPIUpdater *StateAPIUpdater
// TestMain initializes the API server, and fill HistoryDB and StateDB with fake data, // TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
// emulating the task of the synchronizer in order to have data to be returned // emulating the task of the synchronizer in order to have data to be returned
@@ -203,12 +202,26 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
apiConnCon := db.NewAPICnnectionController(1, time.Second) apiConnCon := db.NewAPICnnectionController(1, time.Second)
hdb := historydb.NewHistoryDB(database, database, apiConnCon) hdb := historydb.NewHistoryDB(database, apiConnCon)
if err != nil {
panic(err)
}
// StateDB
dir, err := ioutil.TempDir("", "tmpdb")
if err != nil {
panic(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
}()
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeTxSelector, NLevels: 0})
if err != nil { if err != nil {
panic(err) panic(err)
} }
// L2DB // L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants) // Config (smart contract constants)
chainID := uint16(0) chainID := uint16(0)
@@ -221,53 +234,29 @@ func TestMain(m *testing.M) {
// API // API
apiGin := gin.Default() apiGin := gin.Default()
// Reset DB
test.WipeDB(hdb.DB())
constants := &historydb.Constants{
SCConsts: common.SCConsts{
Rollup: _config.RollupConstants,
Auction: _config.AuctionConstants,
WDelayer: _config.WDelayerConstants,
},
ChainID: chainID,
HermezAddress: _config.HermezAddress,
}
if err := hdb.SetConstants(constants); err != nil {
panic(err)
}
nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10,
MinFeeUSD: 0,
}
if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err)
}
api, err = NewAPI( api, err = NewAPI(
true, true,
true, true,
apiGin, apiGin,
hdb, hdb,
sdb,
l2DB, l2DB,
&_config,
) )
if err != nil { if err != nil {
log.Error(err)
panic(err) panic(err)
} }
// Start server // Start server
listener, err := net.Listen("tcp", apiAddr) //nolint:gosec server := &http.Server{Addr: apiPort, Handler: apiGin}
if err != nil {
panic(err)
}
server := &http.Server{Handler: apiGin}
go func() { go func() {
if err := server.Serve(listener); err != nil && if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
tracerr.Unwrap(err) != http.ErrServerClosed {
panic(err) panic(err)
} }
}() }()
// Reset DB
test.WipeDB(api.h.DB())
// Genratre blockchain data with til // Genratre blockchain data with til
tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{ tilCfgExtra := til.ConfigExtra{
@@ -361,6 +350,19 @@ func TestMain(m *testing.M) {
} }
} }
// lastBlockNum2 := blocksData[len(blocksData)-1].Block.EthBlockNum
// Add accounts to StateDB
for i := 0; i < len(commonAccounts); i++ {
if _, err := api.s.CreateAccount(commonAccounts[i].Idx, &commonAccounts[i]); err != nil {
panic(err)
}
}
// Make a checkpoint to make the accounts available in Last
if err := api.s.MakeCheckpoint(); err != nil {
panic(err)
}
// Generate Coordinators and add them to HistoryDB // Generate Coordinators and add them to HistoryDB
const nCoords = 10 const nCoords = 10
commonCoords := test.GenCoordinators(nCoords, commonBlocks) commonCoords := test.GenCoordinators(nCoords, commonBlocks)
@@ -468,19 +470,19 @@ func TestMain(m *testing.M) {
if err = api.h.AddBids(bids); err != nil { if err = api.h.AddBids(bids); err != nil {
panic(err) panic(err)
} }
bootForger := historydb.NextForgerAPI{ bootForger := NextForger{
Coordinator: historydb.CoordinatorAPI{ Coordinator: historydb.CoordinatorAPI{
Forger: auctionVars.BootCoordinator, Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL, URL: auctionVars.BootCoordinatorURL,
}, },
} }
// Set next forgers: set all as boot coordinator then replace the non boot coordinators // Set next forgers: set all as boot coordinator then replace the non boot coordinators
nextForgers := []historydb.NextForgerAPI{} nextForgers := []NextForger{}
var initBlock int64 = 140 var initBlock int64 = 140
var deltaBlocks int64 = 40 var deltaBlocks int64 = 40
for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ { for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
fromBlock := initBlock + deltaBlocks*int64(i-1) fromBlock := initBlock + deltaBlocks*int64(i-1)
bootForger.Period = historydb.Period{ bootForger.Period = Period{
SlotNum: int64(i), SlotNum: int64(i),
FromBlock: fromBlock, FromBlock: fromBlock,
ToBlock: fromBlock + deltaBlocks - 1, ToBlock: fromBlock + deltaBlocks - 1,
@@ -520,12 +522,6 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000), WithdrawalDelay: uint64(3000),
} }
stateAPIUpdater = NewStateAPIUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars,
Auction: auctionVars,
WDelayer: wdelayerVars,
}, constants)
// Generate test data, as expected to be received/sended from/to the API // Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords) testCoords := genTestCoordinators(commonCoords)
testBids := genTestBids(commonBlocks, testCoords, bids) testBids := genTestBids(commonBlocks, testCoords, bids)
@@ -533,41 +529,13 @@ func TestMain(m *testing.M) {
testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks) testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks)
testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs) testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs)
poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts) poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts)
// Add balance and nonce to historyDB
accounts := genTestAccounts(commonAccounts, testTokens)
accUpdates := []common.AccountUpdate{}
for i := 0; i < len(accounts); i++ {
balance := new(big.Int)
balance.SetString(string(*accounts[i].Balance), 10)
idx, err := stringToIdx(string(accounts[i].Idx), "foo")
if err != nil {
panic(err)
}
accUpdates = append(accUpdates, common.AccountUpdate{
EthBlockNum: 0,
BatchNum: 1,
Idx: *idx,
Nonce: 0,
Balance: balance,
})
accUpdates = append(accUpdates, common.AccountUpdate{
EthBlockNum: 0,
BatchNum: 1,
Idx: *idx,
Nonce: accounts[i].Nonce,
Balance: balance,
})
}
if err := api.h.AddAccountUpdates(accUpdates); err != nil {
panic(err)
}
tc = testCommon{ tc = testCommon{
blocks: commonBlocks, blocks: commonBlocks,
tokens: testTokens, tokens: testTokens,
batches: testBatches, batches: testBatches,
fullBatches: testFullBatches, fullBatches: testFullBatches,
coordinators: testCoords, coordinators: testCoords,
accounts: accounts, accounts: genTestAccounts(commonAccounts, testTokens),
txs: testTxs, txs: testTxs,
exits: testExits, exits: testExits,
poolTxsToSend: poolTxsToSend, poolTxsToSend: poolTxsToSend,
@@ -603,18 +571,21 @@ func TestMain(m *testing.M) {
if err := database.Close(); err != nil { if err := database.Close(); err != nil {
panic(err) panic(err)
} }
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
os.Exit(result) os.Exit(result)
} }
func TestTimeout(t *testing.T) { func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez") databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond) apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO) hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
require.NoError(t, err) require.NoError(t, err)
// L2DB // L2DB
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO) l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
// API // API
apiGinTO := gin.Default() apiGinTO := gin.Default()
@@ -629,21 +600,21 @@ func TestTimeout(t *testing.T) {
<-finishWait <-finishWait
}) })
// Start server // Start server
serverTO := &http.Server{Handler: apiGinTO} serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
listener, err := net.Listen("tcp", ":4444") //nolint:gosec
require.NoError(t, err)
go func() { go func() {
if err := serverTO.Serve(listener); err != nil && if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
tracerr.Unwrap(err) != http.ErrServerClosed {
require.NoError(t, err) require.NoError(t, err)
} }
}() }()
_config := getConfigTest(0)
_, err = NewAPI( _, err = NewAPI(
true, true,
true, true,
apiGinTO, apiGinTO,
hdbTO, hdbTO,
nil,
l2DBTO, l2DBTO,
&_config,
) )
require.NoError(t, err) require.NoError(t, err)


@@ -10,7 +10,6 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/russross/meddler"
) )
const ( const (
@@ -47,33 +46,24 @@ var (
func retSQLErr(err error, c *gin.Context) { func retSQLErr(err error, c *gin.Context) {
log.Warnw("HTTP API SQL request error", "err", err) log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error() errMsg := tracerr.Unwrap(err).Error()
retDupKey := func(errCode pq.ErrorCode) {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if errCode == "23505" {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
} else {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errMsg,
})
}
}
if errMsg == errCtxTimeout { if errMsg == errCtxTimeout {
c.JSON(http.StatusServiceUnavailable, errorMsg{ c.JSON(http.StatusServiceUnavailable, errorMsg{
Message: errSQLTimeout, Message: errSQLTimeout,
}) })
} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok { } else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
retDupKey(sqlErr.Code) // https://www.postgresql.org/docs/current/errcodes-appendix.html
} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok { if sqlErr.Code == "23505" {
retDupKey(sqlErr.(*pq.Error).Code) c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
}
} else if tracerr.Unwrap(err) == sql.ErrNoRows { } else if tracerr.Unwrap(err) == sql.ErrNoRows {
c.JSON(http.StatusNotFound, errorMsg{ c.JSON(http.StatusNotFound, errorMsg{
Message: errMsg, Message: err.Error(),
}) })
} else { } else {
c.JSON(http.StatusInternalServerError, errorMsg{ c.JSON(http.StatusInternalServerError, errorMsg{
Message: errMsg, Message: err.Error(),
}) })
} }
} }


@@ -99,9 +99,7 @@ func TestGetSlot(t *testing.T) {
nil, &fetchedSlot, nil, &fetchedSlot,
), ),
) )
// ni, err := api.h.GetNodeInfoAPI() emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
// assert.NoError(t, err)
emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
assertSlot(t, emptySlot, fetchedSlot) assertSlot(t, emptySlot, fetchedSlot)
// Invalid slotNum // Invalid slotNum
@@ -129,10 +127,8 @@ func TestGetSlots(t *testing.T) {
err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter) err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
assert.NoError(t, err) assert.NoError(t, err)
allSlots := tc.slots allSlots := tc.slots
// ni, err := api.h.GetNodeInfoAPI()
// assert.NoError(t, err)
for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ { for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars) emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
allSlots = append(allSlots, emptySlot) allSlots = append(allSlots, emptySlot)
} }
assertSlots(t, allSlots, fetchedSlots) assertSlots(t, allSlots, fetchedSlots)


@@ -2,160 +2,305 @@ package api
import ( import (
"database/sql" "database/sql"
"fmt"
"math/big"
"net/http" "net/http"
"sync" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
// Network define status of the network
type Network struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *historydb.BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"`
}
// NextForger is a representation of the information of a coordinator and the period will forge
type NextForger struct {
Coordinator historydb.CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// Period is a representation of a period
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
func (a *API) getState(c *gin.Context) { func (a *API) getState(c *gin.Context) {
stateAPI, err := a.h.GetStateAPI() // TODO: There are no events for the buckets information, so now this information will be 0
if err != nil { a.status.RLock()
retBadReq(err, c) status := a.status //nolint
return a.status.RUnlock()
} c.JSON(http.StatusOK, status) //nolint
c.JSON(http.StatusOK, stateAPI)
} }
// StateAPIUpdater is an utility object to facilitate updating the StateAPI // SC Vars
type StateAPIUpdater struct {
hdb *historydb.HistoryDB // SetRollupVariables set Status.Rollup variables
state historydb.StateAPI func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
config historydb.NodeConfig a.status.Lock()
vars common.SCVariablesPtr var rollupVAPI historydb.RollupVariablesAPI
consts historydb.Constants rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
rw sync.RWMutex rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
for i, bucket := range rollupVariables.Buckets {
var apiBucket historydb.BucketParamsAPI
apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
rollupVAPI.Buckets[i] = apiBucket
} }
// NewStateAPIUpdater creates a new StateAPIUpdater rollupVAPI.SafeMode = rollupVariables.SafeMode
func NewStateAPIUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables, a.status.Rollup = rollupVAPI
consts *historydb.Constants) *StateAPIUpdater { a.status.Unlock()
u := StateAPIUpdater{
hdb: hdb,
config: *config,
consts: *consts,
}
u.SetSCVars(vars.AsPtr())
return &u
} }
// Store the State in the HistoryDB // SetWDelayerVariables set Status.WithdrawalDelayer variables
func (u *StateAPIUpdater) Store() error { func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
u.rw.RLock() a.status.Lock()
defer u.rw.RUnlock() a.status.WithdrawalDelayer = wDelayerVariables
return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state)) a.status.Unlock()
} }
// SetSCVars sets the smart contract vars (ony updates those that are not nil) // SetAuctionVariables set Status.Auction variables
func (u *StateAPIUpdater) SetSCVars(vars *common.SCVariablesPtr) { func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
u.rw.Lock() a.status.Lock()
defer u.rw.Unlock() var auctionAPI historydb.AuctionVariablesAPI
if vars.Rollup != nil {
u.vars.Rollup = vars.Rollup auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup) auctionAPI.DonationAddress = auctionVariables.DonationAddress
u.state.Rollup = *rollupVars auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
} auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
if vars.Auction != nil { auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
u.vars.Auction = vars.Auction auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction) auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
u.state.Auction = *auctionVars auctionAPI.Outbidding = auctionVariables.Outbidding
} auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
if vars.WDelayer != nil {
u.vars.WDelayer = vars.WDelayer for i, slot := range auctionVariables.DefaultSlotSetBid {
u.state.WithdrawalDelayer = *u.vars.WDelayer auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
} }
// UpdateRecommendedFee update Status.RecommendedFee information for i, ratio := range auctionVariables.AllocationRatio {
func (u *StateAPIUpdater) UpdateRecommendedFee() error { auctionAPI.AllocationRatio[i] = ratio
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil
} }
// UpdateMetrics update Status.Metrics information a.status.Auction = auctionAPI
func (u *StateAPIUpdater) UpdateMetrics() error { a.status.Unlock()
u.rw.RLock()
lastBatch := u.state.Network.LastBatch
u.rw.RUnlock()
if lastBatch == nil {
return nil
}
lastBatchNum := lastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.Metrics = *metrics
u.rw.Unlock()
return nil
} }
// Network
// UpdateNetworkInfoBlock update Status.Network block related information // UpdateNetworkInfoBlock update Status.Network block related information
func (u *StateAPIUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) { func (a *API) UpdateNetworkInfoBlock(
u.rw.Lock() lastEthBlock, lastSyncBlock common.Block,
u.state.Network.LastSyncBlock = lastSyncBlock.Num ) {
u.state.Network.LastEthBlock = lastEthBlock.Num a.status.Network.LastSyncBlock = lastSyncBlock.Num
u.rw.Unlock() a.status.Network.LastEthBlock = lastEthBlock.Num
} }
// UpdateNetworkInfo update Status.Network information // UpdateNetworkInfo update Status.Network information
func (u *StateAPIUpdater) UpdateNetworkInfo( func (a *API) UpdateNetworkInfo(
lastEthBlock, lastSyncBlock common.Block, lastEthBlock, lastSyncBlock common.Block,
lastBatchNum common.BatchNum, currentSlot int64, lastBatchNum common.BatchNum, currentSlot int64,
) error { ) error {
// Get last batch in API format lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBatch = nil lastBatch = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
u.rw.RLock() lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
auctionVars := u.vars.Auction nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
u.rw.RUnlock()
// Get next forgers
lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
lastSyncBlock, currentSlot, lastClosedSlot)
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
nextForgers = nil nextForgers = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
a.status.Lock()
a.status.Network.LastSyncBlock = lastSyncBlock.Num
a.status.Network.LastEthBlock = lastEthBlock.Num
a.status.Network.LastBatch = lastBatch
a.status.Network.CurrentSlot = currentSlot
a.status.Network.NextForgers = nextForgers
bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI() // Update buckets withdrawals
if err == sql.ErrNoRows { bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
bucketUpdates = nil if tracerr.Unwrap(err) == sql.ErrNoRows {
bucketsUpdate = nil
} else if err != nil { } else if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
u.rw.Lock() for i, bucketParams := range a.status.Rollup.Buckets {
// Update NodeInfo struct for _, bucketUpdate := range bucketsUpdate {
for i, bucketParams := range u.state.Rollup.Buckets {
for _, bucketUpdate := range bucketUpdates {
if bucketUpdate.NumBucket == i { if bucketUpdate.NumBucket == i {
bucketParams.Withdrawals = bucketUpdate.Withdrawals bucketParams.Withdrawals = bucketUpdate.Withdrawals
u.state.Rollup.Buckets[i] = bucketParams a.status.Rollup.Buckets[i] = bucketParams
break break
} }
} }
} }
u.state.Network.LastSyncBlock = lastSyncBlock.Num a.status.Unlock()
u.state.Network.LastEthBlock = lastEthBlock.Num return nil
u.state.Network.LastBatch = lastBatch }
u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers // apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
u.rw.Unlock() func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
var slots [6]*big.Int
for i, slot := range defaultSlotSetBid {
bigInt, ok := new(big.Int).SetString(string(*slot), 10)
if !ok {
return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
}
slots[i] = bigInt
}
return slots, nil
}
// getNextForgers returns next forgers
func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForger{}
// Get min bid info
var minBidInfo []historydb.MinBidInfo
if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
if err != nil {
return nil, tracerr.Wrap(err)
}
minBidInfo = []historydb.MinBidInfo{{
DefaultSlotSetBid: bigIntSlots,
DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
nextForger := NextForger{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = historydb.CoordinatorAPI{
Forger: a.status.Auction.BootCoordinator,
URL: a.status.Auction.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// Metrics
// UpdateMetrics update Status.Metrics information
func (a *API) UpdateMetrics() error {
a.status.RLock()
if a.status.Network.LastBatch == nil {
a.status.RUnlock()
return nil
}
batchNum := a.status.Network.LastBatch.BatchNum
a.status.RUnlock()
metrics, err := a.h.GetMetricsAPI(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.Metrics = *metrics
a.status.Unlock()
return nil
}
// Recommended fee
// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
if err != nil {
return tracerr.Wrap(err)
}
a.status.Lock()
a.status.RecommendedFee.ExistingAccount = feeExistingAccount
a.status.RecommendedFee.CreatesAccount = createAccountExtraFeePercentage * feeExistingAccount
a.status.RecommendedFee.CreatesAccountAndRegister = createAccountInternalExtraFeePercentage * feeExistingAccount
a.status.Unlock()
return nil return nil
} }


@@ -13,7 +13,7 @@ import (
type testStatus struct { type testStatus struct {
Network testNetwork `json:"network"` Network testNetwork `json:"network"`
Metrics historydb.MetricsAPI `json:"metrics"` Metrics historydb.Metrics `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"` Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"` Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"` WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@@ -25,15 +25,14 @@ type testNetwork struct {
LastSyncBlock int64 `json:"lastSynchedBlock"` LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"` LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"` CurrentSlot int64 `json:"currentSlot"`
NextForgers []historydb.NextForgerAPI `json:"nextForgers"` NextForgers []NextForger `json:"nextForgers"`
} }
func TestSetRollupVariables(t *testing.T) { func TestSetRollupVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars}) rollupVars := &common.RollupVariables{}
require.NoError(t, stateAPIUpdater.Store()) assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true)
ni, err := api.h.GetNodeInfoAPI() api.SetRollupVariables(tc.rollupVars)
require.NoError(t, err) assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true)
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
} }
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) { func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -52,19 +51,17 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
} }
func TestSetWDelayerVariables(t *testing.T) { func TestSetWDelayerVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars}) wdelayerVars := &common.WDelayerVariables{}
require.NoError(t, stateAPIUpdater.Store()) assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
ni, err := api.h.GetNodeInfoAPI() api.SetWDelayerVariables(tc.wdelayerVars)
require.NoError(t, err) assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
} }
func TestSetAuctionVariables(t *testing.T) { func TestSetAuctionVariables(t *testing.T) {
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars}) auctionVars := &common.AuctionVariables{}
require.NoError(t, stateAPIUpdater.Store()) assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
ni, err := api.h.GetNodeInfoAPI() api.SetAuctionVariables(tc.auctionVars)
require.NoError(t, err) assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
} }
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) { func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -88,6 +85,11 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
} }
func TestUpdateNetworkInfo(t *testing.T) { func TestUpdateNetworkInfo(t *testing.T) {
status := &Network{}
assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(3) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
@@ -116,79 +118,62 @@ func TestUpdateNetworkInfo(t *testing.T) {
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates) err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
require.NoError(t, err) require.NoError(t, err)
// stateAPIUpdater := NewStateAPIUpdater(hdb) err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) assert.NoError(t, err)
require.NoError(t, err) assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
require.NoError(t, stateAPIUpdater.Store()) assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
ni, err := api.h.GetNodeInfoAPI() assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
require.NoError(t, err) assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock) assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum) assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
} }
func TestUpdateMetrics(t *testing.T) { func TestUpdateMetrics(t *testing.T) {
// Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated // Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
require.NoError(t, err) assert.NoError(t, err)
err = stateAPIUpdater.UpdateMetrics() err = api.UpdateMetrics()
require.NoError(t, err) assert.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store()) assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
ni, err := api.h.GetNodeInfoAPI() assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
require.NoError(t, err) assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0)) assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0)) assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0)) assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
} }
func TestUpdateRecommendedFee(t *testing.T) { func TestUpdateRecommendedFee(t *testing.T) {
err := stateAPIUpdater.UpdateRecommendedFee() err := api.UpdateRecommendedFee()
require.NoError(t, err) assert.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store()) assert.Greater(t, api.status.RecommendedFee.ExistingAccount, float64(0))
var minFeeUSD float64 assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
if api.l2 != nil { api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
minFeeUSD = api.l2.MinFeeUSD() assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
} api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
ni, err := api.h.GetNodeInfoAPI()
require.NoError(t, err)
assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
// ni.StateAPI.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountAndRegister,
// ni.StateAPI.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
} }
func TestGetState(t *testing.T) { func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3] lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(12) lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1) currentSlotNum := int64(1)
stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{ api.SetRollupVariables(tc.rollupVars)
Rollup: &tc.rollupVars, api.SetWDelayerVariables(tc.wdelayerVars)
Auction: &tc.auctionVars, api.SetAuctionVariables(tc.auctionVars)
WDelayer: &tc.wdelayerVars, err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
}) assert.NoError(t, err)
err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum) err = api.UpdateMetrics()
require.NoError(t, err) assert.NoError(t, err)
err = stateAPIUpdater.UpdateMetrics() err = api.UpdateRecommendedFee()
require.NoError(t, err) assert.NoError(t, err)
err = stateAPIUpdater.UpdateRecommendedFee()
require.NoError(t, err)
require.NoError(t, stateAPIUpdater.Store())
endpoint := apiURL + "state" endpoint := apiURL + "state"
var status testStatus var status testStatus
require.NoError(t, doGoodReq("GET", endpoint, nil, &status)) assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
// SC vars // SC vars
// UpdateNetworkInfo will overwrite buckets withdrawal values // UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -215,13 +200,13 @@ func TestGetState(t *testing.T) {
// Recommended fee // Recommended fee
// TODO: perform real asserts (not just greater than 0) // TODO: perform real asserts (not just greater than 0)
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0)) assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
// assert.Equal(t, status.RecommendedFee.CreatesAccount, assert.Equal(t, status.RecommendedFee.CreatesAccount,
// status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage) status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
// assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister, assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
// status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage) status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
} }
func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) { func assertNextForgers(t *testing.T, expected, actual []NextForger) {
assert.Equal(t, len(expected), len(actual)) assert.Equal(t, len(expected), len(actual))
for i := range expected { for i := range expected {
// ignore timestamps and other metadata // ignore timestamps and other metadata


@@ -1329,6 +1329,13 @@ components:
type: string type: string
description: Moment in which the transaction was added to the pool. description: Moment in which the transaction was added to the pool.
format: date-time format: date-time
batchNum:
type: integer
description: Identifier of a batch. Every new forged batch increases by one the batchNum, starting at 0.
minimum: 0
maximum: 4294967295
nullable: true
example: null
requestFromAccountIndex: requestFromAccountIndex:
type: string type: string
description: >- description: >-
@@ -1383,6 +1390,7 @@ components:
$ref: '#/components/schemas/Token' $ref: '#/components/schemas/Token'
example: example:
amount: '100000000000000' amount: '100000000000000'
batchNum:
fee: 0 fee: 0
fromAccountIndex: hez:SCC:256 fromAccountIndex: hez:SCC:256
fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn
@@ -1430,6 +1438,7 @@ components:
- info - info
- signature - signature
- timestamp - timestamp
- batchNum
- requestFromAccountIndex - requestFromAccountIndex
- requestToAccountIndex - requestToAccountIndex
- requestToHezEthereumAddress - requestToHezEthereumAddress
@@ -2569,21 +2578,6 @@ components:
description: List of next coordinators to forge. description: List of next coordinators to forge.
items: items:
$ref: '#/components/schemas/NextForger' $ref: '#/components/schemas/NextForger'
NodeConfig:
type: object
description: Configuration of the coordinator node. Note that this is specific for each coordinator.
properties:
forgeDelay:
type: number
description: |
Delay in seconds after which a batch is forged if the slot is
already committed. If set to 0s, the coordinator will continuously
forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value.
example: 193.4
additionalProperties: false
required:
- forgeDelay
State: State:
type: object type: object
description: Gobal variables of the network description: Gobal variables of the network
@@ -2600,8 +2594,6 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer' $ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee: recommendedFee:
$ref: '#/components/schemas/RecommendedFee' $ref: '#/components/schemas/RecommendedFee'
nodeConfig:
$ref: '#/components/schemas/NodeConfig'
additionalProperties: false additionalProperties: false
required: required:
- network - network
@@ -2610,7 +2602,6 @@ components:
- auction - auction
- withdrawalDelayer - withdrawalDelayer
- recommendedFee - recommendedFee
- nodeConfig
StateNetwork: StateNetwork:
type: object type: object
description: Gobal statistics of the network description: Gobal statistics of the network
@@ -2821,10 +2812,6 @@ components:
type: number type: number
description: Average fee percentage paid for L2 transactions in the last 24 hours. description: Average fee percentage paid for L2 transactions in the last 24 hours.
example: 1.54 example: 1.54
estimatedTimeToForgeL1:
type: number
description: Estimated time needed to forge a L1 transaction, from the time it's added on the smart contract, until it's actualy forged. In seconds.
example: 193.4
additionalProperties: false additionalProperties: false
required: required:
- transactionsPerBatch - transactionsPerBatch
@@ -2833,7 +2820,6 @@ components:
- totalAccounts - totalAccounts
- totalBJJs - totalBJJs
- avgTransactionFee - avgTransactionFee
- estimatedTimeToForgeL1
PendingItems: PendingItems:
type: integer type: integer
description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent. description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent.
@@ -2930,7 +2916,7 @@ components:
example: 101 example: 101
l1UserTotalBytes: l1UserTotalBytes:
type: integer type: integer
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx). description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
example: 72 example: 72
maxL1UserTx: maxL1UserTx:
type: integer type: integer


@@ -2,7 +2,6 @@ package api
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"net/http" "net/http"
@@ -28,7 +27,6 @@ func (a *API) postPoolTx(c *gin.Context) {
retBadReq(err, c) retBadReq(err, c)
return return
} }
writeTx.ClientIP = c.ClientIP()
// Insert to DB // Insert to DB
if err := a.l2.AddTxAPI(writeTx); err != nil { if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c) retSQLErr(err, c)
@@ -171,21 +169,16 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Get public key
account, err := a.s.LastGetAccount(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate feeAmount // Validate feeAmount
_, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee) _, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Get public key
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate TokenID
if poolTx.TokenID != account.TokenID {
return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
poolTx.TokenID, account.TokenID))
}
// Check signature // Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) { if !poolTx.VerifySignature(a.chainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature")) return tracerr.Wrap(errors.New("wrong signature"))


@@ -10,7 +10,6 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// testPoolTxReceive is a struct to be used to assert the response // testPoolTxReceive is a struct to be used to assert the response
@@ -171,9 +170,9 @@ func TestPoolTxs(t *testing.T) {
fetchedTxID := common.TxID{} fetchedTxID := common.TxID{}
for _, tx := range tc.poolTxsToSend { for _, tx := range tc.poolTxsToSend {
jsonTxBytes, err := json.Marshal(tx) jsonTxBytes, err := json.Marshal(tx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"POST", "POST",
endpoint, endpoint,
@@ -188,42 +187,42 @@ func TestPoolTxs(t *testing.T) {
badTx.Amount = "99950000000000000" badTx.Amount = "99950000000000000"
badTx.Fee = 255 badTx.Fee = 255
jsonTxBytes, err := json.Marshal(badTx) jsonTxBytes, err := json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong signature // Wrong signature
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
badTx.FromIdx = "hez:foo:1000" badTx.FromIdx = "hez:foo:1000"
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong to // Wrong to
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
badTx.ToEthAddr = &ethAddr badTx.ToEthAddr = &ethAddr
badTx.ToIdx = nil badTx.ToIdx = nil
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong rq // Wrong rq
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
rqFromIdx := "hez:foo:30" rqFromIdx := "hez:foo:30"
badTx.RqFromIdx = &rqFromIdx badTx.RqFromIdx = &rqFromIdx
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// GET // GET
endpoint += "/" endpoint += "/"
for _, tx := range tc.poolTxsToReceive { for _, tx := range tc.poolTxsToReceive {
fetchedTx := testPoolTxReceive{} fetchedTx := testPoolTxReceive{}
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"GET", "GET",
endpoint+tx.TxID.String(), endpoint+tx.TxID.String(),
@@ -234,10 +233,10 @@ func TestPoolTxs(t *testing.T) {
} }
// 400, due invalid TxID // 400, due invalid TxID
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400) err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
require.NoError(t, err) assert.NoError(t, err)
// 404, due inexistent TxID in DB // 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404) err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
require.NoError(t, err) assert.NoError(t, err)
} }
func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) { func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {

View File

@@ -64,10 +64,7 @@ func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBa
tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig) tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs) ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
if err != nil { return ptOut.ZKInputs, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
return ptOut.ZKInputs, nil
} }
// LocalStateDB returns the underlying LocalStateDB // LocalStateDB returns the underlying LocalStateDB

1
cli/node/.gitignore vendored
View File

@@ -1,3 +1,2 @@
cfg.example.secret.toml cfg.example.secret.toml
cfg.toml cfg.toml
node

View File

@@ -2,10 +2,6 @@
This is the main cli for the node This is the main cli for the node
## Go version
The `hermez-node` has been tested with go version 1.14
## Usage ## Usage
``` ```
@@ -69,64 +65,29 @@ when running the coordinator in sync mode
- The node requires a PostgreSQL database. The parameters of the server and - The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section. database must be set in the `PostgreSQL` section.
## Building
*All commands assume you are at the `cli/node` directory.*
Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```
Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.
Now build the node executable:
```
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
```
The executable is `node`.
## Usage Examples ## Usage Examples
The following commands assume you have built the node previously. You can also
run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer: Run the node in mode synchronizer:
``` ```
./node --mode sync --cfg cfg.buidler.toml run go run . --mode sync --cfg cfg.buidler.toml run
``` ```
Run the node in mode coordinator: Run the node in mode coordinator:
``` ```
./node --mode coord --cfg cfg.buidler.toml run go run . --mode coord --cfg cfg.buidler.toml run
``` ```
Import an ethereum private key into the keystore: Import an ethereum private key into the keystore:
``` ```
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
``` ```
Generate a new BabyJubJub key pair: Generate a new BabyJubJub key pair:
``` ```
./node --mode coord --cfg cfg.buidler.toml genbjj go run . --mode coord --cfg cfg.buidler.toml genbjj
``` ```
Wipe the entire SQL database (this will destroy all synchronized and pool Wipe the entire SQL database (this will destroy all synchronized and pool data):
data):
``` ```
./node --mode coord --cfg cfg.buidler.toml wipesql go run . --mode coord --cfg cfg.buidler.toml wipesql
```
Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
``` ```

View File

@@ -14,23 +14,17 @@ Type = "bitfinexV2"
[Debug] [Debug]
APIAddress = "localhost:12345" APIAddress = "localhost:12345"
MeddlerLogs = true MeddlerLogs = true
GinDebugMode = true
[StateDB] [StateDB]
Path = "/tmp/iden3-test/hermez/statedb" Path = "/tmp/iden3-test/hermez/statedb"
Keep = 256 Keep = 256
[PostgreSQL] [PostgreSQL]
PortWrite = 5432 Port = 5432
HostWrite = "localhost" Host = "localhost"
UserWrite = "hermez" User = "hermez"
PasswordWrite = "yourpasswordhere" Password = "yourpasswordhere"
NameWrite = "hermez" Name = "hermez"
# PortRead = 5432
# HostRead = "localhost"
# UserRead = "hermez"
# PasswordRead = "yourpasswordhere"
# NameRead = "hermez"
[Web3] [Web3]
URL = "http://localhost:8545" URL = "http://localhost:8545"
@@ -51,7 +45,6 @@ ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordina
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3" # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator # ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563" # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
MinimumForgeAddressBalance = 0
ConfirmBlocks = 10 ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6 L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2 StartSlotBlocksDelay = 2
@@ -60,9 +53,6 @@ SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s" ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms" ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s" SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
PurgeByExtDelInterval = "1m"
[Coordinator.FeeAccount] [Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E" Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -73,7 +63,6 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
[Coordinator.L2DB] [Coordinator.L2DB]
SafetyPeriod = 10 SafetyPeriod = 10
MaxTxs = 512 MaxTxs = 512
MinFeeUSD = 0.0
TTL = "24h" TTL = "24h"
PurgeBatchDelay = 10 PurgeBatchDelay = 10
InvalidateBatchDelay = 20 InvalidateBatchDelay = 20
@@ -94,24 +83,21 @@ MaxTx = 512
NLevels = 32 NLevels = 32
[Coordinator.EthClient] [Coordinator.EthClient]
ReceiptTimeout = "60s"
ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms" CheckLoopInterval = "500ms"
Attempts = 4 Attempts = 4
AttemptsDelay = "500ms" AttemptsDelay = "500ms"
TxResendTimeout = "2m" TxResendTimeout = "2m"
NoReuseNonce = false NoReuseNonce = false
CallGasLimit = 300000
GasPriceDiv = 100
MaxGasPrice = "5000000000" MaxGasPrice = "5000000000"
GasPriceIncPerc = 10
[Coordinator.EthClient.Keystore] [Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore" Path = "/tmp/iden3-test/hermez/ethkeystore"
Password = "yourpasswordhere" Password = "yourpasswordhere"
[Coordinator.EthClient.ForgeBatchGasCost]
Fixed = 500000
L1UserTx = 8000
L1CoordTx = 9000
L2Tx = 1
[Coordinator.API] [Coordinator.API]
Coordinator = true Coordinator = true

View File

@@ -11,13 +11,10 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/config" "github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node" "github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@@ -26,7 +23,6 @@ const (
flagMode = "mode" flagMode = "mode"
flagSK = "privatekey" flagSK = "privatekey"
flagYes = "yes" flagYes = "yes"
flagBlock = "block"
modeSync = "sync" modeSync = "sync"
modeCoord = "coord" modeCoord = "coord"
) )
@@ -91,11 +87,11 @@ func cmdWipeSQL(c *cli.Context) error {
} }
} }
db, err := dbUtils.ConnectSQLDB( db, err := dbUtils.ConnectSQLDB(
cfg.PostgreSQL.PortWrite, cfg.PostgreSQL.Port,
cfg.PostgreSQL.HostWrite, cfg.PostgreSQL.Host,
cfg.PostgreSQL.UserWrite, cfg.PostgreSQL.User,
cfg.PostgreSQL.PasswordWrite, cfg.PostgreSQL.Password,
cfg.PostgreSQL.NameWrite, cfg.PostgreSQL.Name,
) )
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -107,7 +103,17 @@ func cmdWipeSQL(c *cli.Context) error {
return nil return nil
} }
func waitSigInt() { func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
stopCh := make(chan interface{}) stopCh := make(chan interface{})
// catch ^C to send the stop signal // catch ^C to send the stop signal
@@ -128,101 +134,11 @@ func waitSigInt() {
} }
}() }()
<-stopCh <-stopCh
}
func cmdRun(c *cli.Context) error {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
waitSigInt()
node.Stop() node.Stop()
return nil return nil
} }
func cmdServeAPI(c *cli.Context) error {
cfg, err := parseCliAPIServer(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
}
srv.Start()
waitSigInt()
srv.Stop()
return nil
}
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
dbRead, dbWrite,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
return nil
}
// Config is the configuration of the hermez node execution // Config is the configuration of the hermez node execution
type Config struct { type Config struct {
mode node.Mode mode node.Mode
@@ -244,59 +160,20 @@ func getConfig(c *cli.Context) (*Config, error) {
var cfg Config var cfg Config
mode := c.String(flagMode) mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg) nodeCfgPath := c.String(flagCfg)
if nodeCfgPath == "" {
return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
}
var err error var err error
switch mode { switch mode {
case modeSync: case modeSync:
cfg.mode = node.ModeSynchronizer cfg.mode = node.ModeSynchronizer
cfg.node, err = config.LoadNode(nodeCfgPath, false) cfg.node, err = config.LoadNode(nodeCfgPath)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
case modeCoord: case modeCoord:
cfg.mode = node.ModeCoordinator cfg.mode = node.ModeCoordinator
cfg.node, err = config.LoadNode(nodeCfgPath, true) cfg.node, err = config.LoadCoordinator(nodeCfgPath)
if err != nil {
return nil, tracerr.Wrap(err)
}
default:
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
}
return &cfg, nil
}
// ConfigAPIServer is the configuration of the api server execution
type ConfigAPIServer struct {
mode node.Mode
server *config.APIServer
}
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
cfg, err := getConfigAPIServer(c)
if err != nil {
if err := cli.ShowAppHelp(c); err != nil {
panic(err)
}
return nil, tracerr.Wrap(err)
}
return cfg, nil
}
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
var cfg ConfigAPIServer
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -362,24 +239,6 @@ func main() {
Usage: "Run the hermez-node in the indicated mode", Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun, Action: cmdRun,
}, },
{
Name: "serveapi",
Aliases: []string{},
Usage: "Serve the API only",
Action: cmdServeAPI,
},
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
} }
err := app.Run(os.Args) err := app.Run(os.Args)

View File

@@ -263,13 +263,3 @@ type IdxNonce struct {
Idx Idx `db:"idx"` Idx Idx `db:"idx"`
Nonce Nonce `db:"nonce"` Nonce Nonce `db:"nonce"`
} }
// AccountUpdate represents an account balance and/or nonce update after a
// processed batch
type AccountUpdate struct {
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
Idx Idx `meddler:"idx"`
Nonce Nonce `meddler:"nonce"`
Balance *big.Int `meddler:"balance,bigint"`
}

View File

@@ -1,30 +1,21 @@
package common package common
import ( import (
"encoding/binary"
"strconv"
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
ethMath "github.com/ethereum/go-ethereum/common/math"
ethCrypto "github.com/ethereum/go-ethereum/crypto" ethCrypto "github.com/ethereum/go-ethereum/crypto"
ethSigner "github.com/ethereum/go-ethereum/signer/core"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez // AccountCreationAuthMsg is the message that is signed to authorize a Hermez
// account creation // account creation
const AccountCreationAuthMsg = "Account creation" const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollup account creation"
// EIP712Version is the used version of the EIP-712 // EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EIP712Version = "1" const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
// EIP712Provider defines the Provider for the EIP-712
const EIP712Provider = "Hermez Network"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for // AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary // account creations when necessary
@@ -35,64 +26,27 @@ type AccountCreationAuth struct {
Timestamp time.Time `meddler:"timestamp,utctime"` Timestamp time.Time `meddler:"timestamp,utctime"`
} }
// toHash returns a byte array to be hashed from the AccountCreationAuth, which
// follows the EIP-712 encoding
func (a *AccountCreationAuth) toHash(chainID uint16, func (a *AccountCreationAuth) toHash(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) { hermezContractAddr ethCommon.Address) []byte {
chainIDFormatted := ethMath.NewHexOrDecimal256(int64(chainID)) var chainIDBytes [2]byte
binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
// [EthPrefix | AccountCreationAuthMsg | compressedBJJ | chainID | hermezContractAddr]
var b []byte
b = append(b, []byte(AccountCreationAuthMsg)...)
b = append(b, SwapEndianness(a.BJJ[:])...) // for js implementation compatibility
b = append(b, chainIDBytes[:]...)
b = append(b, hermezContractAddr[:]...)
signerData := ethSigner.TypedData{ ethPrefix := EthMsgPrefix + strconv.Itoa(len(b))
Types: ethSigner.Types{ return append([]byte(ethPrefix), b...)
"EIP712Domain": []ethSigner.Type{
{Name: "name", Type: "string"},
{Name: "version", Type: "string"},
{Name: "chainId", Type: "uint256"},
{Name: "verifyingContract", Type: "address"},
},
"Authorise": []ethSigner.Type{
{Name: "Provider", Type: "string"},
{Name: "Authorisation", Type: "string"},
{Name: "BJJKey", Type: "bytes32"},
},
},
PrimaryType: "Authorise",
Domain: ethSigner.TypedDataDomain{
Name: EIP712Provider,
Version: EIP712Version,
ChainId: chainIDFormatted,
VerifyingContract: hermezContractAddr.Hex(),
},
Message: ethSigner.TypedDataMessage{
"Provider": EIP712Provider,
"Authorisation": AccountCreationAuthMsg,
"BJJKey": SwapEndianness(a.BJJ[:]),
},
}
domainSeparator, err := signerData.HashStruct("EIP712Domain", signerData.Domain.Map())
if err != nil {
return nil, tracerr.Wrap(err)
}
typedDataHash, err := signerData.HashStruct(signerData.PrimaryType, signerData.Message)
if err != nil {
return nil, tracerr.Wrap(err)
}
rawData := []byte{0x19, 0x01} // "\x19\x01"
rawData = append(rawData, domainSeparator...)
rawData = append(rawData, typedDataHash...)
return rawData, nil
} }
// HashToSign returns the hash to be signed by the Ethereum address to authorize // HashToSign returns the hash to be signed by the Ethereum address to authorize
// the account creation, which follows the EIP-712 encoding // the account creation
func (a *AccountCreationAuth) HashToSign(chainID uint16, func (a *AccountCreationAuth) HashToSign(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) { hermezContractAddr ethCommon.Address) ([]byte, error) {
b, err := a.toHash(chainID, hermezContractAddr) b := a.toHash(chainID, hermezContractAddr)
if err != nil { return ethCrypto.Keccak256Hash(b).Bytes(), nil
return nil, tracerr.Wrap(err)
}
return ethCrypto.Keccak256(b), nil
} }
// Sign signs the account creation authorization message using the provided // Sign signs the account creation authorization message using the provided
@@ -100,17 +54,16 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
// should do an ethereum signature using the account corresponding to // should do an ethereum signature using the account corresponding to
// `a.EthAddr`. The `signHash` function is used to make signig flexible: in // `a.EthAddr`. The `signHash` function is used to make signig flexible: in
// tests we sign directly using the private key, outside tests we sign using // tests we sign directly using the private key, outside tests we sign using
// the keystore (which never exposes the private key). Sign follows the EIP-712 // the keystore (which never exposes the private key).
// encoding.
func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error), func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
chainID uint16, hermezContractAddr ethCommon.Address) error { chainID uint16, hermezContractAddr ethCommon.Address) error {
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
if err != nil { if err != nil {
return tracerr.Wrap(err) return err
} }
sig, err := signHash(hash) sig, err := signHash(hash)
if err != nil { if err != nil {
return tracerr.Wrap(err) return err
} }
sig[64] += 27 sig[64] += 27
a.Signature = sig a.Signature = sig
@@ -119,8 +72,7 @@ func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
} }
// VerifySignature ensures that the Signature is done with the EthAddr, for the // VerifySignature ensures that the Signature is done with the EthAddr, for the
// chainID and hermezContractAddress passed by parameter. VerifySignature // chainID and hermezContractAddress passed by parameter
// follows the EIP-712 encoding.
func (a *AccountCreationAuth) VerifySignature(chainID uint16, func (a *AccountCreationAuth) VerifySignature(chainID uint16,
hermezContractAddr ethCommon.Address) bool { hermezContractAddr ethCommon.Address) bool {
// Calculate hash to be signed // Calculate hash to be signed
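
For readers following the right-hand side of this file, the non-EIP-712 flow above reduces to a Keccak256 over `EthMsgPrefix + len + [AccountCreationAuthMsg | byte-swapped BJJ | chainID | hermez contract address]`. The following is a minimal stand-alone sketch of that construction, not part of the diff; it assumes only the go-ethereum `common` and `crypto` packages already imported by this file, and the manual byte-swap stands in for `SwapEndianness`:

```
package main

import (
	"encoding/binary"
	"fmt"
	"strconv"

	ethCommon "github.com/ethereum/go-ethereum/common"
	ethCrypto "github.com/ethereum/go-ethereum/crypto"
)

// hashToSign mirrors the right-hand toHash/HashToSign pair: the message is
// [AccountCreationAuthMsg | byte-swapped BJJ | chainID (BE) | contract addr],
// wrapped with the Ethereum signed-message prefix and its length, then hashed.
func hashToSign(bjjComp [32]byte, chainID uint16, hermezContractAddr ethCommon.Address) []byte {
	const msg = "I authorize this babyjubjub key for hermez rollup account creation"
	var chainIDBytes [2]byte
	binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
	// byte-swap the compressed BJJ key (as SwapEndianness does, for js
	// implementation compatibility)
	swapped := make([]byte, len(bjjComp))
	for i := range bjjComp {
		swapped[i] = bjjComp[len(bjjComp)-1-i]
	}
	b := append([]byte(msg), swapped...)
	b = append(b, chainIDBytes[:]...)
	b = append(b, hermezContractAddr[:]...)
	prefixed := append([]byte("\x19Ethereum Signed Message:\n"+strconv.Itoa(len(b))), b...)
	return ethCrypto.Keccak256(prefixed)
}

func main() {
	var bjj [32]byte // zero key, for illustration only
	addr := ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf")
	fmt.Printf("%x\n", hashToSign(bjj, 4, addr))
}
```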

View File

@@ -39,7 +39,7 @@ func TestAccountCreationAuthSignVerify(t *testing.T) {
// Hash and sign manually and compare the generated signature // Hash and sign manually and compare the generated signature
hash, err := a.HashToSign(chainID, hermezContractAddr) hash, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "9414667457e658dd31949b82996b75c65a055512244c3bbfd22ff56add02ba65", assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
hex.EncodeToString(hash)) hex.EncodeToString(hash))
sig, err := ethCrypto.Sign(hash, ethSk) sig, err := ethCrypto.Sign(hash, ethSk)
require.NoError(t, err) require.NoError(t, err)
@@ -75,9 +75,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7", pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7",
chainID: uint16(4), chainID: uint16(4),
hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf", hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
toHashExpected: "190189658bba487e11c7da602676ee32bc90b77d3f32a305b147e4f3c3b35f19672e5d84ccc38d0ab245c469b719549d837113465c2abf9972c49403ca6fd10ed3dc", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700047e5f4552091a69125d5dfcb7b8c2659029395bdf",
hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2", hashExpected: "39afea52d843a4de905b6b5ebb0ee8c678141f711d96d9b429c4aec10ef9911f",
sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b", sigExpected: "73d10d6ecf06ee8a5f60ac90f06b78bef9c650f414ba3ac73e176dc32e896159147457e9c86f0b4bd60fdaf2c0b2aec890a7df993d69a4805e242a6b845ebf231c",
} }
tv1 := testVector{ tv1 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000002", ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
@@ -85,9 +85,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d", pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d",
chainID: uint16(0), chainID: uint16(0),
hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf", hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
toHashExpected: "1901dafbc253dedf90d6421dc6e25d5d9efc6985133cb2a8d363d0a081a0e3eddddc65f603a88de36aaeabd3b4cf586538c7f3fd50c94780530a3707c8c14ad9fd11", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00002b5ad5c4795c026514f8317c7a215e218dccd6cf",
hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959", hashExpected: "89a3895993a4736232212e59566294feb3da227af44375daf3307dcad5451d5d",
sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c", sigExpected: "bb4156156c705494ad5f99030342c64657e51e2994750f92125717c40bf56ad632044aa6bd00979feea92c417b552401e65fe5f531f15010d9d1c278da8be1df1b",
} }
tv2 := testVector{ tv2 := testVector{
ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122", ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
@@ -95,9 +95,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52", pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52",
chainID: uint16(31337), // =0x7a69 chainID: uint16(31337), // =0x7a69
hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c", hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c",
toHashExpected: "190167617949b934d7e01add4009cd3d47415a26727b7d6288e5dce33fb3721d5a1a9ce511b19b694c9aaf8183f4987ed752f24884c54c003d11daa2e98c7547a79e", toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b527a69f4e77e5da47ac3125140c470c71cbca77b5c638c",
hashExpected: "157b570c597e615b8356ce008ac39f43bc9b6d50080bc07d968031b9378acbbb", hashExpected: "4f6ead01278ba4597d4720e37482f585a713497cea994a95209f4c57a963b4a7",
sigExpected: "a0766181102428b5672e523dc4b905c10ddf025c10dbd0b3534ef864632a14652737610041c670b302fc7dca28edd5d6eac42b72d69ce58da8ce21287b244e381b", sigExpected: "43b5818802a137a72a190c1d8d767ca507f7a4804b1b69b5e055abf31f4f2b476c80bb1ba63260d95610f6f831420d32130e7f22fec5d76e16644ddfcedd0d441c",
} }
tvs = append(tvs, tv0) tvs = append(tvs, tv0)
tvs = append(tvs, tv1) tvs = append(tvs, tv1)
@@ -122,10 +122,10 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
BJJ: pkComp, BJJ: pkComp,
} }
toHash, err := a.toHash(chainID, hermezContractAddr) toHash := a.toHash(chainID, hermezContractAddr)
require.NoError(t, err)
assert.Equal(t, tv.toHashExpected, assert.Equal(t, tv.toHashExpected,
hex.EncodeToString(toHash)) hex.EncodeToString(toHash))
assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))
msg, err := a.HashToSign(chainID, hermezContractAddr) msg, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err) require.NoError(t, err)

View File

@@ -77,7 +77,6 @@ type BatchData struct {
L1CoordinatorTxs []L1Tx L1CoordinatorTxs []L1Tx
L2Txs []L2Tx L2Txs []L2Tx
CreatedAccounts []Account CreatedAccounts []Account
UpdatedAccounts []AccountUpdate
ExitTree []ExitInfo ExitTree []ExitInfo
Batch Batch Batch Batch
} }
@@ -94,3 +93,23 @@ func NewBatchData() *BatchData {
Batch: Batch{}, Batch: Batch{},
} }
} }
// BatchSync is a subset of Batch that contains fields needed for the
// synchronizer and coordinator
// type BatchSync struct {
// BatchNum BatchNum `meddler:"batch_num"`
// EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
// ForgerAddr ethCommon.Address `meddler:"forger_addr"`
// StateRoot *big.Int `meddler:"state_root,bigint"`
// SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
// }
//
// func NewBatchSync() *BatchSync {
// return &BatchSync{
// BatchNum: 0,
// EthBlockNum: 0,
// ForgerAddr: ethCommon.Address,
// StateRoot: big.NewInt(0),
// SlotNum: 0,
// }
// }

View File

@@ -1,33 +0,0 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
// original SCVariables
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}

View File

@@ -24,8 +24,8 @@ const (
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature // RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
RollupConstL1CoordinatorTotalBytes = 101 RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + // RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx // [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 78 RollupConstL1UserTotalBytes = 72
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch // RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128 RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch // RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
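
For reference, both totals follow directly from the field sizes listed in the comment: with Float40 amounts, 20 + 32 + 6 + 5 + 5 + 4 + 6 = 78 bytes, and with Float16 amounts, 20 + 32 + 6 + 2 + 2 + 4 + 6 = 72 bytes.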

131
common/float16.go Normal file
View File

@@ -0,0 +1,131 @@
// Package common Float16 provides methods to work with the Hermez custom
// half-precision float codification, 16 bits, internally called Float16, which
// has been adopted to encode large integers. This is done in order to save
// bits when L2 transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
var (
// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
ErrRoundingLoss = errors.New("input value causes rounding loss")
)
// Float16 represents a float in a 16 bit format
type Float16 uint16
// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
func (f16 Float16) Bytes() []byte {
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(f16))
return b[:]
}
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}
// BigInt converts the Float16 to a *big.Int integer
func (f16 *Float16) BigInt() *big.Int {
fl := int64(*f16)
m := big.NewInt(fl & 0x3FF)
e := big.NewInt(fl >> 11)
e5 := (fl >> 10) & 0x01
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
res := m.Mul(m, exp)
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
res.Add(res, exp.Div(exp, big.NewInt(2)))
}
return res
}
// floorFix2Float converts a fix to a float, always rounding down
func floorFix2Float(_f *big.Int) Float16 {
zero := big.NewInt(0)
ten := big.NewInt(10)
e := int64(0)
m := big.NewInt(0)
m.Set(_f)
if m.Cmp(zero) == 0 {
return 0
}
s := big.NewInt(0).Rsh(m, 10)
for s.Cmp(zero) != 0 {
m.Div(m, ten)
s.Rsh(m, 10)
e++
}
return Float16(m.Int64() | e<<11)
}
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
// case of loss during the encoding.
func NewFloat16(f *big.Int) (Float16, error) {
fl1 := floorFix2Float(f)
fi1 := fl1.BigInt()
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
m3 := (fl1 & 0x3FF) + 1
e3 := fl1 >> 11
if m3&0x400 == 0 {
m3 = 0x66
e3++
}
fl3 := m3 + e3<<11
fi3 := fl3.BigInt()
res := fl1
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
if d.Cmp(d2) == 1 {
res = fl2
d = d2
}
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
if d.Cmp(d3) == 1 {
res = fl3
}
// Do rounding check
if res.BigInt().Cmp(f) == 0 {
return res, nil
}
return res, tracerr.Wrap(ErrRoundingLoss)
}
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
// case of loss during the encoding.
func NewFloat16Floor(f *big.Int) Float16 {
fl1 := floorFix2Float(f)
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
if fi2.Cmp(f) < 1 {
return fl2
}
return fl1
}
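
A quick round-trip illustration of the codification above, using only the functions in this file; this is a minimal sketch, not part of the diff, and the import path is assumed from this repository's module name:

```
package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// 123000000 = 123 * 10^6 fits the [exponent | half | mantissa] layout
	// exactly, so NewFloat16 succeeds (0x307b, as in the test vectors below).
	v, _ := new(big.Int).SetString("123000000", 10)
	f16, err := common.NewFloat16(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %#x decoded: %s\n", uint16(f16), f16.BigInt())

	// 1024 cannot be encoded without loss: NewFloat16 returns ErrRoundingLoss,
	// while NewFloat16Floor silently rounds down (here to 1020).
	floor := common.NewFloat16Floor(big.NewInt(1024))
	fmt.Println(floor.BigInt())
}
```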

132
common/float16_test.go Normal file
View File

@@ -0,0 +1,132 @@
package common
import (
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
)
func TestConversions(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
0xFFFF: "10235000000000000000000000000000000",
0x0000: "0",
0x0400: "0",
0x0001: "1",
0x0401: "1",
0x0800: "0",
0x0c00: "5",
0x0801: "10",
0x0c01: "15",
}
for test := range testVector {
fix := test.BigInt()
assert.Equal(t, fix.String(), testVector[test])
bi := big.NewInt(0)
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.Equal(t, nil, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2Float(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
"87950000000000000": 0x776f,
"87949999999999999": 0x736f,
}
for test := range testVector {
bi := big.NewInt(0)
bi.SetString(test, 10)
testFloat := NewFloat16Floor(bi)
assert.Equal(t, testFloat, testVector[test])
}
}
func TestConversionLosses(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.Equal(t, nil, err)
c := b.BigInt()
assert.Equal(t, c, a)
a = big.NewInt(1024)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32767)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32768)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(65536000)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
}
func BenchmarkFloat16(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Bad big int")
}
return bigInt
}
type pair struct {
Float16 Float16
BigInt *big.Int
}
testVector := []pair{
{0x307B, newBigInt("123000000")},
{0x1DC6, newBigInt("454500")},
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
{0x0000, newBigInt("0")},
{0x0400, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1")},
{0x0800, newBigInt("0")},
{0x0c00, newBigInt("5")},
{0x0801, newBigInt("10")},
{0x0c01, newBigInt("15")},
}
b.Run("floorFix2Float()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
}
})
b.Run("NewFloat16()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float16.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
testVector[i%len(testVector)].Float16.BigInt()
}
})
}

View File

@@ -1,103 +0,0 @@
// Package common Float40 provides methods to work with the Hermez custom
// 40-bit float codification, internally called Float40, which has been adopted
// to encode large integers. This is done in order to save bits when L2
// transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
)
var (
// ErrFloat40Overflow is used when a given Float40 overflows the
// maximum capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
ErrFloat40E31 = errors.New("Float40 error, e > 31")
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as Float40 due to not enough precision
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
)
// Float40 represents a float encoded in a 40 bit format (stored in a uint64)
type Float40 uint64
// Bytes return a byte array of length 5 with the Float40 value encoded in
// BigEndian
func (f40 Float40) Bytes() ([]byte, error) {
if f40 > maxFloat40Value {
return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
}
var f40Bytes [8]byte
binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
var b [5]byte
copy(b[:], f40Bytes[3:])
return b[:], nil
}
// Float40FromBytes returns a Float40 from a byte array of 5 bytes in Bigendian
// representation.
func Float40FromBytes(b []byte) Float40 {
var f40Bytes [8]byte
copy(f40Bytes[3:], b[:])
f40 := binary.BigEndian.Uint64(f40Bytes[:])
return Float40(f40)
}
// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
// [ e | m ]
// [ 5 bits | 35 bits ]
func (f40 Float40) BigInt() (*big.Int, error) {
// take the 5 used bytes (FF * 5)
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, err
}
e := f40Bytes[0] & 0xF8 >> 3 // take first 5 bits
m := f40Uint64 & 0x07_FF_FF_FF_FF // take the others 35 bits
exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
return r, nil
}
// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
// of loss during the encoding.
func NewFloat40(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
if m.Cmp(thres) >= 0 {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
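
To make the [ 5 bits e | 35 bits m ] layout above concrete, here is a stand-alone decoding of one of the Float40 test vectors; a worked example using only the standard library, not part of the diff:

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 6*0x800000000 + 123 puts e=6 in the top 5 bits and m=123 in the low 35
	// bits (0x800000000 = 2^35), so the decoded value is 123 * 10^6.
	f40 := uint64(6*0x800000000 + 123)
	e := f40 >> 35
	m := f40 & 0x7FFFFFFFF // low 35 bits
	v := new(big.Int).Mul(
		big.NewInt(int64(m)),
		new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil),
	)
	fmt.Println(v) // 123000000
}
```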

View File

@@ -1,95 +0,0 @@
package common
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConversionsFloat40(t *testing.T) {
testVector := map[Float40]string{
6*0x800000000 + 123: "123000000",
2*0x800000000 + 4545: "454500",
30*0x800000000 + 10235: "10235000000000000000000000000000000",
0x000000000: "0",
0x800000000: "0",
0x0001: "1",
0x0401: "1025",
0x800000000 + 1: "10",
0xFFFFFFFFFF: "343597383670000000000000000000000000000000",
}
for test := range testVector {
fix, err := test.BigInt()
require.NoError(t, err)
assert.Equal(t, fix.String(), testVector[test])
bi, ok := new(big.Int).SetString(testVector[test], 10)
require.True(t, ok)
fl, err := NewFloat40(bi)
assert.NoError(t, err)
fx2, err := fl.BigInt()
require.NoError(t, err)
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestExpectError(t *testing.T) {
testVector := map[string]error{
"9922334455000000000000000000000000000000": nil,
"9922334455000000000000000000000000000001": ErrFloat40NotEnoughPrecission,
"9922334454999999999999999999999999999999": ErrFloat40NotEnoughPrecission,
"42949672950000000000000000000000000000000": nil,
"99223344556573838487575": ErrFloat40NotEnoughPrecission,
"992233445500000000000000000000000000000000": ErrFloat40E31,
"343597383670000000000000000000000000000000": nil,
"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383700000000000000000000000000000000": ErrFloat40E31,
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], err)
}
}
func BenchmarkFloat40(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Can not convert string to *big.Int")
}
return bigInt
}
type pair struct {
Float40 Float40
BigInt *big.Int
}
testVector := []pair{
{6*0x800000000 + 123, newBigInt("123000000")},
{2*0x800000000 + 4545, newBigInt("454500")},
{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
{0x000000000, newBigInt("0")},
{0x800000000, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1025")},
{0x800000000 + 1, newBigInt("10")},
{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
}
b.Run("NewFloat40()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float40.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = testVector[i%len(testVector)].Float40.BigInt()
}
})
}

View File

@@ -11,11 +11,18 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
const (
// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
L1UserTxBytesLen = 72
// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
L1CoordinatorTxBytesLen = 101
)
// L1Tx is a struct that represents a L1 tx // L1Tx is a struct that represents a L1 tx
type L1Tx struct { type L1Tx struct {
// Stored in DB: mandatory fields // Stored in DB: mandatory fields
// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of: // TxID (12 bytes) for L1Tx is:
// bytes: | 1 | 8 | 2 | 1 | // bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) | // values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type: // where type:
@@ -172,38 +179,45 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits ] empty (userFee) // 1 byte // [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes // [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes // [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
// b[0:7] empty: no ToBJJSign, no fee, no nonce // b[0:7] empty: no ToBJJSign, no fee, no nonce
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[11:17], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[17:23], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
} }
// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability // BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ] // [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -217,17 +231,13 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
if tx.EffectiveAmount != nil { if tx.EffectiveAmount != nil {
amountFloat40, err := NewFloat40(tx.EffectiveAmount) amountFloat16, err := NewFloat16(tx.EffectiveAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
if err != nil {
return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes) // fee = 0 (as is L1Tx) b[10:11]
}
// fee = 0 (as is L1Tx)
return b[:], nil return b[:], nil
} }
@@ -237,7 +247,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen] fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2] toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength] amountBytes := b[idxLen*2 : idxLen*2+2]
l1tx := L1Tx{} l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6)) fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -250,8 +260,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
l1tx.ToIdx = toIdx l1tx.ToIdx = toIdx
l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt() l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
return &l1tx, err return &l1tx, nil
} }
// BytesGeneric returns the generic representation of a L1Tx. This method is // BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -259,7 +269,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case). // for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) { func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [RollupConstL1UserTotalBytes]byte var b [L1UserTxBytesLen]byte
copy(b[0:20], tx.FromEthAddr.Bytes()) copy(b[0:20], tx.FromEthAddr.Bytes())
if tx.FromBJJ != EmptyBJJComp { if tx.FromBJJ != EmptyBJJComp {
pkCompL := tx.FromBJJ pkCompL := tx.FromBJJ
@@ -271,33 +281,22 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[52:58], fromIdxBytes[:]) copy(b[52:58], fromIdxBytes[:])
depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes() copy(b[58:60], depositAmountFloat16.Bytes())
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[58:63], depositAmountFloat40Bytes) copy(b[60:62], amountFloat16.Bytes())
copy(b[62:66], tx.TokenID.Bytes())
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[63:68], amountFloat40Bytes)
copy(b[68:72], tx.TokenID.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[72:78], toIdxBytes[:]) copy(b[66:72], toIdxBytes[:])
return b[:], nil return b[:], nil
} }
@@ -314,7 +313,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
if tx.UserOrigin { if tx.UserOrigin {
return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx")) return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
} }
var b [RollupConstL1CoordinatorTotalBytes]byte var b [L1CoordinatorTxBytesLen]byte
v := compressedSignatureBytes[64] v := compressedSignatureBytes[64]
s := compressedSignatureBytes[32:64] s := compressedSignatureBytes[32:64]
r := compressedSignatureBytes[0:32] r := compressedSignatureBytes[0:32]
@@ -330,7 +329,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte // L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) { func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != RollupConstL1UserTotalBytes { if len(b) != L1UserTxBytesLen {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
} }
@@ -348,19 +347,13 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.FromIdx = fromIdx tx.FromIdx = fromIdx
tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt() tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
tx.Amount = Float16FromBytes(b[60:62]).BigInt()
tx.TokenID, err = TokenIDFromBytes(b[62:66])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[63:68]).BigInt() tx.ToIdx, err = IdxFromBytes(b[66:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.TokenID, err = TokenIDFromBytes(b[68:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[72:78])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -368,12 +361,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return tx, nil return tx, nil
} }
func signHash(data []byte) []byte {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
return ethCrypto.Keccak256([]byte(msg))
}
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) { func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != RollupConstL1CoordinatorTotalBytes { if len(b) != L1CoordinatorTxBytesLen {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
} }
bytesMessage := []byte("I authorize this babyjubjub key for hermez rollup account creation")
tx := &L1Tx{ tx := &L1Tx{
UserOrigin: false, UserOrigin: false,
} }
@@ -394,20 +394,18 @@ func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommo
// L1CoordinatorTX ETH // L1CoordinatorTX ETH
// Ethereum adds 27 to v // Ethereum adds 27 to v
v = b[0] - byte(27) //nolint:gomnd v = b[0] - byte(27) //nolint:gomnd
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
var data []byte
data = append(data, bytesMessage...)
data = append(data, pkCompB...)
data = append(data, chainIDBytes[:]...)
data = append(data, hermezAddress.Bytes()...)
var signature []byte var signature []byte
signature = append(signature, r[:]...) signature = append(signature, r[:]...)
signature = append(signature, s[:]...) signature = append(signature, s[:]...)
signature = append(signature, v) signature = append(signature, v)
hash := signHash(data)
accCreationAuth := AccountCreationAuth{ pubKeyBytes, err := ethCrypto.Ecrecover(hash, signature)
BJJ: tx.FromBJJ,
}
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
if err != nil {
return nil, tracerr.Wrap(err)
}
pubKeyBytes, err := ethCrypto.Ecrecover(h, signature)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }

View File

@@ -50,110 +50,64 @@ func TestNewL1CoordinatorTx(t *testing.T) {
} }
func TestL1TxCompressedData(t *testing.T) { func TestL1TxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation (using
// PoolL2Tx values)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
FromIdx: (1 << 48) - 1, FromIdx: 2,
ToIdx: (1 << 48) - 1, ToIdx: 3,
Amount: amount, Amount: big.NewInt(4),
TokenID: (1 << 32) - 1, TokenID: 5,
} }
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1)) chainID := uint16(0)
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err) assert.NoError(t, err)
expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{ // test vector value generated from javascript implementation
FromIdx: 0, expectedStr := "7307597389635308713748674793997299267459594577423"
ToIdx: 0, assert.Equal(t, expectedStr, txCompressedData.String())
Amount: big.NewInt(0), assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
TokenID: 0,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = L1Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err)
expectedStr = "7b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
} }
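
For anyone checking the right-hand expected value by hand, it can be rebuilt byte for byte from the 31-byte TxCompressedData layout documented in l1tx.go; the sketch below is a worked reconstruction, not part of the diff, and the trailing c60be60f is the signature constant visible in the vectors above:

```
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	var b [31]byte
	// b[0:7] empty: no toBJJSign, no fee, no nonce
	copy(b[7:11], []byte{0, 0, 0, 5})              // tokenID = 5
	copy(b[11:13], []byte{0x00, 0x04})             // amountFloat16(4) = 0x0004
	copy(b[13:19], []byte{0, 0, 0, 0, 0, 3})       // toIdx = 3
	copy(b[19:25], []byte{0, 0, 0, 0, 0, 2})       // fromIdx = 2
	// b[25:27] chainID = 0 (already zero)
	copy(b[27:31], []byte{0xc6, 0x0b, 0xe6, 0x0f}) // signature constant
	v := new(big.Int).SetBytes(b[:])               // leading zero bytes drop out
	fmt.Println(hex.EncodeToString(v.Bytes()))
	// 0500040000000000030000000000020000c60be60f
}
```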
func TestBytesDataAvailability(t *testing.T) { func TestBytesDataAvailability(t *testing.T) {
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
ToIdx: (1 << 16) - 1, FromIdx: 2,
FromIdx: (1 << 16) - 1, ToIdx: 3,
EffectiveAmount: amount, Amount: big.NewInt(4),
TokenID: 5,
} }
txCompressedData, err := tx.BytesDataAvailability(16) txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData)) assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{ tx = L1Tx{
ToIdx: (1 << 32) - 1, FromIdx: 2,
FromIdx: (1 << 32) - 1, ToIdx: 3,
EffectiveAmount: amount, EffectiveAmount: big.NewInt(4),
TokenID: 5,
} }
txCompressedData, err = tx.BytesDataAvailability(32) txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData)) assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32) }
func TestL1TxFromDataAvailability(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
}
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx) assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{ tx = L1Tx{
ToIdx: 0, FromIdx: 2,
FromIdx: 0, ToIdx: 3,
EffectiveAmount: big.NewInt(0), EffectiveAmount: big.NewInt(4),
} }
txCompressedData, err = tx.BytesDataAvailability(32) txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 635,
FromIdx: 296,
EffectiveAmount: big.NewInt(1000000000000000000),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32) l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
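
As a side note, the `42540be400` amount field in the last vector decodes cleanly under the assumption that Float40 packs a 5-bit base-10 exponent in the high bits and a 35-bit mantissa in the low bits; the standalone sketch below only checks that one vector and is not the library's `Float40FromBytes`:

```
package main

import "fmt"

func main() {
	// Amount field taken from the "000001280000027b42540be40000" vector above:
	// 5 bytes = 40 bits.
	raw := uint64(0x42540be400)

	const mantissaMask = (1 << 35) - 1
	exponent := raw >> 35         // assumed: high 5 bits are a base-10 exponent
	mantissa := raw & mantissaMask // assumed: low 35 bits are the mantissa

	amount := mantissa
	for i := uint64(0); i < exponent; i++ {
		amount *= 10
	}

	// Prints: exp=8 mantissa=10000000000 amount=1000000000000000000
	fmt.Printf("exp=%d mantissa=%d amount=%d\n", exponent, mantissa, amount)
}
```
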
@@ -218,15 +172,18 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
UserOrigin: true, UserOrigin: true,
} }
expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
require.NoError(t, err)
encodedData, err := l1Tx.BytesUser() encodedData, err := l1Tx.BytesUser()
require.NoError(t, err) require.NoError(t, err)
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d" assert.Equal(t, expected, encodedData)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
} }
func TestL1CoordinatorTxByteParsers(t *testing.T) { func TestL1CoordinatorTxByteParsers(t *testing.T) {
hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe") hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
chainID := big.NewInt(1337) chainID := big.NewInt(1337)
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19") privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err) require.NoError(t, err)
@@ -244,16 +201,18 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c") pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
err = pkComp.UnmarshalText(pkCompL) err = pkComp.UnmarshalText(pkCompL)
require.NoError(t, err) require.NoError(t, err)
bytesMessage1 := []byte("\x19Ethereum Signed Message:\n120")
bytesMessage2 := []byte("I authorize this babyjubjub key for hermez rollup account creation")
accCreationAuth := AccountCreationAuth{ babyjubB := SwapEndianness(pkComp[:])
EthAddr: fromEthAddr, var data []byte
BJJ: pkComp, data = append(data, bytesMessage1...)
} data = append(data, bytesMessage2...)
data = append(data, babyjubB[:]...)
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress) data = append(data, chainIDBytes...)
require.NoError(t, err) data = append(data, hermezAddress.Bytes()...)
hash := crypto.Keccak256Hash(data)
signature, err := crypto.Sign(h, privateKey) signature, err := crypto.Sign(hash.Bytes(), privateKey)
require.NoError(t, err) require.NoError(t, err)
// Ethereum adds 27 to v // Ethereum adds 27 to v
v := int(signature[64]) v := int(signature[64])
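
For context, the right-hand side rebuilds the account-creation-auth digest by hand: a 120-byte payload (66-byte message + 32-byte BabyJubJub key + 2-byte chainID + 20-byte rollup address) wrapped in the EIP-191 personal-sign prefix, keccak-hashed and ECDSA-signed. A hedged, self-contained sketch of that flow follows; the BJJ key bytes are a zeroed placeholder, so the resulting digest and signature are illustrative only:

```
package main

import (
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway test key reused from the vectors above; never use it outside tests.
	privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
	if err != nil {
		panic(err)
	}

	// 66-byte message + 32-byte BJJ key + 2-byte chainID + 20-byte address = 120 bytes,
	// which is why the prefix announces a length of 120.
	msg := []byte("I authorize this babyjubjub key for hermez rollup account creation")
	bjj := make([]byte, 32)       // placeholder for the byte-swapped compressed BJJ key
	chainID := []byte{0x05, 0x39} // 1337 big-endian, as in the test above
	addr := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")

	var payload []byte
	payload = append(payload, msg...)
	payload = append(payload, bjj...)
	payload = append(payload, chainID...)
	payload = append(payload, addr.Bytes()...)

	// EIP-191 "personal sign" envelope, then keccak256 and a raw ECDSA signature.
	data := append([]byte(fmt.Sprintf("\x19Ethereum Signed Message:\n%d", len(payload))), payload...)
	hash := crypto.Keccak256Hash(data)
	sig, err := crypto.Sign(hash.Bytes(), privateKey)
	if err != nil {
		panic(err)
	}
	sig[64] += 27 // Ethereum convention: v is 27 or 28

	fmt.Printf("digest=%s sig=%x\n", hash.Hex(), sig)
}
```
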


@@ -89,15 +89,11 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
// TokenID // TokenID
b = append(b, tx.TokenID.Bytes()[:]...) b = append(b, tx.TokenID.Bytes()[:]...)
// Amount // Amount
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount)) return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
} }
amountFloat40Bytes, err := amountFloat40.Bytes() b = append(b, amountFloat16.Bytes()...)
if err != nil {
return txID, tracerr.Wrap(err)
}
b = append(b, amountFloat40Bytes...)
// Nonce // Nonce
nonceBytes, err := tx.Nonce.Bytes() nonceBytes, err := tx.Nonce.Bytes()
if err != nil { if err != nil {
@@ -174,11 +170,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
} }
// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ] // [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -192,16 +188,13 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
} }
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil { copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
return nil, tracerr.Wrap(err) b[idxLen*2+2] = byte(tx.Fee)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
return b[:], nil return b[:], nil
} }
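
To make the layout concrete, here is an illustrative stand-in for the nLevels=32 case that reproduces the `00000101000001002b16c9` vector used further down; the 2-byte Float16 amount is passed already encoded instead of calling `NewFloat16`:

```
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// packL2DataAvailability is an illustrative stand-in for L2Tx.BytesDataAvailability
// with nLevels=32: 4-byte fromIdx | 4-byte toIdx | 2-byte Float16 amount | 1-byte fee.
func packL2DataAvailability(fromIdx, toIdx uint32, amountF16 [2]byte, fee byte) []byte {
	b := make([]byte, (2*32+16+8)/8) // 11 bytes for nLevels=32
	binary.BigEndian.PutUint32(b[0:4], fromIdx)
	binary.BigEndian.PutUint32(b[4:8], toIdx)
	copy(b[8:10], amountF16[:])
	b[10] = fee
	return b
}

func main() {
	// Mirrors the "00000101000001002b16c9" vector below:
	// fromIdx=257, toIdx=256, Float16(79000000)=0x2b16, fee=201 (0xc9).
	b := packL2DataAvailability(257, 256, [2]byte{0x2b, 0x16}, 201)
	fmt.Println(hex.EncodeToString(b)) // 00000101000001002b16c9
}
```
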
@@ -226,10 +219,7 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt() tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
if err != nil { tx.Fee = FeeSelector(b[idxLen*2+2])
return nil, tracerr.Wrap(err)
}
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
return tx, nil return tx, nil
} }


@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err := NewL2Tx(l2Tx) l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String()) assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String()) assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String()) assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String()) assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 1, FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String()) assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 999, FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String()) assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 4444, FromIdx: 4444,
@@ -90,85 +90,25 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String()) assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
} }
func TestL2TxByteParsers(t *testing.T) { func TestL2TxByteParsers(t *testing.T) {
// test vectors values generated from javascript implementation amount := new(big.Int)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10) amount.SetString("79000000", 10)
require.True(t, ok)
l2Tx := &L2Tx{ l2Tx := &L2Tx{
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected := "ffffffffffffffffffff"
encodedData, err := l2Tx.BytesDataAvailability(16)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected = "ffffffffffffffffffffffffffff"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 0,
Amount: big.NewInt(0),
Fee: 0,
}
expected = "0000000000000000000000000000"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 1061,
Amount: big.NewInt(420000000000),
Fee: 127,
}
expected = "000004250000000010fa56ea007f"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 256, ToIdx: 256,
Amount: amount,
FromIdx: 257, FromIdx: 257,
Amount: big.NewInt(79000000),
Fee: 201, Fee: 201,
} }
expected = "00000101000001000004b571c0c9" // Data from the compatibility test
encodedData, err = l2Tx.BytesDataAvailability(32) expected := "00000101000001002b16c9"
encodedData, err := l2Tx.BytesDataAvailability(32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData)) assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32) decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData) assert.Equal(t, l2Tx, decodedData)
} }


@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"` ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"` ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"` State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"` RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"` RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"` RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"` RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"` AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -73,7 +73,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original Type doesn't match the correct one, return error // If original Type doesn't match the correct one, return error
if txTypeOld != "" && txTypeOld != tx.Type { if txTypeOld != "" && txTypeOld != tx.Type {
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s", return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
txTypeOld, tx.Type)) tx.Type, txTypeOld))
} }
txIDOld := tx.TxID txIDOld := tx.TxID
@@ -83,7 +83,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original TxID doesn't match the correct one, return error // If original TxID doesn't match the correct one, return error
if txIDOld != (TxID{}) && txIDOld != tx.TxID { if txIDOld != (TxID{}) && txIDOld != tx.TxID {
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s", return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
txIDOld.String(), tx.TxID.String())) tx.TxID.String(), txIDOld.String()))
} }
return tx, nil return tx, nil
@@ -122,13 +122,18 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes // [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
toBJJSign := byte(0) toBJJSign := byte(0)
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ) pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -144,18 +149,19 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[11:17], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[17:23], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
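
The packing above can be checked against the `FromIdx=2 / ToIdx=3` vector in the pool tests further down; this standalone sketch builds the 31-byte buffer with the same offsets (it is not the production method, and the signature constant bytes are taken from the vectors):

```
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	// Field values from the TestTxCompressedData vector below:
	// FromIdx=2, ToIdx=3, Float16(Amount=4)=0x0004, TokenID=5, Nonce=6, Fee=0,
	// ToBJJ sign bit set, chainID=0, signature constant 0xc60be60f.
	var b [31]byte
	b[0] = 1                                       // toBJJSign
	b[1] = 0                                       // fee
	copy(b[2:7], []byte{0, 0, 0, 0, 6})            // nonce, 5 bytes
	binary.BigEndian.PutUint32(b[7:11], 5)         // tokenID, 4 bytes
	binary.BigEndian.PutUint16(b[11:13], 0x0004)   // amountFloat16, 2 bytes
	copy(b[13:19], []byte{0, 0, 0, 0, 0, 3})       // toIdx, 6 bytes
	copy(b[19:25], []byte{0, 0, 0, 0, 0, 2})       // fromIdx, 6 bytes
	binary.BigEndian.PutUint16(b[25:27], 0)        // chainID, 2 bytes
	copy(b[27:31], []byte{0xc6, 0x0b, 0xe6, 0x0f}) // signature constant

	bi := new(big.Int).SetBytes(b[:])
	fmt.Println(hex.EncodeToString(bi.Bytes()))
	// 010000000000060000000500040000000000030000000000020000c60be60f
}
```
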
@@ -164,9 +170,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty // TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction // transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int { func TxCompressedDataEmpty(chainID uint16) *big.Int {
var b [29]byte var b [31]byte
binary.BigEndian.PutUint16(b[23:25], chainID) binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[25:29], SignatureConstantBytes[:]) copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi return bi
} }
@@ -176,24 +182,19 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 40 bits ] amountFloat40 // 5 bytes // [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil { if tx.Amount == nil {
tx.Amount = big.NewInt(0) tx.Amount = big.NewInt(0)
} }
amountFloat40, err := NewFloat40(tx.Amount) amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() var b [25]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
toBJJSign := byte(0) toBJJSign := byte(0)
if tx.ToBJJ != EmptyBJJComp { if tx.ToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.ToBJJ) sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -209,17 +210,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:16], amountFloat40Bytes) copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[16:22], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[22:28], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -235,24 +236,19 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits ] rqUserFee // 1 byte // [ 8 bits ] rqUserFee // 1 byte
// [ 40 bits ] rqNonce // 5 bytes // [ 40 bits ] rqNonce // 5 bytes
// [ 32 bits ] rqTokenID // 4 bytes // [ 32 bits ] rqTokenID // 4 bytes
// [ 40 bits ] rqAmountFloat40 // 5 bytes // [ 16 bits ] rqAmountFloat16 // 2 bytes
// [ 48 bits ] rqToIdx // 6 bytes // [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes // [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil { if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0) tx.RqAmount = big.NewInt(0)
} }
amountFloat40, err := NewFloat40(tx.RqAmount) amountFloat16, err := NewFloat16(tx.RqAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes() var b [25]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
rqToBJJSign := byte(0) rqToBJJSign := byte(0)
if tx.RqToBJJ != EmptyBJJComp { if tx.RqToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.RqToBJJ) sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -268,17 +264,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.RqTokenID.Bytes()) copy(b[7:11], tx.RqTokenID.Bytes())
copy(b[11:16], amountFloat40Bytes) copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.RqToIdx.Bytes() toIdxBytes, err := tx.RqToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[16:22], toIdxBytes[:]) copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.RqFromIdx.Bytes() fromIdxBytes, err := tx.RqFromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[22:28], fromIdxBytes[:]) copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -291,22 +287,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
var e1B [25]byte
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
toEthAddr := EthAddrToBigInt(tx.ToEthAddr) toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
copy(e1B[5:25], toEthAddr.Bytes())
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr) rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ) _, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -318,7 +299,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ) _, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY}) return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
} }
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp


@@ -21,104 +21,80 @@ func TestNewPoolL2Tx(t *testing.T) {
} }
poolL2Tx, err := NewPoolL2Tx(poolL2Tx) poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String()) assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
} }
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) { func TestTxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation chainID := uint16(0)
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign var sk babyjub.PrivateKey
_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001")) _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
Nonce: (1 << 40) - 1,
Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
require.NoError(t, err)
expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err := tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
Nonce: 0,
Fee: 0,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
assert.Equal(t, "0", txCompressedDataV2.String())
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = PoolL2Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
require.NoError(t, err)
expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
FromIdx: 2, FromIdx: 2,
ToIdx: 3, ToIdx: 3,
TokenID: 4, Amount: big.NewInt(4),
Nonce: 5, TokenID: 5,
Fee: 6, Nonce: 6,
ToBJJ: skPositive.Public().Compress(), ToBJJ: sk.Public().Compress(),
} }
txCompressedData, err = tx.TxCompressedData(uint16(0)) txCompressedData, err := tx.TxCompressedData(chainID)
require.NoError(t, err) assert.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f" // test vector value generated from javascript implementation
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes())) expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
// using a different chainID
txCompressedData, err = tx.TxCompressedData(uint16(100))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public().Compress(),
}
rqTxCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestRqTxCompressedDataV2(t *testing.T) { func TestRqTxCompressedDataV2(t *testing.T) {
@@ -137,16 +113,19 @@ func TestRqTxCompressedDataV2(t *testing.T) {
txCompressedData, err := tx.RqTxCompressedDataV2() txCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err) assert.NoError(t, err)
// test vector value generated from javascript implementation // test vector value generated from javascript implementation
expectedStr := "110248805340524920412994530176819463725852160917809517418728390663" expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String()) assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10) expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes()) assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes())) assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestHashToSign(t *testing.T) { func TestHashToSign(t *testing.T) {
chainID := uint16(0) chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: 2, FromIdx: 2,
ToIdx: 3, ToIdx: 3,
@@ -157,7 +136,7 @@ func TestHashToSign(t *testing.T) {
} }
toSign, err := tx.HashToSign(chainID) toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes())) assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
} }
func TestVerifyTxSignature(t *testing.T) { func TestVerifyTxSignature(t *testing.T) {
@@ -177,7 +156,7 @@ func TestVerifyTxSignature(t *testing.T) {
} }
toSign, err := tx.HashToSign(chainID) toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String()) assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
sig := sk.SignPoseidon(toSign) sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress() tx.Signature = sig.Compress()
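
For completeness, the signing step at the end of this test looks like the sketch below when isolated; the message is a placeholder `big.Int` rather than a real `HashToSign` output, so only the call shapes are meaningful:

```
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"github.com/iden3/go-iden3-crypto/babyjub"
)

func main() {
	// Same throwaway test key used in the vectors above.
	var sk babyjub.PrivateKey
	if _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001")); err != nil {
		panic(err)
	}

	// In the real flow toSign comes from PoolL2Tx.HashToSign(chainID);
	// here it is just a placeholder field element.
	toSign := big.NewInt(1234)

	sig := sk.SignPoseidon(toSign) // EdDSA over BabyJubJub with a Poseidon digest
	comp := sig.Compress()         // 64-byte compressed form stored in tx.Signature
	pkComp := sk.Public().Compress()

	fmt.Printf("pk=%s sig=%s\n", hex.EncodeToString(pkComp[:]), hex.EncodeToString(comp[:]))
}
```
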


@@ -62,17 +62,3 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
} }
return siblings[:pos] return siblings[:pos]
} }
// TokensToUSD is a helper function to calculate the USD value of a certain
// amount of tokens considering the normalized token price (which is the price
// commonly reported by exchanges)
func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
amountF := new(big.Float).SetInt(amount)
// Divide by 10^decimals to normalize the amount
baseF := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
amountF.Mul(amountF, big.NewFloat(valueUSD))
amountF.Quo(amountF, baseF)
amountUSD, _ := amountF.Float64()
return amountUSD
}
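
Since the helper is being removed here, a runnable copy with a worked example may be useful as a reference (the lower-case name is only to keep the sketch self-contained): 1.5 tokens of an 18-decimals token priced at 2.00 USD comes out to 3.00 USD.

```
package main

import (
	"fmt"
	"math/big"
)

// tokensToUSD mirrors the removed TokensToUSD helper above: normalize the raw
// token amount by 10^decimals and multiply by the exchange-reported USD price.
func tokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
	amountF := new(big.Float).SetInt(amount)
	baseF := new(big.Float).SetInt(new(big.Int).Exp(
		big.NewInt(10), big.NewInt(int64(decimals)), nil))
	amountF.Mul(amountF, big.NewFloat(valueUSD))
	amountF.Quo(amountF, baseF)
	usd, _ := amountF.Float64()
	return usd
}

func main() {
	amount, _ := new(big.Int).SetString("1500000000000000000", 10) // 1.5 tokens, 18 decimals
	fmt.Println(tokensToUSD(amount, 18, 2.0))                      // 3
}
```
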


@@ -102,8 +102,6 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx] ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr // ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx] ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]
// OnChain determines if is L1 (1/true) or L2 (0/false) // OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx] OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -114,8 +112,8 @@ type ZKInputs struct {
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new // NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0) // account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx] NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float40 // DepositAmountF encoded as float16
DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx] DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// FromEthAddr // FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx] FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int // FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -328,7 +326,6 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
zki.AuxToIdx = newSlice(maxTx) zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx) zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx) zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx) zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx) zki.NewAccount = newSlice(maxTx)
@@ -479,8 +476,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes()) copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...) b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData // [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528) l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen) l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ { for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -497,9 +494,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
} }
b = append(b, l1TxsDataAvailability...) b = append(b, l1TxsDataAvailability...)
// [MAX_TX*(2*NLevels + 48) bits] L2TxsData // [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen) l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ { for i := 0; i < len(z.Metadata.L2TxsData); i++ {
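
The byte budgets implied by these per-transaction widths are easy to sanity-check; the numbers below (tree depths, maxTx, maxL1Tx) are illustrative parameters rather than values from a real configuration, and the 480/24-bit widths correspond to the Float16 side of this diff:

```
package main

import "fmt"

func main() {
	const (
		maxLevels = 48  // illustrative circuit parameter
		nLevels   = 32  // illustrative circuit parameter
		maxL1Tx   = 128 // illustrative circuit parameter
		maxTx     = 376 // illustrative circuit parameter
	)

	// Per-tx bit widths used by ToHashGlobalData on the Float16 side;
	// the Float40 side uses 528 and 48 instead.
	l1TxDataLen := 2*maxLevels + 480
	l2TxDataLen := 2*nLevels + 24

	fmt.Println("L1TxsData bytes:", maxL1Tx*l1TxDataLen/8) // 128*576/8 = 9216
	fmt.Println("L2TxsData bytes:", maxTx*l2TxDataLen/8)   // 376*88/8  = 4136
}
```
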


@@ -35,30 +35,10 @@ type ServerProof struct {
URL string `validate:"required"` URL string `validate:"required"`
} }
// ForgeBatchGasCost is the cost associated with a ForgeBatch transaction, split
// into different parts to be used in a formula.
type ForgeBatchGasCost struct {
Fixed uint64 `validate:"required"`
L1UserTx uint64 `validate:"required"`
L1CoordTx uint64 `validate:"required"`
L2Tx uint64 `validate:"required"`
}
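
The removed struct only makes sense together with a formula that combines the four parts; the exact coordinator formula is not shown in this diff, so the sketch below assumes the obvious linear combination (a fixed base cost plus a per-transaction term for each type) purely as an illustration:

```
package main

import "fmt"

// forgeBatchGasCost mirrors the removed config struct above.
type forgeBatchGasCost struct {
	Fixed     uint64
	L1UserTx  uint64
	L1CoordTx uint64
	L2Tx      uint64
}

// gasLimit is an assumed combination of the four parts; the real coordinator
// formula is not part of this excerpt, so treat this only as an illustration
// of why the cost is split per transaction type.
func gasLimit(c forgeBatchGasCost, nL1User, nL1Coord, nL2 uint64) uint64 {
	return c.Fixed + nL1User*c.L1UserTx + nL1Coord*c.L1CoordTx + nL2*c.L2Tx
}

func main() {
	// Illustrative per-item costs, not measured values.
	c := forgeBatchGasCost{Fixed: 300000, L1UserTx: 10000, L1CoordTx: 15000, L2Tx: 5000}
	fmt.Println(gasLimit(c, 4, 2, 100)) // 300000 + 40000 + 30000 + 500000 = 870000
}
```
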
// CoordinatorAPI specifies the configuration parameters of the API in mode
// coordinator
type CoordinatorAPI struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
}
// Coordinator is the coordinator specific configuration. // Coordinator is the coordinator specific configuration.
type Coordinator struct { type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging // ForgerAddress is the address under which this coordinator is forging
ForgerAddress ethCommon.Address `validate:"required"` ForgerAddress ethCommon.Address `validate:"required"`
// MinimumForgeAddressBalance is the minimum balance the forger address
// needs to start the coordinator, in wei. If set to 0, the coordinator
// will not check the balance before starting.
MinimumForgeAddressBalance *big.Int
// FeeAccount is the Hermez account that the coordinator uses to receive fees // FeeAccount is the Hermez account that the coordinator uses to receive fees
FeeAccount struct { FeeAccount struct {
// Address is the ethereum address of the account to receive fees // Address is the ethereum address of the account to receive fees
@@ -99,58 +79,27 @@ type Coordinator struct {
// ForgeRetryInterval is the waiting interval between calls to forge a // ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error // batch after an error
ForgeRetryInterval Duration `validate:"required"` ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"` SyncRetryInterval Duration `validate:"required"`
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval Duration `validate:"required"`
// L2DB is the DB that holds the pool of L2Txs // L2DB is the DB that holds the pool of L2Txs
L2DB struct { L2DB struct {
// SafetyPeriod is the number of batches after which // SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool // non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"` SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the maximum number of pending L2Txs that can be // MaxTxs is the number of L2Txs that once reached triggers
// stored in the pool. Once this number of pending L2Txs is // deletion of old L2Txs
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"` MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted. // L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"` TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge // PurgeBatchDelay is the delay between batches to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBatchDelay int64 `validate:"required"` PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBatchDelay int64 `validate:"required"` InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBlockDelay int64 `validate:"required"` PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBlockDelay int64 `validate:"required"` InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
TxSelector struct { TxSelector struct {
@@ -170,13 +119,15 @@ type Coordinator struct {
NLevels int64 `validate:"required"` NLevels int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
EthClient struct { EthClient struct {
// CallGasLimit is the default gas limit set for ethereum
// calls, except for methods where a particular gas limit is
// hardcoded because it's known to be a big value
CallGasLimit uint64 `validate:"required"`
// MaxGasPrice is the maximum gas price allowed for ethereum // MaxGasPrice is the maximum gas price allowed for ethereum
// transactions // transactions
MaxGasPrice *big.Int `validate:"required"` MaxGasPrice *big.Int `validate:"required"`
// GasPriceIncPerc is the percentage increase of gas price set // GasPriceDiv is the gas price division
// in an ethereum transaction from the suggested gas price by GasPriceDiv uint64 `validate:"required"`
// the ethereum node
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt // CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"` CheckLoopInterval Duration `validate:"required"`
@@ -200,11 +151,11 @@ type Coordinator struct {
// Password used to decrypt the keys in the keystore // Password used to decrypt the keys in the keystore
Password string `validate:"required"` Password string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"` } `validate:"required"`
API CoordinatorAPI `validate:"required"` API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
Debug struct { Debug struct {
// BatchPath if set, specifies the path where batchInfo is stored // BatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
@@ -219,45 +170,6 @@ type Coordinator struct {
} }
} }
// PostgreSQL is the postgreSQL configuration parameters. It's possible to use
// differentiated SQL connections for read/write. If the read configuration is
// not provided, the write one is used for both reads and writes
type PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
HostWrite string `validate:"required"`
// User of the PostgreSQL write server
UserWrite string `validate:"required"`
// Password of the PostgreSQL write server
PasswordWrite string `validate:"required"`
// Name of the PostgreSQL write server database
NameWrite string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
}
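
The read/write fallback described in the comment is straightforward; the sketch below mirrors the struct with a lower-case copy and builds a lib/pq-style DSN, which is an assumption about how the node consumes these fields:

```
package main

import "fmt"

// postgreSQL mirrors the config struct above (write fields required, read optional).
type postgreSQL struct {
	PortWrite     int
	HostWrite     string
	UserWrite     string
	PasswordWrite string
	NameWrite     string

	PortRead     int
	HostRead     string
	UserRead     string
	PasswordRead string
	NameRead     string
}

// readConn applies the documented fallback: if no read server is configured,
// reuse the write connection parameters for reads.
func readConn(c postgreSQL) string {
	host, port, user, pass, name := c.HostRead, c.PortRead, c.UserRead, c.PasswordRead, c.NameRead
	if host == "" {
		host, port, user, pass, name = c.HostWrite, c.PortWrite, c.UserWrite, c.PasswordWrite, c.NameWrite
	}
	return fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", host, port, user, pass, name)
}

func main() {
	cfg := postgreSQL{PortWrite: 5432, HostWrite: "localhost", UserWrite: "hermez",
		PasswordWrite: "yourpasswordhere", NameWrite: "hermez"}
	fmt.Println(readConn(cfg)) // falls back to the write server
}
```
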
// NodeDebug specifies debug configuration parameters
type NodeDebug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
// Node is the hermez node configuration. // Node is the hermez node configuration.
type Node struct { type Node struct {
PriceUpdater struct { PriceUpdater struct {
@@ -274,7 +186,18 @@ type Node struct {
// Keep is the number of checkpoints to keep // Keep is the number of checkpoints to keep
Keep int `validate:"required"` Keep int `validate:"required"`
} `validate:"required"` } `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"` PostgreSQL struct {
// Port of the PostgreSQL server
Port int `validate:"required"`
// Host of the PostgreSQL server
Host string `validate:"required"`
// User of the PostgreSQL server
User string `validate:"required"`
// Password of the PostgreSQL server
Password string `validate:"required"`
// Name of the PostgreSQL server database
Name string `validate:"required"`
} `validate:"required"`
Web3 struct { Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server // URL is the URL of the web3 ethereum-node RPC server
URL string `validate:"required"` URL string `validate:"required"`
@@ -305,7 +228,6 @@ type Node struct {
// TokenHEZ address // TokenHEZ address
TokenHEZName string `validate:"required"` TokenHEZName string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// API specifies the configuration parameters of the API
API struct { API struct {
// Address where the API will listen if set // Address where the API will listen if set
Address string Address string
@@ -323,45 +245,17 @@ type Node struct {
// can wait to establish a SQL connection // can wait to establish a SQL connection
SQLConnectionTimeout Duration SQLConnectionTimeout Duration
} `validate:"required"` } `validate:"required"`
Debug NodeDebug `validate:"required"` Debug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
}
Coordinator Coordinator `validate:"-"` Coordinator Coordinator `validate:"-"`
} }
// APIServer is the api server configuration parameters
type APIServer struct {
// NodeAPI specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string `validate:"required"`
// Explorer enables the Explorer API endpoints
Explorer bool
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Coordinator struct {
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
L2DB struct {
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
} `validate:"required"`
}
Debug NodeDebug `validate:"required"`
}
// Load loads a generic config. // Load loads a generic config.
func Load(path string, cfg interface{}) error { func Load(path string, cfg interface{}) error {
bs, err := ioutil.ReadFile(path) //nolint:gosec bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -375,8 +269,8 @@ func Load(path string, cfg interface{}) error {
return nil return nil
} }
// LoadNode loads the Node configuration from path. // LoadCoordinator loads the Coordinator configuration from path.
func LoadNode(path string, coordinator bool) (*Node, error) { func LoadCoordinator(path string) (*Node, error) {
var cfg Node var cfg Node
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -385,28 +279,21 @@ func LoadNode(path string, coordinator bool) (*Node, error) {
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil { if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
}
return &cfg, nil return &cfg, nil
} }
// LoadAPIServer loads the APIServer configuration from path. // LoadNode loads the Node configuration from path.
func LoadAPIServer(path string, coordinator bool) (*APIServer, error) { func LoadNode(path string) (*Node, error) {
var cfg APIServer var cfg Node
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
} }
validate := validator.New() validate := validator.New()
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil return &cfg, nil
} }
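
Usage of the generic loader is the same on both sides of this diff; a minimal sketch, assuming the `github.com/hermeznetwork/hermez-node/config` import path and a hypothetical `cfg.toml` path:

```
package main

import (
	"log"

	"github.com/hermeznetwork/hermez-node/config"
)

func main() {
	// cfg.toml is a hypothetical path; Load reads the file and decodes it into
	// the given struct, while the LoadNode/LoadCoordinator wrappers above add
	// the validator pass on top.
	var cfg config.Node
	if err := config.Load("cfg.toml", &cfg); err != nil {
		log.Fatal(err)
	}
	log.Printf("web3 url: %s", cfg.Web3.URL)
}
```
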


@@ -92,7 +92,6 @@ type BatchInfo struct {
ForgeBatchArgs *eth.RollupForgeBatchArgs ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo // FeesInfo
EthTx *types.Transaction EthTx *types.Transaction
EthTxErr error
// SendTimestamp the time of batch sent to ethereum // SendTimestamp the time of batch sent to ethereum
SendTimestamp time.Time SendTimestamp time.Time
Receipt *types.Receipt Receipt *types.Receipt


@@ -11,7 +11,6 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
@@ -25,8 +24,6 @@ import (
var ( var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet") errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
) )
const ( const (
@@ -74,22 +71,9 @@ type Config struct {
// ForgeRetryInterval is the waiting interval between calls to forge a // ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error // batch after an error
ForgeRetryInterval time.Duration ForgeRetryInterval time.Duration
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay time.Duration
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval time.Duration SyncRetryInterval time.Duration
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval time.Duration
// EthClientAttemptsDelay is the delay between attempts to do an eth client // EthClientAttemptsDelay is the delay between attempts to do an eth client
// RPC call // RPC call
EthClientAttemptsDelay time.Duration EthClientAttemptsDelay time.Duration
@@ -103,10 +87,6 @@ type Config struct {
// MaxGasPrice is the maximum gas price allowed for ethereum // MaxGasPrice is the maximum gas price allowed for ethereum
// transactions // transactions
MaxGasPrice *big.Int MaxGasPrice *big.Int
// GasPriceIncPerc is the percentage increase of gas price set in an
// ethereum transaction from the suggested gas price by the ethereum
// node
GasPriceIncPerc int64
// TxManagerCheckInterval is the waiting interval between receipt // TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration TxManagerCheckInterval time.Duration
@@ -117,9 +97,6 @@ type Config struct {
// VerifierIdx is the index of the verifier contract registered in the // VerifierIdx is the index of the verifier contract registered in the
// smart contract // smart contract
VerifierIdx uint8 VerifierIdx uint8
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost config.ForgeBatchGasCost
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
@@ -144,8 +121,8 @@ type Coordinator struct {
pipelineNum int // Pipeline sequential number. The first pipeline is 1 pipelineNum int // Pipeline sequential number. The first pipeline is 1
pipelineFromBatch fromBatch // batch from which we started the pipeline pipelineFromBatch fromBatch // batch from which we started the pipeline
provers []prover.Client provers []prover.Client
consts common.SCConsts consts synchronizer.SCConsts
vars common.SCVariables vars synchronizer.SCVariables
stats synchronizer.Stats stats synchronizer.Stats
started bool started bool
@@ -161,15 +138,6 @@ type Coordinator struct {
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
// mutexL2DBUpdateDelete protects updates to the L2DB so that
// these two processes always happen exclusively:
// - Pipeline taking pending txs, running through the TxProcessor and
// marking selected txs as forging
// - Coordinator deleting pending txs that have been marked with
// `external_delete`.
// Without this mutex, the coordinator could delete a pending tx that
// has just been selected by the TxProcessor in the pipeline.
mutexL2DBUpdateDelete sync.Mutex
pipeline *Pipeline pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum lastNonFailedBatchNum common.BatchNum
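
The `mutexL2DBUpdateDelete` comment above describes why the pipeline's tx selection and the external-delete purge must never overlap. A minimal sketch of that locking pattern, using hypothetical names (`l2Pool`, `selectAndMarkForging`, `purgeExternallyDeleted`) rather than the repo's types:

```go
// Minimal sketch (not the hermez-node code) of the exclusive-access pattern
// that mutexL2DBUpdateDelete provides: tx selection and the external-delete
// purge must not run concurrently, otherwise a pending tx could be deleted
// right after being selected for forging.
package main

import "sync"

type l2Pool struct {
	mutexUpdateDelete sync.Mutex // assumption: same role as mutexL2DBUpdateDelete
}

// selectAndMarkForging stands in for the Pipeline taking pending txs and
// marking the selected ones as forging.
func (p *l2Pool) selectAndMarkForging() {
	p.mutexUpdateDelete.Lock()
	defer p.mutexUpdateDelete.Unlock()
	// ... run the TxProcessor selection and mark txs as forging ...
}

// purgeExternallyDeleted stands in for the Coordinator loop deleting txs
// marked via the `external_delete` column.
func (p *l2Pool) purgeExternallyDeleted() {
	p.mutexUpdateDelete.Lock()
	defer p.mutexUpdateDelete.Unlock()
	// ... delete pending txs marked with external_delete ...
}

func main() {
	p := &l2Pool{}
	go p.purgeExternallyDeleted()
	p.selectAndMarkForging()
}
```
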
@@ -185,8 +153,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client, serverProofs []prover.Client,
ethClient eth.ClientInterface, ethClient eth.ClientInterface,
scConsts *common.SCConsts, scConsts *synchronizer.SCConsts,
initSCVars *common.SCVariables, initSCVars *synchronizer.SCVariables,
) (*Coordinator, error) { ) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100% // nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -265,8 +233,7 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) { func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
c.pipelineNum++ c.pipelineNum++
return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector, return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager, c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
c.provers, &c.consts)
} }
// MsgSyncBlock indicates an update to the Synchronizer stats // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -275,19 +242,19 @@ type MsgSyncBlock struct {
Batches []common.BatchData Batches []common.BatchData
// Vars contains each Smart Contract variables if they are updated, or // Vars contains each Smart Contract variables if they are updated, or
// nil if they haven't changed. // nil if they haven't changed.
Vars common.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
// MsgSyncReorg indicates a reorg // MsgSyncReorg indicates a reorg
type MsgSyncReorg struct { type MsgSyncReorg struct {
Stats synchronizer.Stats Stats synchronizer.Stats
Vars common.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct { type MsgStopPipeline struct {
Reason string Reason string
// FailedBatchNum indicates the first batchNum that failed in the // FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored. // pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum FailedBatchNum common.BatchNum
} }
@@ -300,7 +267,7 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) { func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil { if update.Rollup != nil {
vars.Rollup = *update.Rollup vars.Rollup = *update.Rollup
} }
@@ -312,7 +279,7 @@ func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
} }
} }
func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) { func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&c.vars, vars) updateSCVars(&c.vars, vars)
} }
@@ -371,43 +338,25 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
if c.pipeline == nil { if c.pipeline == nil {
relativeBlock := c.consts.Auction.RelativeBlock(nextBlock) relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay { if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugf("Coordinator: delaying pipeline start due to "+ log.Debugw("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)", "relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay) relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge { } else if canForge {
log.Infow("Coordinator: forging state begin", "block", log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum) stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
fromBatch := fromBatch{ batchNum := stats.Sync.LastBatch.BatchNum
BatchNum: stats.Sync.LastBatch.BatchNum, if c.lastNonFailedBatchNum > batchNum {
ForgerAddr: stats.Sync.LastBatch.ForgerAddr, batchNum = c.lastNonFailedBatchNum
StateRoot: stats.Sync.LastBatch.StateRoot,
}
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
fromBatch.BatchNum = c.lastNonFailedBatchNum
fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0)
}
// Before starting the pipeline make sure we reset any
// l2tx from the pool that was forged in a batch that
// didn't end up being mined. We are already doing
// this in handleStopPipeline, but we do it again as a
// failsafe in case the last synced batchnum is
// different than in the previous call to l2DB.Reorg,
// or in case the node was restarted when there was a
// started batch that included l2txs but was not mined.
if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
return tracerr.Wrap(err)
} }
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineFromBatch = fromBatch if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
// Start the pipeline
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// c.pipelineBatchNum = batchNum
} }
} else { } else {
if !canForge { if !canForge {
@@ -417,6 +366,18 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
} }
} }
if c.pipeline == nil { if c.pipeline == nil {
// Mark invalid in Pool due to forged L2Txs
// for _, batch := range batches {
// if err := c.l2DB.InvalidateOldNonces(
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
// return err
// }
// }
// if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)) {
// if err := c.txSelector.Reset(stats.Sync.LastBatch.BatchNum); err != nil {
// return tracerr.Wrap(err)
// }
// }
if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(), if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil { stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -450,8 +411,7 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars) c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
} }
if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress && if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil || c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0 {
c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) {
// There's been a reorg and the batch state root from which the // There's been a reorg and the batch state root from which the
// pipeline was started has changed (probably because it was in // pipeline was started has changed (probably because it was in
// a block that was discarded), and it was sent by a different // a block that was discarded), and it was sent by a different
@@ -524,7 +484,7 @@ func (c *Coordinator) Start() {
c.wg.Add(1) c.wg.Add(1)
go func() { go func() {
timer := time.NewTimer(longWaitDuration) waitDuration := longWaitDuration
for { for {
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
@@ -536,45 +496,23 @@ func (c *Coordinator) Start() {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.handleMsg", "err", err) log.Errorw("Coordinator.handleMsg", "err", err)
if !timer.Stop() { waitDuration = c.cfg.SyncRetryInterval
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
case <-timer.C: waitDuration = longWaitDuration
timer.Reset(longWaitDuration) case <-time.After(waitDuration):
if !c.stats.Synced() { if !c.stats.Synced() {
waitDuration = longWaitDuration
continue continue
} }
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil { if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.syncStats", "err", err) log.Errorw("Coordinator.syncStats", "err", err)
if !timer.Stop() { waitDuration = c.cfg.SyncRetryInterval
<-timer.C
}
timer.Reset(c.cfg.SyncRetryInterval)
continue continue
} }
} waitDuration = longWaitDuration
}
}()
c.wg.Add(1)
go func() {
for {
select {
case <-c.ctx.Done():
log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
c.wg.Done()
return
case <-time.After(c.cfg.PurgeByExtDelInterval):
c.mutexL2DBUpdateDelete.Lock()
if err := c.l2DB.PurgeByExternalDelete(); err != nil {
log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
}
c.mutexL2DBUpdateDelete.Unlock()
} }
} }
}() }()
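
Both variants of the Coordinator main loop shown in this hunk implement the same retry policy: stay idle for a long wait and drop to `SyncRetryInterval` after an error. Below is a minimal sketch of the `time.After(waitDuration)` style, with illustrative names (`doWork`, `retryInterval`, `longWait`) that are not repo symbols. The `time.NewTimer`/`Stop`/`Reset` variant reuses a single timer instead of allocating one per iteration and avoids leaving a pending timer behind when another select case fires first.

```go
// Minimal retry-loop sketch using only the standard library.
package main

import (
	"context"
	"errors"
	"time"
)

func doWork() error { return errors.New("transient failure") }

func loop(ctx context.Context) {
	const (
		retryInterval = 500 * time.Millisecond
		longWait      = time.Hour
	)
	waitDuration := time.Duration(0) // run immediately on the first pass
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(waitDuration):
			if err := doWork(); err != nil {
				waitDuration = retryInterval // retry soon after an error
				continue
			}
			waitDuration = longWait // idle until an external event arrives
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	loop(ctx)
}
```
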

View File

@@ -2,6 +2,7 @@ package coordinator
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
@@ -11,6 +12,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
@@ -105,8 +107,8 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB") txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err) require.NoError(t, err)
@@ -187,12 +189,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
&prover.MockClient{Delay: 400 * time.Millisecond}, &prover.MockClient{Delay: 400 * time.Millisecond},
} }
scConsts := &common.SCConsts{ scConsts := &synchronizer.SCConsts{
Rollup: *ethClientSetup.RollupConstants, Rollup: *ethClientSetup.RollupConstants,
Auction: *ethClientSetup.AuctionConstants, Auction: *ethClientSetup.AuctionConstants,
WDelayer: *ethClientSetup.WDelayerConstants, WDelayer: *ethClientSetup.WDelayerConstants,
} }
initSCVars := &common.SCVariables{ initSCVars := &synchronizer.SCVariables{
Rollup: *ethClientSetup.RollupVariables, Rollup: *ethClientSetup.RollupVariables,
Auction: *ethClientSetup.AuctionVariables, Auction: *ethClientSetup.AuctionVariables,
WDelayer: *ethClientSetup.WDelayerVariables, WDelayer: *ethClientSetup.WDelayerVariables,
@@ -517,7 +519,7 @@ func TestCoordinatorStress(t *testing.T) {
wg.Add(1) wg.Add(1)
go func() { go func() {
for { for {
blockData, _, err := syn.Sync(ctx, nil) blockData, _, err := syn.Sync2(ctx, nil)
if ctx.Err() != nil { if ctx.Err() != nil {
wg.Done() wg.Done()
return return
@@ -528,7 +530,7 @@ func TestCoordinatorStress(t *testing.T) {
coord.SendMsg(ctx, MsgSyncBlock{ coord.SendMsg(ctx, MsgSyncBlock{
Stats: *stats, Stats: *stats,
Batches: blockData.Rollup.Batches, Batches: blockData.Rollup.Batches,
Vars: common.SCVariablesPtr{ Vars: synchronizer.SCVariablesPtr{
Rollup: blockData.Rollup.Vars, Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars, Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars, WDelayer: blockData.WDelayer.Vars,
@@ -566,3 +568,8 @@ func TestCoordinatorStress(t *testing.T) {
// TODO: Test forgeBatch // TODO: Test forgeBatch
// TODO: Test waitServerProof // TODO: Test waitServerProof
// TODO: Test handleReorg // TODO: Test handleReorg
func TestFoo(t *testing.T) {
a := tracerr.Wrap(fmt.Errorf("AAA: %w", core.ErrNonceTooLow))
fmt.Println(errors.Is(a, core.ErrNonceTooLow))
}
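
TestFoo above checks that `errors.Is` sees through a tracerr/`%w`-wrapped geth error, while the TxManager file later in this compare matches the same sentinel errors by substring. A small sketch of why both forms exist, assuming the go-ethereum module is available on the module path:

```go
// errors.Is follows %w-wrapped chains in-process, but an error that has
// crossed an RPC boundary usually comes back as a flat string, where only a
// substring match still works.
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// In-process: the %w chain is preserved, errors.Is matches.
	wrapped := fmt.Errorf("forge batch: %w", core.ErrNonceTooLow)
	fmt.Println(errors.Is(wrapped, core.ErrNonceTooLow)) // true

	// Simulated RPC round-trip: the chain is lost, only the text survives.
	flat := errors.New(wrapped.Error())
	fmt.Println(errors.Is(flat, core.ErrNonceTooLow))                        // false
	fmt.Println(strings.Contains(flat.Error(), core.ErrNonceTooLow.Error())) // true
}
```
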

View File

@@ -22,28 +22,29 @@ import (
type statsVars struct { type statsVars struct {
Stats synchronizer.Stats Stats synchronizer.Stats
Vars common.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
type state struct { type state struct {
batchNum common.BatchNum batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64 lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64 lastForgeL1TxsNum int64
lastSlotForged int64
} }
// Pipeline manages the forging of batches with parallel server proofs // Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct { type Pipeline struct {
num int num int
cfg Config cfg Config
consts common.SCConsts consts synchronizer.SCConsts
// state // state
state state state state
// batchNum common.BatchNum
// lastScheduledL1BatchBlockNum int64
// lastForgeL1TxsNum int64
started bool started bool
rw sync.RWMutex rw sync.RWMutex
errAtBatchNum common.BatchNum errAtBatchNum common.BatchNum
lastForgeTime time.Time
proversPool *ProversPool proversPool *ProversPool
provers []prover.Client provers []prover.Client
@@ -53,11 +54,10 @@ type Pipeline struct {
l2DB *l2db.L2DB l2DB *l2db.L2DB
txSelector *txselector.TxSelector txSelector *txselector.TxSelector
batchBuilder *batchbuilder.BatchBuilder batchBuilder *batchbuilder.BatchBuilder
mutexL2DBUpdateDelete *sync.Mutex
purger *Purger purger *Purger
stats synchronizer.Stats stats synchronizer.Stats
vars common.SCVariables vars synchronizer.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
ctx context.Context ctx context.Context
@@ -85,12 +85,11 @@ func NewPipeline(ctx context.Context,
l2DB *l2db.L2DB, l2DB *l2db.L2DB,
txSelector *txselector.TxSelector, txSelector *txselector.TxSelector,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
mutexL2DBUpdateDelete *sync.Mutex,
purger *Purger, purger *Purger,
coord *Coordinator, coord *Coordinator,
txManager *TxManager, txManager *TxManager,
provers []prover.Client, provers []prover.Client,
scConsts *common.SCConsts, scConsts *synchronizer.SCConsts,
) (*Pipeline, error) { ) (*Pipeline, error) {
proversPool := NewProversPool(len(provers)) proversPool := NewProversPool(len(provers))
proversPoolSize := 0 proversPoolSize := 0
@@ -114,7 +113,6 @@ func NewPipeline(ctx context.Context,
batchBuilder: batchBuilder, batchBuilder: batchBuilder,
provers: provers, provers: provers,
proversPool: proversPool, proversPool: proversPool,
mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
purger: purger, purger: purger,
coord: coord, coord: coord,
txManager: txManager, txManager: txManager,
@@ -124,7 +122,7 @@ func NewPipeline(ctx context.Context,
} }
// SetSyncStatsVars is a thread-safe method to set the synchronizer Stats // SetSyncStatsVars is a thread-safe method to set the synchronizer Stats
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) { func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
select { select {
case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
@@ -133,12 +131,11 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state // reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *common.SCVariables) error { stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.state = state{ p.state = state{
batchNum: batchNum, batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum, lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
lastScheduledL1BatchBlockNum: 0, lastScheduledL1BatchBlockNum: 0,
lastSlotForged: -1,
} }
p.stats = *stats p.stats = *stats
p.vars = *vars p.vars = *vars
@@ -194,38 +191,15 @@ func (p *Pipeline) reset(batchNum common.BatchNum,
return nil return nil
} }
func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) { func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&p.vars, vars) updateSCVars(&p.vars, vars)
} }
// handleForgeBatch waits for an available proof server, calls p.forgeBatch to // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
// forge the batch and get the zkInputs, and then sends the zkInputs to the // and then waits for an available proof server and sends the zkInputs to it so
// selected proof server so that the proof computation begins. // that the proof computation begins.
func (p *Pipeline) handleForgeBatch(ctx context.Context, func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
batchNum common.BatchNum) (batchInfo *BatchInfo, err error) { batchInfo, err := p.forgeBatch(batchNum)
// 1. Wait for an available serverProof (blocking call)
serverProof, err := p.proversPool.Get(ctx)
if ctx.Err() != nil {
return nil, ctx.Err()
} else if err != nil {
log.Errorw("proversPool.Get", "err", err)
return nil, err
}
defer func() {
// If we encounter any error (notice that this function returns
// errors to notify that a batch is not forged not only because
// of unexpected errors but also due to benign causes), add the
// serverProof back to the pool
if err != nil {
p.proversPool.Add(ctx, serverProof)
}
}()
// 2. Forge the batch internally (make a selection of txs and prepare
// all the smart contract arguments)
p.mutexL2DBUpdateDelete.Lock()
batchInfo, err = p.forgeBatch(batchNum)
p.mutexL2DBUpdateDelete.Unlock()
if ctx.Err() != nil { if ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
@@ -233,21 +207,26 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err, log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum, "lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum) "syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else { } else {
log.Errorw("forgeBatch", "err", err) log.Errorw("forgeBatch", "err", err)
} }
return nil, err return nil, err
} }
// 6. Wait for an available server proof (blocking call)
// 3. Send the ZKInputs to the proof server serverProof, err := p.proversPool.Get(ctx)
if ctx.Err() != nil {
return nil, ctx.Err()
} else if err != nil {
log.Errorw("proversPool.Get", "err", err)
return nil, err
}
batchInfo.ServerProof = serverProof batchInfo.ServerProof = serverProof
if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil { if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
log.Errorw("sendServerProof", "err", err) log.Errorw("sendServerProof", "err", err)
batchInfo.ServerProof = nil
p.proversPool.Add(ctx, serverProof)
return nil, err return nil, err
} }
return batchInfo, nil return batchInfo, nil
@@ -255,7 +234,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// Start the forging pipeline // Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum, func (p *Pipeline) Start(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *common.SCVariables) error { stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
if p.started { if p.started {
log.Fatal("Pipeline already started") log.Fatal("Pipeline already started")
} }
@@ -271,7 +250,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Add(1) p.wg.Add(1)
go func() { go func() {
timer := time.NewTimer(zeroDuration) waitDuration := zeroDuration
for { for {
select { select {
case <-p.ctx.Done(): case <-p.ctx.Done():
@@ -281,24 +260,24 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case statsVars := <-p.statsVarsCh: case statsVars := <-p.statsVarsCh:
p.stats = statsVars.Stats p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars) p.syncSCVars(statsVars.Vars)
case <-timer.C: case <-time.After(waitDuration):
timer.Reset(p.cfg.ForgeRetryInterval)
// Once errAtBatchNum != 0, we stop forging // Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we // batches because there's been an error and we
// wait for the pipeline to be stopped. // wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 { if p.getErrAtBatchNum() != 0 {
waitDuration = p.cfg.ForgeRetryInterval
continue continue
} }
batchNum = p.state.batchNum + 1 batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum) batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
continue continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced || } else if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay || waitDuration = p.cfg.ForgeRetryInterval
tracerr.Unwrap(err) == errForgeBeforeDelay {
continue continue
} else if err != nil { } else if err != nil {
p.setErrAtBatchNum(batchNum) p.setErrAtBatchNum(batchNum)
waitDuration = p.cfg.ForgeRetryInterval
p.coord.SendMsg(p.ctx, MsgStopPipeline{ p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf( Reason: fmt.Sprintf(
"Pipeline.handleForgBatch: %v", err), "Pipeline.handleForgBatch: %v", err),
@@ -306,17 +285,12 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
}) })
continue continue
} }
p.lastForgeTime = time.Now()
p.state.batchNum = batchNum p.state.batchNum = batchNum
select { select {
case batchChSentServerProof <- batchInfo: case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done(): case <-p.ctx.Done():
} }
if !timer.Stop() {
<-timer.C
}
timer.Reset(zeroDuration)
} }
} }
}() }()
@@ -351,6 +325,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
} }
// We are done with this serverProof, add it back to the pool // We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof) p.proversPool.Add(p.ctx, batchInfo.ServerProof)
// batchInfo.ServerProof = nil
p.txManager.AddBatch(p.ctx, batchInfo) p.txManager.AddBatch(p.ctx, batchInfo)
} }
} }
@@ -401,9 +376,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Structure to accumulate data and metadata of the batch // Structure to accumulate data and metadata of the batch
now := time.Now()
batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum} batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
batchInfo.Debug.StartTimestamp = now batchInfo.Debug.StartTimestamp = time.Now()
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1 batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
selectionCfg := &txselector.SelectionConfig{ selectionCfg := &txselector.SelectionConfig{
@@ -417,17 +391,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
var auths [][]byte var auths [][]byte
var coordIdxs []common.Idx var coordIdxs []common.Idx
// Check if the slot is not yet fulfilled // TODO: If there are no txs and we are behind the timeout, skip
slotCommitted := false // forging a batch and return a particular error that can be handleded
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment || // in the loop where handleForgeBatch is called to retry after an
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged { // interval
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, errForgeBeforeDelay
}
// 1. Decide if we forge L2Tx or L1+L2Tx // 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) { if p.shouldL1L2Batch(batchInfo) {
@@ -445,6 +412,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
} else { } else {
// 2b: only L2 txs // 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
@@ -455,43 +425,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
l1UserTxsExtra = nil l1UserTxsExtra = nil
} }
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the L1UserTxs in the queue following
// the one we are trying to forge.
nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues and forge the
// L1UserTxs in the future. Otherwise, skip.
if len(nextL1UserTxs) == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, errForgeNoTxsBeforeDelay
}
}
if batchInfo.L1Batch {
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
}
// 3. Save metadata from TxSelector output for BatchNum // 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxsExtra = l1UserTxsExtra batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs batchInfo.L1CoordTxs = l1CoordTxs
@@ -536,8 +469,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
p.cfg.debugBatchStore(batchInfo) p.cfg.debugBatchStore(batchInfo)
log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum) log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil return batchInfo, nil
} }
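
The ForgeDelay / ForgeNoTxsDelay handling in this file reduces to a single throttling decision made before forging a batch. A minimal sketch under hypothetical names (`shouldSkipForge` and its parameters), not the repo's API:

```go
// Once the current slot already has a committed batch, forging is throttled;
// empty batches are throttled further. The names and numbers are illustrative.
package main

import (
	"fmt"
	"time"
)

func shouldSkipForge(slotCommitted, hasTxs bool,
	sinceLastForge, forgeDelay, forgeNoTxsDelay time.Duration) bool {
	if !slotCommitted {
		return false // always forge to commit the slot
	}
	if sinceLastForge < forgeDelay {
		return true // too soon after the last forged batch
	}
	if !hasTxs && sinceLastForge < forgeNoTxsDelay {
		return true // nothing to forge yet, wait a bit longer
	}
	return false
}

func main() {
	fmt.Println(shouldSkipForge(true, false,
		30*time.Second, 10*time.Second, 2*time.Minute)) // true
}
```
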

View File

@@ -25,14 +25,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestPipelineShouldL1L2Batch(t *testing.T) { func TestPipelineShouldL1L2Batch(t *testing.T) {
ethClientSetup := test.NewClientSetupExample() ethClientSetup := test.NewClientSetupExample()
ethClientSetup.ChainID = big.NewInt(int64(chainID)) ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -136,11 +128,6 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
blocks, err := tc.GenerateBlocksFromInstructions(set) blocks, err := tc.GenerateBlocksFromInstructions(set)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, blocks) require.NotNil(t, blocks)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("0")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
ethAddTokens(blocks, ethClient) ethAddTokens(blocks, ethClient)
err = ethClient.CtlAddBlocks(blocks) err = ethClient.CtlAddBlocks(blocks)
@@ -148,7 +135,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
ctx := context.Background() ctx := context.Background()
for { for {
syncBlock, discards, err := sync.Sync(ctx, nil) syncBlock, discards, err := sync.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -206,7 +193,11 @@ PoolTransfer(0) User2-User3: 300 (126)
require.NoError(t, err) require.NoError(t, err)
} }
err = pipeline.reset(batchNum, syncStats, syncSCVars) err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
Rollup: *syncSCVars.Rollup,
Auction: *syncSCVars.Auction,
WDelayer: *syncSCVars.WDelayer,
})
require.NoError(t, err) require.NoError(t, err)
// Sanity check // Sanity check
sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts() sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()

View File

@@ -13,23 +13,13 @@ import (
// PurgerCfg is the purger configuration // PurgerCfg is the purger configuration
type PurgerCfg struct { type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated // PurgeBatchDelay is the delay between batches to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBatchDelay int64 PurgeBatchDelay int64
// InvalidateBatchDelay is the delay between batches to mark invalid // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBatchDelay int64 InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBlockDelay int64 PurgeBlockDelay int64
// InvalidateBlockDelay is the delay between blocks to mark invalid // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBlockDelay int64 InvalidateBlockDelay int64
} }

View File

@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
} }
func newStateDB(t *testing.T) *statedb.LocalStateDB { func newStateDB(t *testing.T) *statedb.LocalStateDB {

View File

@@ -2,9 +2,9 @@ package coordinator
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
@@ -31,10 +31,10 @@ type TxManager struct {
batchCh chan *BatchInfo batchCh chan *BatchInfo
chainID *big.Int chainID *big.Int
account accounts.Account account accounts.Account
consts common.SCConsts consts synchronizer.SCConsts
stats synchronizer.Stats stats synchronizer.Stats
vars common.SCVariables vars synchronizer.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum discardPipelineCh chan int // int refers to the pipelineNum
@@ -49,13 +49,15 @@ type TxManager struct {
// accNextNonce is the nonce that we should use to send the next tx. // accNextNonce is the nonce that we should use to send the next tx.
// In some cases this will be a reused nonce of an already pending tx. // In some cases this will be a reused nonce of an already pending tx.
accNextNonce uint64 accNextNonce uint64
// accPendingNonce is the pending nonce of the account due to pending txs
// accPendingNonce uint64
lastSentL1BatchBlockNum int64 lastSentL1BatchBlockNum int64
} }
// NewTxManager creates a new TxManager // NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB, func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (*TxManager, error) { coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (*TxManager, error) {
chainID, err := ethClient.EthChainID() chainID, err := ethClient.EthChainID()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -68,6 +70,14 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
if err != nil { if err != nil {
return nil, err return nil, err
} }
// accPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
// if err != nil {
// return nil, err
// }
// if accNonce != accPendingNonce {
// return nil, tracerr.Wrap(fmt.Errorf("currentNonce (%v) != accPendingNonce (%v)",
// accNonce, accPendingNonce))
// }
log.Infow("TxManager started", "nonce", accNonce) log.Infow("TxManager started", "nonce", accNonce)
return &TxManager{ return &TxManager{
cfg: *cfg, cfg: *cfg,
@@ -89,6 +99,7 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
queue: NewQueue(), queue: NewQueue(),
accNonce: accNonce, accNonce: accNonce,
accNextNonce: accNonce, accNextNonce: accNonce,
// accPendingNonce: accPendingNonce,
}, nil }, nil
} }
@@ -102,7 +113,7 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
} }
// SetSyncStatsVars is a thread-safe method to set the synchronizer Stats // SetSyncStatsVars is a thread-safe method to set the synchronizer Stats
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) { func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
select { select {
case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}: case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
case <-ctx.Done(): case <-ctx.Done():
@@ -118,24 +129,21 @@ func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
} }
} }
func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) { func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&t.vars, vars) updateSCVars(&t.vars, vars)
} }
// NewAuth generates a new auth object for an ethereum transaction // NewAuth generates a new auth object for an ethereum transaction
func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) { func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx) gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if t.cfg.GasPriceIncPerc != 0 {
inc := new(big.Int).Set(gasPrice) inc := new(big.Int).Set(gasPrice)
inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc)) // TODO: Replace this by a value of percentage
// nolint reason: to calculate percentages we use 100 const gasPriceDiv = 100
inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
gasPrice.Add(gasPrice, inc) gasPrice.Add(gasPrice, inc)
}
// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice) // log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID) auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -143,12 +151,8 @@ func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.Tr
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
auth.Value = big.NewInt(0) // in wei auth.Value = big.NewInt(0) // in wei
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
gasLimit := t.cfg.ForgeBatchGasCost.Fixed + auth.GasLimit = 1000000
uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
auth.GasLimit = gasLimit
auth.GasPrice = gasPrice auth.GasPrice = gasPrice
auth.Nonce = nil auth.Nonce = nil
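
One side of this hunk derives the transaction gas limit from `config.ForgeBatchGasCost` and the batch contents instead of a hardcoded 1000000. A minimal sketch of that estimate, with made-up cost numbers:

```go
// Per-batch gas limit estimate: a fixed base cost plus a per-tx cost for each
// tx category in the batch. Field names follow config.ForgeBatchGasCost; the
// values are illustrative, not the real contract costs.
package main

import "fmt"

type forgeBatchGasCost struct {
	Fixed, L1UserTx, L1CoordTx, L2Tx uint64
}

func gasLimit(c forgeBatchGasCost, nL1User, nL1Coord, nL2 int) uint64 {
	return c.Fixed +
		uint64(nL1User)*c.L1UserTx +
		uint64(nL1Coord)*c.L1CoordTx +
		uint64(nL2)*c.L2Tx
}

func main() {
	c := forgeBatchGasCost{Fixed: 300000, L1UserTx: 12000, L1CoordTx: 10000, L2Tx: 500}
	fmt.Println(gasLimit(c, 4, 1, 100)) // 300000 + 48000 + 10000 + 50000 = 408000
}
```
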
@@ -180,7 +184,7 @@ func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
func addPerc(v *big.Int, p int64) *big.Int { func addPerc(v *big.Int, p int64) *big.Int {
r := new(big.Int).Set(v) r := new(big.Int).Set(v)
r.Mul(r, big.NewInt(p)) r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100 // nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolint:gomnd r.Div(r, big.NewInt(100)) //nolint:gomnd
return r.Add(v, r) return r.Add(v, r)
} }
@@ -188,7 +192,7 @@ func addPerc(v *big.Int, p int64) *big.Int {
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error { func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
var ethTx *types.Transaction var ethTx *types.Transaction
var err error var err error
auth, err := t.NewAuth(ctx, batchInfo) auth, err := t.NewAuth(ctx)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -203,35 +207,32 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
} }
// RollupForgeBatch() calls ethclient.SendTransaction() // RollupForgeBatch() calls ethclient.SendTransaction()
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth) ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
// We check the errors via strings because we match the if errors.Is(err, core.ErrNonceTooLow) {
// error definitions from geth against the error string
// returned by the client via RPC.
if err == nil {
break
} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce", log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum) "err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Add(auth.Nonce, big.NewInt(1)) auth.Nonce.Add(auth.Nonce, big.NewInt(1))
attempt-- attempt--
} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) { } else if errors.Is(err, core.ErrNonceTooHigh) {
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce", log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum) "err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Sub(auth.Nonce, big.NewInt(1)) auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
attempt-- attempt--
} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) { } else if errors.Is(err, core.ErrUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice", log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum) "err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10) auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt-- attempt--
} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) { } else if errors.Is(err, core.ErrReplaceUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice", log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum) "err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10) auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt-- attempt--
} else { } else if err != nil {
log.Errorw("TxManager ethClient.RollupForgeBatch", log.Errorw("TxManager ethClient.RollupForgeBatch",
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1, "attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
"batchNum", batchInfo.BatchNum) "batchNum", batchInfo.BatchNum)
} else {
break
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -260,6 +261,7 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
batchInfo.Debug.StartTimestamp).Seconds() batchInfo.Debug.StartTimestamp).Seconds()
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
// t.lastPendingBatch = batchInfo.BatchNum
if !resend { if !resend {
if batchInfo.L1Batch { if batchInfo.L1Batch {
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1 t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
@@ -312,15 +314,14 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
} }
if receipt.Status == types.ReceiptStatusFailed { if receipt.Status == types.ReceiptStatusFailed {
batchInfo.Debug.Status = StatusFailed batchInfo.Debug.Status = StatusFailed
t.cfg.debugBatchStore(batchInfo)
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber) _, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash, log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(), "batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
"err", err) "err", err)
batchInfo.EthTxErr = err
if batchInfo.BatchNum <= t.lastSuccessBatch { if batchInfo.BatchNum <= t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum - 1 t.lastSuccessBatch = batchInfo.BatchNum - 1
} }
t.cfg.debugBatchStore(batchInfo)
return nil, tracerr.Wrap(fmt.Errorf( return nil, tracerr.Wrap(fmt.Errorf(
"ethereum transaction receipt status is failed: %w", err)) "ethereum transaction receipt status is failed: %w", err))
} else if receipt.Status == types.ReceiptStatusSuccessful { } else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -416,6 +417,8 @@ func (q *Queue) Push(batchInfo *BatchInfo) {
// Run the TxManager // Run the TxManager
func (t *TxManager) Run(ctx context.Context) { func (t *TxManager) Run(ctx context.Context) {
waitDuration := longWaitDuration
var statsVars statsVars var statsVars statsVars
select { select {
case statsVars = <-t.statsVarsCh: case statsVars = <-t.statsVarsCh:
@@ -426,7 +429,6 @@ func (t *TxManager) Run(ctx context.Context) {
log.Infow("TxManager: received initial statsVars", log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum) "block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -470,17 +472,13 @@ func (t *TxManager) Run(ctx context.Context) {
continue continue
} }
t.queue.Push(batchInfo) t.queue.Push(batchInfo)
if !timer.Stop() { waitDuration = t.cfg.TxManagerCheckInterval
<-timer.C case <-time.After(waitDuration):
}
timer.Reset(t.cfg.TxManagerCheckInterval)
case <-timer.C:
queuePosition, batchInfo := t.queue.Next() queuePosition, batchInfo := t.queue.Next()
if batchInfo == nil { if batchInfo == nil {
timer.Reset(longWaitDuration) waitDuration = longWaitDuration
continue continue
} }
timer.Reset(t.cfg.TxManagerCheckInterval)
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil { if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
@@ -541,6 +539,7 @@ func (t *TxManager) Run(ctx context.Context) {
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error { func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
next := 0 next := 0
// batchNum := 0
for { for {
batchInfo := t.queue.At(next) batchInfo := t.queue.At(next)
if batchInfo == nil { if batchInfo == nil {
@@ -573,6 +572,7 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
// from the queue // from the queue
if confirm == nil { if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum { if batchInfo.PipelineNum < t.minPipelineNum {
// batchNum++
t.queue.Remove(next) t.queue.Remove(next)
continue continue
} }

View File

@@ -1,11 +1,8 @@
package historydb package historydb
import ( import (
"database/sql"
"errors" "errors"
"fmt" "fmt"
"math/big"
"time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
@@ -35,18 +32,9 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
// GetBatchInternalAPI return the batch with the given batchNum
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
return hdb.getBatchAPI(hdb.dbRead, batchNum)
}
func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{} batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow( return batch, tracerr.Wrap(meddler.QueryRow(
d, batch, hdb.db, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num, `SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root, batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num, batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -145,10 +133,10 @@ func (hdb *HistoryDB) GetBatchesAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
batchPtrs := []*BatchAPI{} batchPtrs := []*BatchAPI{}
if err := meddler.QueryAll(hdb.dbRead, &batchPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI) batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
@@ -168,7 +156,7 @@ func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN ( INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
@@ -192,14 +180,6 @@ func (hdb *HistoryDB) GetBestBidsAPI(
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
}
func (hdb *HistoryDB) getBestBidsAPI(
d meddler.DB,
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string var query string
var args []interface{} var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator // JOIN the best bid of each slot with the latest update of each coordinator
@@ -232,9 +212,9 @@ func (hdb *HistoryDB) getBestBidsAPI(
if limit != nil { if limit != nil {
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
} }
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
bidPtrs := []*BidAPI{} bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// log.Debug(query) // log.Debug(query)
@@ -316,9 +296,9 @@ func (hdb *HistoryDB) GetBidsAPI(
if err != nil { if err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
query = hdb.dbRead.Rebind(query) query = hdb.db.Rebind(query)
bids := []*BidAPI{} bids := []*BidAPI{}
if err := meddler.QueryAll(hdb.dbRead, &bids, query, argsQ...); err != nil { if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(bids) == 0 { if len(bids) == 0 {
@@ -404,9 +384,9 @@ func (hdb *HistoryDB) GetTokensAPI(
if err != nil { if err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
query = hdb.dbRead.Rebind(query) query = hdb.db.Rebind(query)
tokens := []*TokenWithUSD{} tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(hdb.dbRead, &tokens, query, argsQ...); err != nil { if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(tokens) == 0 { if len(tokens) == 0 {
@@ -428,7 +408,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
tx := &TxAPI{} tx := &TxAPI{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position, hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj, hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj, hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd, tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
@@ -561,10 +541,10 @@ func (hdb *HistoryDB) GetTxsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
txsPtrs := []*TxAPI{} txsPtrs := []*TxAPI{}
if err := meddler.QueryAll(hdb.dbRead, &txsPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI) txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
@@ -584,7 +564,7 @@ func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, err
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
exit := &ExitAPI{} exit := &ExitAPI{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, exit, `SELECT exit_tree.item_id, exit_tree.batch_num, hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx, hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr, account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
@@ -705,10 +685,10 @@ func (hdb *HistoryDB) GetExitsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
exits := []*ExitAPI{} exits := []*ExitAPI{}
if err := meddler.QueryAll(hdb.dbRead, &exits, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(exits) == 0 { if len(exits) == 0 {
@@ -717,6 +697,25 @@ func (hdb *HistoryDB) GetExitsAPI(
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
} }
// GetBucketUpdatesAPI retrieves the latest value for each bucket
func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
var bucketUpdates []*BucketUpdateAPI
err = meddler.QueryAll(
hdb.db, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info // GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI( func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address, bidderAddr, forgerAddr *ethCommon.Address,
@@ -773,10 +772,10 @@ func (hdb *HistoryDB) GetCoordinatorsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.dbRead.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
coordinators := []*CoordinatorAPI{} coordinators := []*CoordinatorAPI{}
if err := meddler.QueryAll(hdb.dbRead, &coordinators, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(coordinators) == 0 { if len(coordinators) == 0 {
@@ -796,11 +795,34 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
auctionVars := &common.AuctionVariables{} auctionVars := &common.AuctionVariables{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, auctionVars, `SELECT * FROM auction_vars;`, hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
) )
return auctionVars, tracerr.Wrap(err) return auctionVars, tracerr.Wrap(err)
} }
// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err = meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
if err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}
// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
	cancel, err := hdb.apiConnCon.Acquire()
@@ -810,19 +832,11 @@ func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
	}
	defer hdb.apiConnCon.Release()
	account := &AccountAPI{}
-	err = meddler.QueryRow(hdb.dbRead, account, `SELECT account.item_id, hez_idx(account.idx,
+	err = meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
	token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
	token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
-	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd,
-	token.usd_update, account_update.nonce, account_update.balance
-	FROM account inner JOIN (
-		SELECT idx, nonce, balance
-		FROM account_update
-		WHERE idx = $1
-		ORDER BY item_id DESC LIMIT 1
-	) AS account_update ON account_update.idx = account.idx
-	INNER JOIN token ON account.token_id = token.token_id
-	WHERE account.idx = $1;`, idx)
+	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
+	FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
	if err != nil {
		return nil, tracerr.Wrap(err)
@@ -850,13 +864,8 @@ func (hdb *HistoryDB) GetAccountsAPI(
	queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
	account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
-	account_update.nonce, account_update.balance, COUNT(*) OVER() AS total_items
-	FROM account inner JOIN (
-		SELECT DISTINCT idx,
-		first_value(nonce) over(partition by idx ORDER BY item_id DESC) as nonce,
-		first_value(balance) over(partition by idx ORDER BY item_id DESC) as balance
-		FROM account_update
-	) AS account_update ON account_update.idx = account.idx INNER JOIN token ON account.token_id = token.token_id `
+	COUNT(*) OVER() AS total_items
+	FROM account INNER JOIN token ON account.token_id = token.token_id `
	// Apply filters
	nextIsAnd := false
	// ethAddr filter
@@ -905,10 +914,10 @@ func (hdb *HistoryDB) GetAccountsAPI(
	if err != nil {
		return nil, 0, tracerr.Wrap(err)
	}
-	query = hdb.dbRead.Rebind(query)
+	query = hdb.db.Rebind(query)
	accounts := []*AccountAPI{}
-	if err := meddler.QueryAll(hdb.dbRead, &accounts, query, argsQ...); err != nil {
+	if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
		return nil, 0, tracerr.Wrap(err)
	}
	if len(accounts) == 0 {
@@ -919,267 +928,99 @@ func (hdb *HistoryDB) GetAccountsAPI(
accounts[0].TotalItems - uint64(len(accounts)), nil accounts[0].TotalItems - uint64(len(accounts)), nil
} }
// GetCommonAccountAPI returns the account associated to an account idx // GetMetricsAPI returns metrics
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) { func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
cancel, err := hdb.apiConnCon.Acquire() cancel, err := hdb.apiConnCon.Acquire()
defer cancel() defer cancel()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release() defer hdb.apiConnCon.Release()
account := &common.Account{} metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err = meddler.QueryRow( err = meddler.QueryRow(
hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx, hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
) COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
return account, tracerr.Wrap(err) NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
} FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release()
return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
}
func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) { seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
coordinator := &CoordinatorAPI{} // Avoid dividing by 0
err := meddler.QueryRow( if seconds == 0 {
d, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// GetNodeInfoAPI returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
return hdb.GetNodeInfo()
}
// GetBucketUpdatesInternalAPI returns the latest bucket updates
func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
// GetNextForgersInternalAPI returns next forgers
func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
auctionConsts *common.AuctionConstants,
lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
secondsPerBlock := int64(15) //nolint:gomnd
// currentSlot and lastClosedSlot included
limit := uint(lastClosedSlot - currentSlot + 1)
bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(err)
}
nextForgers := []NextForgerAPI{}
// Get min bid info
var minBidInfo []MinBidInfo
if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
// All min bids can be calculated with the last update of AuctionVariables
minBidInfo = []MinBidInfo{{
DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
// Create nextForger for each slot
for i := currentSlot; i <= lastClosedSlot; i++ {
fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum
toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
auctionConsts.GenesisBlockNum - 1
nextForger := NextForgerAPI{
Period: Period{
SlotNum: i,
FromBlock: fromBlock,
ToBlock: toBlock,
FromTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
ToTimestamp: lastBlock.Timestamp.Add(time.Second *
time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
},
}
foundForger := false
// If there is a bid for a slot, get forger (coordinator)
for j := range bids {
slotNum := bids[j].SlotNum
if slotNum == i {
// There's a bid for the slot
// Check if the bid is greater than the minimum required
for i := 0; i < len(minBidInfo); i++ {
// Find the most recent update
if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
// Get min bid
minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
// Check if the bid has beaten the minimum
bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
if !ok {
return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
}
if minBid.Cmp(bid) == 1 {
// Min bid is greater than bid, the slot will be forged by boot coordinator
break
}
foundForger = true
break
}
}
if !foundForger { // There is no bid or it's smaller than the minimum
break
}
coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
if err != nil {
return nil, tracerr.Wrap(err)
}
nextForger.Coordinator = *coordinator
break
}
}
// If there is no bid, the coordinator that will forge is boot coordinator
if !foundForger {
nextForger.Coordinator = CoordinatorAPI{
Forger: auctionVars.BootCoordinator,
URL: auctionVars.BootCoordinatorURL,
}
}
nextForgers = append(nextForgers, nextForger)
}
return nextForgers, nil
}
// GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
var metrics MetricsAPI
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"`
ToBatchNum common.BatchNum `meddler:"-"`
ToTimestamp time.Time `meddler:"to_timestamp"`
}
p := &period{
ToBatchNum: lastBatchNum,
}
if err := meddler.QueryRow(
hdb.dbRead, p, `SELECT
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the amount of txs of that period
row := hdb.dbRead.QueryRow(
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
p.FromBatchNum, p.ToBatchNum,
)
var nTxs int
if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err)
}
// Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
if seconds == 0 { // Avoid dividing by 0
seconds++ seconds++
} }
metrics.TransactionsPerSecond = float64(nTxs) / seconds
// Set txs/batch metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
nBatches := p.ToBatchNum - p.FromBatchNum + 1
if nBatches == 0 { // Avoid dividing by 0 if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
nBatches++ metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
} float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
if (p.ToBatchNum - p.FromBatchNum) > 0 {
fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
metrics.TransactionsPerBatch = float64(nTxs) /
float64(nBatches)
} else { } else {
metrics.TransactionsPerBatch = 0 metrics.TransactionsPerBatch = float64(0)
} }
// Get total fee of that period
row = hdb.dbRead.QueryRow( err = meddler.QueryRow(
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`, hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
p.FromBatchNum, p.ToBatchNum, COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
) WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
var totalFee float64 if err != nil {
if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Set batch frequency if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(nBatches) metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
if nTxs > 0 { } else {
metrics.AvgTransactionFee = totalFee / float64(nTxs) metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else { } else {
metrics.AvgTransactionFee = 0 metrics.AvgTransactionFee = 0
} }
// Get and set amount of registered accounts err = meddler.QueryRow(
type registeredAccounts struct { hdb.db, metrics,
TotalIdx int64 `meddler:"total_idx"` `SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
TotalBJJ int64 `meddler:"total_bjj"`
}
ra := &registeredAccounts{}
if err := meddler.QueryRow(
hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.TotalAccounts = ra.TotalIdx
metrics.TotalBJJs = ra.TotalBJJ
// Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
p.FromBatchNum, p.ToBatchNum,
)
var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err)
}
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil
}
// GetStateAPI returns the StateAPI
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
defer hdb.apiConnCon.Release()
return hdb.getStateAPI(hdb.dbRead) return metrics, nil
}
// GetAvgTxFeeAPI returns average transaction fee of the last 1h
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
metricsTotals := &MetricsTotals{}
err = meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
if err != nil {
return 0, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return 0, tracerr.Wrap(err)
}
var avgTransactionFee float64
if metricsTotals.TotalTransactions > 0 {
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
avgTransactionFee = 0
}
return avgTransactionFee, nil
} }
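The *API getters in this file all follow the same pattern: acquire a slot from apiConnCon, run the query, release. A minimal sketch of how an HTTP handler might consume GetAvgTxFeeAPI, assuming the gin router already used elsewhere in this repo (the handler name and route wiring are illustrative, not part of the diff):

```go
package api

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/db/historydb"
)

// avgTxFeeHandler is an illustrative handler: the HistoryDB method already
// rate-limits itself through the API connection controller, so the handler
// only translates the result (or the error) into an HTTP response.
func avgTxFeeHandler(hdb *historydb.HistoryDB) gin.HandlerFunc {
	return func(c *gin.Context) {
		fee, err := hdb.GetAvgTxFeeAPI()
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"avgTransactionFee": fee})
	}
}
```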

@@ -27,35 +27,30 @@ const (
// HistoryDB persist the historic of the rollup
type HistoryDB struct {
-	dbRead     *sqlx.DB
-	dbWrite    *sqlx.DB
+	db         *sqlx.DB
	apiConnCon *db.APIConnectionController
}
// NewHistoryDB initialize the DB
-func NewHistoryDB(dbRead, dbWrite *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
-	return &HistoryDB{
-		dbRead:     dbRead,
-		dbWrite:    dbWrite,
-		apiConnCon: apiConnCon,
-	}
+func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
+	return &HistoryDB{db: db, apiConnCon: apiConnCon}
}
// DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes.
func (hdb *HistoryDB) DB() *sqlx.DB {
-	return hdb.dbWrite
+	return hdb.db
}
// AddBlock insert a block into the DB
-func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.dbWrite, block) }
+func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.db, block) }
func (hdb *HistoryDB) addBlock(d meddler.DB, block *common.Block) error {
	return tracerr.Wrap(meddler.Insert(d, "block", block))
}
// AddBlocks inserts blocks into the DB
func (hdb *HistoryDB) AddBlocks(blocks []common.Block) error {
-	return tracerr.Wrap(hdb.addBlocks(hdb.dbWrite, blocks))
+	return tracerr.Wrap(hdb.addBlocks(hdb.db, blocks))
}
func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
@@ -66,7 +61,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
timestamp, timestamp,
hash hash
) VALUES %s;`, ) VALUES %s;`,
blocks, blocks[:],
)) ))
} }
@@ -74,7 +69,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) { func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
block := &common.Block{} block := &common.Block{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, block, hdb.db, block,
"SELECT * FROM block WHERE eth_block_num = $1;", blockNum, "SELECT * FROM block WHERE eth_block_num = $1;", blockNum,
) )
return block, tracerr.Wrap(err) return block, tracerr.Wrap(err)
@@ -84,7 +79,7 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) { func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
var blocks []*common.Block var blocks []*common.Block
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &blocks, hdb.db, &blocks,
"SELECT * FROM block ORDER BY eth_block_num;", "SELECT * FROM block ORDER BY eth_block_num;",
) )
return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err) return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
@@ -94,7 +89,7 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) { func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block var blocks []*common.Block
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &blocks, hdb.db, &blocks,
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;", "SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;",
from, to, from, to,
) )
@@ -105,13 +100,13 @@ func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) { func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) {
block := &common.Block{} block := &common.Block{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;", hdb.db, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
) )
return block, tracerr.Wrap(err) return block, tracerr.Wrap(err)
} }
// AddBatch insert a Batch into the DB // AddBatch insert a Batch into the DB
func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.dbWrite, batch) } func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.db, batch) }
func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error { func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// Calculate total collected fees in USD // Calculate total collected fees in USD
// Get IDs of collected tokens for fees // Get IDs of collected tokens for fees
@@ -134,9 +129,9 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = hdb.dbWrite.Rebind(query) query = hdb.db.Rebind(query)
if err := meddler.QueryAll( if err := meddler.QueryAll(
hdb.dbWrite, &tokenPrices, query, args..., hdb.db, &tokenPrices, query, args...,
); err != nil { ); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -158,7 +153,7 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// AddBatches inserts Batches into the DB
func (hdb *HistoryDB) AddBatches(batches []common.Batch) error { func (hdb *HistoryDB) AddBatches(batches []common.Batch) error {
return tracerr.Wrap(hdb.addBatches(hdb.dbWrite, batches)) return tracerr.Wrap(hdb.addBatches(hdb.db, batches))
} }
func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error { func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
for i := 0; i < len(batches); i++ { for i := 0; i < len(batches); i++ {
@@ -173,7 +168,7 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) { func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
var batch common.Batch var batch common.Batch
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root, batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num, batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`, batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
@@ -186,7 +181,7 @@ func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error)
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) { func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &batches, hdb.db, &batches,
`SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected,
batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root, batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root,
batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch
@@ -199,7 +194,7 @@ func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) { func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &batches, hdb.db, &batches,
`SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator, `SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator,
state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd
FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`, FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`,
@@ -211,7 +206,7 @@ func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, erro
// GetFirstBatchBlockNumBySlot returns the ethereum block number of the first // GetFirstBatchBlockNumBySlot returns the ethereum block number of the first
// batch within a slot // batch within a slot
func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) { func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) {
row := hdb.dbRead.QueryRow( row := hdb.db.QueryRow(
`SELECT eth_block_num FROM batch `SELECT eth_block_num FROM batch
WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum, WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum,
) )
@@ -221,7 +216,7 @@ func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error)
// GetLastBatchNum returns the BatchNum of the latest forged batch // GetLastBatchNum returns the BatchNum of the latest forged batch
func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) { func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
row := hdb.dbRead.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;") row := hdb.db.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
var batchNum common.BatchNum var batchNum common.BatchNum
return batchNum, tracerr.Wrap(row.Scan(&batchNum)) return batchNum, tracerr.Wrap(row.Scan(&batchNum))
} }
@@ -230,7 +225,7 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) { func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
var batch common.Batch var batch common.Batch
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root, batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num, batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`, batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
@@ -240,7 +235,7 @@ func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch // GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) { func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
row := hdb.dbRead.QueryRow(`SELECT eth_block_num FROM batch row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
WHERE forge_l1_txs_num IS NOT NULL WHERE forge_l1_txs_num IS NOT NULL
ORDER BY batch_num DESC LIMIT 1;`) ORDER BY batch_num DESC LIMIT 1;`)
var blockNum int64 var blockNum int64
@@ -250,7 +245,7 @@ func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged // GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged
// batches. If there's no batch in the DB (nil, nil) is returned. // batches. If there's no batch in the DB (nil, nil) is returned.
func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) { func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
row := hdb.dbRead.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;") row := hdb.db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
lastL1TxsNum := new(int64) lastL1TxsNum := new(int64)
return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum)) return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum))
} }
@@ -261,15 +256,15 @@ func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
func (hdb *HistoryDB) Reorg(lastValidBlock int64) error { func (hdb *HistoryDB) Reorg(lastValidBlock int64) error {
var err error var err error
if lastValidBlock < 0 { if lastValidBlock < 0 {
_, err = hdb.dbWrite.Exec("DELETE FROM block;") _, err = hdb.db.Exec("DELETE FROM block;")
} else { } else {
_, err = hdb.dbWrite.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock) _, err = hdb.db.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
} }
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// AddBids insert Bids into the DB // AddBids insert Bids into the DB
func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.dbWrite, bids) } func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.db, bids) }
func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error { func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
if len(bids) == 0 { if len(bids) == 0 {
return nil return nil
@@ -278,7 +273,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;", "INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
bids, bids[:],
)) ))
} }
@@ -286,7 +281,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) { func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
var bids []*common.Bid var bids []*common.Bid
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &bids, hdb.db, &bids,
`SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid `SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid
ORDER BY item_id;`, ORDER BY item_id;`,
) )
@@ -297,7 +292,7 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) { func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{} bidCoord := &common.BidCoordinator{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, bidCoord, hdb.db, bidCoord,
`SELECT ( `SELECT (
SELECT default_slot_set_bid SELECT default_slot_set_bid
FROM auction_vars FROM auction_vars
@@ -320,7 +315,7 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat
// AddCoordinators insert Coordinators into the DB // AddCoordinators insert Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error { func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
return tracerr.Wrap(hdb.addCoordinators(hdb.dbWrite, coordinators)) return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
} }
func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error { func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error {
if len(coordinators) == 0 { if len(coordinators) == 0 {
@@ -329,13 +324,13 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;", "INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
coordinators, coordinators[:],
)) ))
} }
// AddExitTree insert Exit tree into the DB // AddExitTree insert Exit tree into the DB
func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error { func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error {
return tracerr.Wrap(hdb.addExitTree(hdb.dbWrite, exitTree)) return tracerr.Wrap(hdb.addExitTree(hdb.db, exitTree))
} }
func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error { func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error {
if len(exitTree) == 0 { if len(exitTree) == 0 {
@@ -345,7 +340,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
d, d,
"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+ "INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;", "instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
exitTree, exitTree[:],
)) ))
} }
@@ -423,13 +418,11 @@ func (hdb *HistoryDB) updateExitTree(d sqlx.Ext, blockNum int64,
// AddToken insert a token into the DB // AddToken insert a token into the DB
func (hdb *HistoryDB) AddToken(token *common.Token) error { func (hdb *HistoryDB) AddToken(token *common.Token) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "token", token)) return tracerr.Wrap(meddler.Insert(hdb.db, "token", token))
} }
// AddTokens insert tokens into the DB // AddTokens insert tokens into the DB
func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { return hdb.addTokens(hdb.db, tokens) }
return hdb.addTokens(hdb.dbWrite, tokens)
}
func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error { func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
if len(tokens) == 0 { if len(tokens) == 0 {
return nil return nil
@@ -450,17 +443,16 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
symbol, symbol,
decimals decimals
) VALUES %s;`, ) VALUES %s;`,
tokens, tokens[:],
)) ))
} }
// UpdateTokenValue updates the USD value of a token. Value is the price in // UpdateTokenValue updates the USD value of a token
// USD of a normalized token (1 token = 10^decimals units)
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error { func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
// Sanitize symbol // Sanitize symbol
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ") tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
_, err := hdb.dbWrite.Exec( _, err := hdb.db.Exec(
"UPDATE token SET usd = $1 WHERE symbol = $2;", "UPDATE token SET usd = $1 WHERE symbol = $2;",
value, tokenSymbol, value, tokenSymbol,
) )
@@ -471,7 +463,7 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) { func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
token := &TokenWithUSD{} token := &TokenWithUSD{}
err := meddler.QueryRow( err := meddler.QueryRow(
hdb.dbRead, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID, hdb.db, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
) )
return token, tracerr.Wrap(err) return token, tracerr.Wrap(err)
} }
@@ -480,7 +472,7 @@ func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) { func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
var tokens []*TokenWithUSD var tokens []*TokenWithUSD
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &tokens, hdb.db, &tokens,
"SELECT * FROM token ORDER BY token_id;", "SELECT * FROM token ORDER BY token_id;",
) )
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err) return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
@@ -489,7 +481,7 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
// GetTokenSymbols returns all the token symbols from the DB // GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) { func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokenSymbols []string var tokenSymbols []string
rows, err := hdb.dbRead.Query("SELECT symbol FROM token;") rows, err := hdb.db.Query("SELECT symbol FROM token;")
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -507,7 +499,7 @@ func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
// AddAccounts insert accounts into the DB // AddAccounts insert accounts into the DB
func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error { func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error {
return tracerr.Wrap(hdb.addAccounts(hdb.dbWrite, accounts)) return tracerr.Wrap(hdb.addAccounts(hdb.db, accounts))
} }
func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error { func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error {
if len(accounts) == 0 { if len(accounts) == 0 {
@@ -522,7 +514,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
bjj, bjj,
eth_addr eth_addr
) VALUES %s;`, ) VALUES %s;`,
accounts, accounts[:],
)) ))
} }
@@ -530,49 +522,18 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) { func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
var accs []*common.Account var accs []*common.Account
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &accs, hdb.db, &accs,
"SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;", "SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;",
) )
return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err) return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
} }
// AddAccountUpdates inserts accUpdates into the DB
func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
return tracerr.Wrap(hdb.addAccountUpdates(hdb.dbWrite, accUpdates))
}
func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
if len(accUpdates) == 0 {
return nil
}
return tracerr.Wrap(db.BulkInsert(
d,
`INSERT INTO account_update (
eth_block_num,
batch_num,
idx,
nonce,
balance
) VALUES %s;`,
accUpdates,
))
}
// GetAllAccountUpdates returns all the AccountUpdate from the DB
func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
var accUpdates []*common.AccountUpdate
err := meddler.QueryAll(
hdb.dbRead, &accUpdates,
"SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
)
return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
}
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
// BatchNum should be null, and the value will be set by a trigger when a batch forges the tx.
// EffectiveAmount and EffectiveDepositAmount are set with default values by the DB.
func (hdb *HistoryDB) AddL1Txs(l1txs []common.L1Tx) error {
-	return tracerr.Wrap(hdb.addL1Txs(hdb.dbWrite, l1txs))
+	return tracerr.Wrap(hdb.addL1Txs(hdb.db, l1txs))
}
// addL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
@@ -626,7 +587,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {
// AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx. // AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error { func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error {
return tracerr.Wrap(hdb.addL2Txs(hdb.dbWrite, l2txs)) return tracerr.Wrap(hdb.addL2Txs(hdb.db, l2txs))
} }
// addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx. // addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
@@ -685,7 +646,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
fee, fee,
nonce nonce
) VALUES %s;`, ) VALUES %s;`,
txs, txs[:],
)) ))
} }
@@ -693,7 +654,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) { func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
var exits []*common.ExitInfo var exits []*common.ExitInfo
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &exits, hdb.db, &exits,
`SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof, `SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof,
exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request, exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request,
exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`, exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`,
@@ -705,7 +666,7 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) { func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx var txs []*common.L1Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0 hdb.db, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount, tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
@@ -722,7 +683,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
// Since the query specifies that only coordinator txs are returned, it's safe to assume // Since the query specifies that only coordinator txs are returned, it's safe to assume
// that returned txs will always have effective amounts // that returned txs will always have effective amounts
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, hdb.db, &txs,
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, tx.amount AS effective_amount, tx.amount, tx.amount AS effective_amount,
@@ -737,7 +698,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) { func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
var txs []*common.L2Tx var txs []*common.L2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, hdb.db, &txs,
`SELECT tx.id, tx.batch_num, tx.position, `SELECT tx.id, tx.batch_num, tx.position,
tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.from_idx, tx.to_idx, tx.amount, tx.token_id,
tx.fee, tx.nonce, tx.type, tx.eth_block_num tx.fee, tx.nonce, tx.type, tx.eth_block_num
@@ -750,7 +711,7 @@ func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) { func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
var txs []*common.L1Tx var txs []*common.L1Tx
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null hdb.db, &txs, // only L1 user txs can have batch_num set to null
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, NULL AS effective_amount, tx.amount, NULL AS effective_amount,
@@ -767,7 +728,7 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
// GetLastTxsPosition for a given to_forge_l1_txs_num // GetLastTxsPosition for a given to_forge_l1_txs_num
func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) { func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) {
row := hdb.dbRead.QueryRow( row := hdb.db.QueryRow(
"SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;", "SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;",
toForgeL1TxsNum, toForgeL1TxsNum,
) )
@@ -781,15 +742,15 @@ func (hdb *HistoryDB) GetSCVars() (*common.RollupVariables, *common.AuctionVaria
var rollup common.RollupVariables var rollup common.RollupVariables
var auction common.AuctionVariables var auction common.AuctionVariables
var wDelayer common.WDelayerVariables var wDelayer common.WDelayerVariables
if err := meddler.QueryRow(hdb.dbRead, &rollup, if err := meddler.QueryRow(hdb.db, &rollup,
"SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
if err := meddler.QueryRow(hdb.dbRead, &auction, if err := meddler.QueryRow(hdb.db, &auction,
"SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
if err := meddler.QueryRow(hdb.dbRead, &wDelayer, if err := meddler.QueryRow(hdb.db, &wDelayer,
"SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil { "SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, tracerr.Wrap(err)
} }
@@ -820,7 +781,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
block_stamp, block_stamp,
withdrawals withdrawals
) VALUES %s;`, ) VALUES %s;`,
bucketUpdates, bucketUpdates[:],
)) ))
} }
@@ -834,25 +795,13 @@ func (hdb *HistoryDB) AddBucketUpdatesTest(d meddler.DB, bucketUpdates []common.
func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) { func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
var bucketUpdates []*common.BucketUpdate var bucketUpdates []*common.BucketUpdate
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &bucketUpdates, hdb.db, &bucketUpdates,
`SELECT eth_block_num, num_bucket, block_stamp, withdrawals `SELECT eth_block_num, num_bucket, block_stamp, withdrawals
FROM bucket_update ORDER BY item_id;`, FROM bucket_update ORDER BY item_id;`,
) )
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err) return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
} }
func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
minBidInfo := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;`
err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error { func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 { if len(tokenExchanges) == 0 {
return nil return nil
@@ -864,7 +813,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
eth_addr, eth_addr,
value_usd value_usd
) VALUES %s;`, ) VALUES %s;`,
tokenExchanges, tokenExchanges[:],
)) ))
} }
@@ -872,7 +821,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) { func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) {
var tokenExchanges []*common.TokenExchange var tokenExchanges []*common.TokenExchange
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &tokenExchanges, hdb.db, &tokenExchanges,
"SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;", "SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;",
) )
return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err) return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err)
@@ -892,7 +841,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
token_addr, token_addr,
amount amount
) VALUES %s;`, ) VALUES %s;`,
escapeHatchWithdrawals, escapeHatchWithdrawals[:],
)) ))
} }
@@ -900,7 +849,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) { func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) {
var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.dbRead, &escapeHatchWithdrawals, hdb.db, &escapeHatchWithdrawals,
"SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;", "SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;",
) )
return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal), return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal),
@@ -913,7 +862,7 @@ func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHat
// exist in the smart contracts. // exist in the smart contracts.
func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables, func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables,
auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error { auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error {
txn, err := hdb.dbWrite.Beginx() txn, err := hdb.db.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -997,7 +946,7 @@ func (hdb *HistoryDB) setExtraInfoForgedL1UserTxs(d sqlx.Ext, txs []common.L1Tx)
// the pagination system of the API/DB depends on this. Within blocks, all // the pagination system of the API/DB depends on this. Within blocks, all
// items should also be in the correct order (Accounts, Tokens, Txs, etc.) // items should also be in the correct order (Accounts, Tokens, Txs, etc.)
func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) { func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
txn, err := hdb.dbWrite.Beginx() txn, err := hdb.db.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1069,11 +1018,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Add accountBalances if it exists
if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
return tracerr.Wrap(err)
}
// Set the EffectiveAmount and EffectiveDepositAmount of all the // Set the EffectiveAmount and EffectiveDepositAmount of all the
// L1UserTxs that have been forged in this batch // L1UserTxs that have been forged in this batch
if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil { if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {
@@ -1151,16 +1095,27 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(txn.Commit()) return tracerr.Wrap(txn.Commit())
} }
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
hdb.db, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// AddAuctionVars insert auction vars into the DB // AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error { func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars)) return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars))
} }
// GetTokensTest used to get tokens in a testing context // GetTokensTest used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) { func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{} tokens := []*TokenWithUSD{}
if err := meddler.QueryAll( if err := meddler.QueryAll(
hdb.dbRead, &tokens, hdb.db, &tokens,
"SELECT * FROM TOKEN", "SELECT * FROM TOKEN",
); err != nil { ); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -1170,49 +1125,3 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
} }
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
} }
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
	// Get the amount of batches and accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Update NodeInfo struct
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
return &recommendedFee, nil
}
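The recommendation rule above boils down to max(multiplier × avgFee, minFeeUSD) for each transaction class. A small worked sketch of that arithmetic, not part of the diff; the multiplier and minimum values below are placeholders, since the real ones are package constants not shown here:

```go
package main

import (
	"fmt"
	"math"
)

// recommend applies the rule used by GetRecommendedFee: never go below the
// configured minimum, otherwise scale the average fee of the last hour.
func recommend(avgFeeUSD, extra, minFeeUSD float64) float64 {
	return math.Max(extra*avgFeeUSD, minFeeUSD)
}

func main() {
	avgFee := 30.0 / 200.0 // e.g. $30 of batch fees over 200 txs in the last hour
	minFee := 0.05
	fmt.Println(recommend(avgFee, 1.0, minFee)) // ExistingAccount: 0.15
	fmt.Println(recommend(avgFee, 1.5, minFee)) // CreatesAccount with an assumed 1.5x multiplier: 0.225
}
```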

@@ -39,12 +39,12 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
historyDB = NewHistoryDB(db, db, nil) historyDB = NewHistoryDB(db, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, db, apiConnCon) historyDBWithACC = NewHistoryDB(db, apiConnCon)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -377,22 +377,6 @@ func TestAccounts(t *testing.T) {
accs[i].Balance = nil accs[i].Balance = nil
assert.Equal(t, accs[i], acc) assert.Equal(t, accs[i], acc)
} }
// Test AccountBalances
accUpdates := make([]common.AccountUpdate, len(accs))
for i, acc := range accs {
accUpdates[i] = common.AccountUpdate{
EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
BatchNum: acc.BatchNum,
Idx: acc.Idx,
Nonce: common.Nonce(i),
Balance: big.NewInt(int64(i)),
}
}
err = historyDB.AddAccountUpdates(accUpdates)
require.NoError(t, err)
fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
require.NoError(t, err)
assert.Equal(t, accUpdates, fetchedAccBalances)
} }
func TestTxs(t *testing.T) { func TestTxs(t *testing.T) {
@@ -637,10 +621,10 @@ func TestTxs(t *testing.T) {
assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type) assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)
// Tx ID // Tx ID
assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String()) assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String()) assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String()) assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String()) assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
// Tx From and To IDx // Tx From and To IDx
assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx) assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -817,11 +801,11 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
} }
// Add second batch to trigger the update of the batch_num, // Add second batch to trigger the update of the batch_num,
// while avoiding the implicit call of setExtraInfoForgedL1UserTxs // while avoiding the implicit call of setExtraInfoForgedL1UserTxs
err = historyDB.addBlock(historyDB.dbWrite, &blocks[1].Block) err = historyDB.addBlock(historyDB.db, &blocks[1].Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addBatch(historyDB.dbWrite, &blocks[1].Rollup.Batches[0].Batch) err = historyDB.addBatch(historyDB.db, &blocks[1].Rollup.Batches[0].Batch)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addAccounts(historyDB.dbWrite, blocks[1].Rollup.Batches[0].CreatedAccounts) err = historyDB.addAccounts(historyDB.db, blocks[1].Rollup.Batches[0].CreatedAccounts)
require.NoError(t, err) require.NoError(t, err)
// Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block // Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block
@@ -831,7 +815,7 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
l1Txs[1].EffectiveAmount = big.NewInt(0) l1Txs[1].EffectiveAmount = big.NewInt(0)
l1Txs[2].EffectiveDepositAmount = big.NewInt(0) l1Txs[2].EffectiveDepositAmount = big.NewInt(0)
l1Txs[2].EffectiveAmount = big.NewInt(0) l1Txs[2].EffectiveAmount = big.NewInt(0)
err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.dbWrite, l1Txs) err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.db, l1Txs)
require.NoError(t, err) require.NoError(t, err)
dbL1Txs, err := historyDB.GetAllL1UserTxs() dbL1Txs, err := historyDB.GetAllL1UserTxs()
@@ -918,10 +902,10 @@ func TestUpdateExitTree(t *testing.T) {
common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false, common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false,
Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr}, Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr},
) )
err = historyDB.addBlock(historyDB.dbWrite, &block.Block) err = historyDB.addBlock(historyDB.db, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num, err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -951,10 +935,10 @@ func TestUpdateExitTree(t *testing.T) {
Token: tokenAddr, Token: tokenAddr,
Amount: big.NewInt(80), Amount: big.NewInt(80),
}) })
err = historyDB.addBlock(historyDB.dbWrite, &block.Block) err = historyDB.addBlock(historyDB.db, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num, err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -997,7 +981,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
URL: "bar", URL: "bar",
}, },
} }
err = historyDB.addCoordinators(historyDB.dbWrite, coords) err = historyDB.addCoordinators(historyDB.db, coords)
require.NoError(t, err) require.NoError(t, err)
bids := []common.Bid{ bids := []common.Bid{
@@ -1015,7 +999,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
}, },
} }
err = historyDB.addBids(historyDB.dbWrite, bids) err = historyDB.addBids(historyDB.db, bids)
require.NoError(t, err) require.NoError(t, err)
forger10, err := historyDB.GetBestBidCoordinator(10) forger10, err := historyDB.GetBestBidCoordinator(10)
@@ -1053,7 +1037,7 @@ func TestAddBucketUpdates(t *testing.T) {
Withdrawals: big.NewInt(42), Withdrawals: big.NewInt(42),
}, },
} }
err := historyDB.addBucketUpdates(historyDB.dbWrite, bucketUpdates) err := historyDB.addBucketUpdates(historyDB.db, bucketUpdates)
require.NoError(t, err) require.NoError(t, err)
dbBucketUpdates, err := historyDB.GetAllBucketUpdates() dbBucketUpdates, err := historyDB.GetAllBucketUpdates()
require.NoError(t, err) require.NoError(t, err)
@@ -1078,7 +1062,7 @@ func TestAddTokenExchanges(t *testing.T) {
ValueUSD: 67890, ValueUSD: 67890,
}, },
} }
err := historyDB.addTokenExchanges(historyDB.dbWrite, tokenExchanges) err := historyDB.addTokenExchanges(historyDB.db, tokenExchanges)
require.NoError(t, err) require.NoError(t, err)
dbTokenExchanges, err := historyDB.GetAllTokenExchanges() dbTokenExchanges, err := historyDB.GetAllTokenExchanges()
require.NoError(t, err) require.NoError(t, err)
@@ -1107,7 +1091,7 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
Amount: big.NewInt(20003), Amount: big.NewInt(20003),
}, },
} }
err := historyDB.addEscapeHatchWithdrawals(historyDB.dbWrite, escapeHatchWithdrawals) err := historyDB.addEscapeHatchWithdrawals(historyDB.db, escapeHatchWithdrawals)
require.NoError(t, err) require.NoError(t, err)
dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals() dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals()
require.NoError(t, err) require.NoError(t, err)
@@ -1172,15 +1156,19 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
// Frequency is not exactly the desired one, some decimals may appear // Frequency is not exactly the desired one, some decimals may appear
// There is a -2 as time for first and last batch is not taken into account assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01) assert.Less(t, res.BatchFrequency, float64(frequency+1))
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01) // Truncate frequency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
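The comparisons above truncate both sides to three decimals before checking equality. A small sketch of that pattern as a helper (not code from the repository):

```
package main

import (
	"fmt"
	"math"
)

// trunc3 cuts a float down to three decimal places so that two metrics which
// only differ in later decimals compare as equal.
func trunc3(v float64) float64 {
	return math.Trunc(v/0.001) * 0.001
}

func main() {
	fmt.Println(trunc3(1.23456)) // 1.234
	fmt.Println(trunc3(1.23499)) // 1.234
}
```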
@@ -1207,8 +1195,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
set = append(set, til.Instruction{Typ: til.TypeNewBlock}) set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// Transfers // Transfers
const numBlocks int = 30 for x := 0; x < 6000; x++ {
for x := 0; x < numBlocks; x++ {
set = append(set, til.Instruction{ set = append(set, til.Instruction{
Typ: common.TxTypeTransfer, Typ: common.TxTypeTransfer,
TokenID: common.TokenID(0), TokenID: common.TokenID(0),
@@ -1232,31 +1219,36 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
err = tc.FillBlocksExtra(blocks, &tilCfgExtra) err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err) require.NoError(t, err)
const numBatches int = 2 + numBlocks const numBatches int = 6002
const blockNum = 4 + numBlocks const numTx int = 6003
const blockNum = 6005 - 1
// Sanity check // Sanity check
require.Equal(t, blockNum, len(blocks)) require.Equal(t, blockNum, len(blocks))
// Adding one batch per block // Adding one batch per block
// batch frequency can be chosen // batch frequency can be chosen
const blockTime time.Duration = 3600 * time.Second const frequency int = 15
now := time.Now()
require.NoError(t, err)
for i := range blocks { for i := range blocks {
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime) blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
err = historyDB.AddBlockSCData(&blocks[i]) err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1) assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1) // Frequency is not exactly the desired one, some decimals may appear
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1) assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
assert.Less(t, res.BatchFrequency, float64(frequency+1))
// Truncate frequency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
@@ -1265,7 +1257,13 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) { func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsInternalAPI(0) _, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1454,65 +1452,3 @@ func setTestBlocks(from, to int64) []common.Block {
} }
return blocks return blocks
} }
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetStateInternalAPI(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
WithdrawalDelayer: *clientSetup.WDelayerVariables,
RecommendedFee: common.RecommendedFee{
ExistingAccount: 0.15,
},
}
err = historyDB.SetStateInternalAPI(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
MaxPoolTxs: 123,
MinFeeUSD: 0.5,
}
err = historyDB.SetNodeConfig(nodeConfig)
require.NoError(t, err)
dbConstants, err := historyDB.GetConstants()
require.NoError(t, err)
assert.Equal(t, constants, dbConstants)
dbNodeConfig, err := historyDB.GetNodeConfig()
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
require.NoError(t, err)
assert.Equal(t, stateAPI, dbStateAPI)
}


@@ -1,169 +0,0 @@
package historydb
import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Period represents a time period in ethereum
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
// NextForgerAPI represents the next forger exposed via the API
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// NetworkAPI is the network state exposed via the API
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
type NodePublicConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// Constants contains network constants
type Constants struct {
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
// NodeConfig contains the node config exposed in the API
type NodeConfig struct {
MaxPoolTxs uint32 `meddler:"max_pool_txs"`
MinFeeUSD float64 `meddler:"min_fee"`
}
// NodeInfo contains information about the node used when serving the API
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
StateAPI *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
// GetNodeInfo returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
ni := &NodeInfo{}
err := meddler.QueryRow(
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
)
return ni, tracerr.Wrap(err)
}
// GetConstants returns the Constants
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT constants FROM node_info WHERE item_id = 1;",
)
return nodeInfo.Constants, tracerr.Wrap(err)
}
// SetConstants sets the Constants
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
_constants := struct {
Constants *Constants `meddler:"constants,json"`
}{constants}
values, err := meddler.Default.Values(&_constants, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
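SetConstants, and SetStateInternalAPI and SetNodeConfig further down, all use the same trick: wrap the value in a one-field struct tagged `meddler:"<column>,json"`, ask meddler for the serialized column value, and write it into the single node_info row. A stripped-down sketch of the pattern; the `settings` column is hypothetical:

```
package example

import (
	"github.com/jmoiron/sqlx"
	"github.com/russross/meddler"
)

// Settings is a hypothetical value to be stored as a JSON column.
type Settings struct {
	MaxItems int     `json:"maxItems"`
	MinFee   float64 `json:"minFee"`
}

// saveSettings shows the wrapper-struct pattern: the ",json" meddler tag makes
// Values() return the JSON-encoded blob, which is then written into the
// single-row table with a plain UPDATE.
func saveSettings(db *sqlx.DB, s *Settings) error {
	wrapper := struct {
		Settings *Settings `meddler:"settings,json"`
	}{s}
	values, err := meddler.Default.Values(&wrapper, false)
	if err != nil {
		return err
	}
	_, err = db.Exec("UPDATE node_info SET settings = $1 WHERE item_id = 1;", values[0])
	return err
}
```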
// GetStateInternalAPI returns the StateAPI
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.StateAPI, tracerr.Wrap(err)
}
// SetStateInternalAPI sets the StateAPI
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
_stateAPI := struct {
StateAPI *StateAPI `meddler:"state,json"`
}{stateAPI}
values, err := meddler.Default.Values(&_stateAPI, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetNodeConfig returns the NodeConfig
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT config FROM node_info WHERE item_id = 1;",
)
return nodeInfo.NodeConfig, tracerr.Wrap(err)
}
// SetNodeConfig sets the NodeConfig
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
_nodeConfig := struct {
NodeConfig *NodeConfig `meddler:"config,json"`
}{nodeConfig}
values, err := meddler.Default.Values(&_nodeConfig, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}


@@ -239,8 +239,8 @@ type AccountAPI struct {
BatchNum common.BatchNum `meddler:"batch_num"` BatchNum common.BatchNum `meddler:"batch_num"`
PublicKey apitypes.HezBJJ `meddler:"bjj"` PublicKey apitypes.HezBJJ `meddler:"bjj"`
EthAddr apitypes.HezEthAddr `meddler:"eth_addr"` EthAddr apitypes.HezEthAddr `meddler:"eth_addr"`
Nonce common.Nonce `meddler:"nonce"` // max of 40 bits used Nonce common.Nonce `meddler:"-"` // max of 40 bits used
Balance *apitypes.BigIntStr `meddler:"balance"` // max of 192 bits used Balance *apitypes.BigIntStr `meddler:"-"` // max of 192 bits used
TotalItems uint64 `meddler:"total_items"` TotalItems uint64 `meddler:"total_items"`
FirstItem uint64 `meddler:"first_item"` FirstItem uint64 `meddler:"first_item"`
LastItem uint64 `meddler:"last_item"` LastItem uint64 `meddler:"last_item"`
@@ -302,15 +302,25 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"` LastItem uint64 `json:"-" meddler:"last_item"`
} }
// MetricsAPI define metrics of the network // Metrics define metrics of the network
type MetricsAPI struct { type Metrics struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"` TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"` BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"` TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"` TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"` TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"` AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"` }
// MetricsTotals is used to get temporal information from HistoryDB
// to calculate data to be stored into the Metrics struct
type MetricsTotals struct {
TotalTransactions uint64 `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
TotalBatches int64 `meddler:"total_batches"`
TotalFeesUSD float64 `meddler:"total_fees"`
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
} }
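MetricsTotals only carries raw aggregates; the API metrics are derived from them. GetMetricsAPI itself is not shown in this diff, so the following is only an assumed reconstruction of how the aggregates map onto the Metrics fields, written as a fragment that would sit next to the two structs above:

```
// metricsFromTotals derives the per-batch and per-second figures from the raw
// aggregates. Illustrative reconstruction only; this is not the repository's
// GetMetricsAPI implementation.
func metricsFromTotals(t MetricsTotals) Metrics {
	var m Metrics
	seconds := t.MaxTimestamp.Sub(t.MinTimestamp).Seconds()
	if t.TotalBatches > 0 {
		m.TransactionsPerBatch = float64(t.TotalTransactions) / float64(t.TotalBatches)
		m.BatchFrequency = seconds / float64(t.TotalBatches)
	}
	if seconds > 0 {
		m.TransactionsPerSecond = float64(t.TotalTransactions) / seconds
	}
	if t.TotalTransactions > 0 {
		m.AvgTransactionFee = t.TotalFeesUSD / float64(t.TotalTransactions)
	}
	return m
}
```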
// BidAPI is a representation of a bid with additional information // BidAPI is a representation of a bid with additional information
@@ -363,27 +373,6 @@ type RollupVariablesAPI struct {
SafeMode bool `json:"safeMode" meddler:"safe_mode"` SafeMode bool `json:"safeMode" meddler:"safe_mode"`
} }
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
rollupVars := RollupVariablesAPI{
EthBlockNum: rollupVariables.EthBlockNum,
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
WithdrawalDelay: rollupVariables.WithdrawalDelay,
SafeMode: rollupVariables.SafeMode,
}
for i, bucket := range rollupVariables.Buckets {
rollupVars.Buckets[i] = BucketParamsAPI{
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
}
}
return &rollupVars
}
// AuctionVariablesAPI are the variables of the Auction Smart Contract // AuctionVariablesAPI are the variables of the Auction Smart Contract
type AuctionVariablesAPI struct { type AuctionVariablesAPI struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -408,28 +397,3 @@ type AuctionVariablesAPI struct {
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before // SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"` SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
} }
// NewAuctionVariablesAPI creates a AuctionVariablesAPI from common.AuctionVariables
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
auctionVars := AuctionVariablesAPI{
EthBlockNum: auctionVariables.EthBlockNum,
DonationAddress: auctionVariables.DonationAddress,
BootCoordinator: auctionVariables.BootCoordinator,
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
Outbidding: auctionVariables.Outbidding,
SlotDeadline: auctionVariables.SlotDeadline,
}
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionVars.AllocationRatio[i] = ratio
}
return &auctionVars
}


@@ -1,18 +1,12 @@
package l2db package l2db
import ( import (
"fmt"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/russross/meddler" "github.com/russross/meddler"
) )
var (
errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB // AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire() cancel, err := l2db.apiConnCon.Acquire()
@@ -34,7 +28,7 @@ func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCre
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI) auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, auth, l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
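All of the *API methods open with the same guard: Acquire blocks until one of the limited API DB connections is free or the timeout fires, and Release returns it. The repository's APIConnectionController is not part of this diff, so the following is only a plausible sketch of such a limiter built from a buffered channel and a context timeout:

```
package dbutils

import (
	"context"
	"errors"
	"time"
)

// ConnController is an illustrative limiter with the same shape as the
// apiConnCon used above: Acquire blocks until a slot is free or the timeout
// expires, Release returns the slot.
type ConnController struct {
	slots   chan struct{}
	timeout time.Duration
}

func NewConnController(maxConns int, timeout time.Duration) *ConnController {
	return &ConnController{slots: make(chan struct{}, maxConns), timeout: timeout}
}

// Acquire reserves a slot. The returned cancel func must always be called,
// which is why the callers above `defer cancel()` immediately.
func (c *ConnController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	select {
	case c.slots <- struct{}{}:
		return cancel, nil
	case <-ctx.Done():
		return cancel, errors.New("timeout waiting for a free API DB connection")
	}
}

// Release frees a previously acquired slot.
func (c *ConnController) Release() { <-c.slots }
```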
@@ -48,54 +42,20 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
row := l2db.dbRead.QueryRow(`SELECT "SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
($1::NUMERIC * COALESCE(token.usd, 0) * fee_percentage($2::NUMERIC)) / common.PoolL2TxStatePending,
(10.0 ^ token.decimals::NUMERIC) )
FROM token WHERE token.token_id = $3;`, var totalTxs uint32
tx.AmountFloat, tx.Fee, tx.TokenID) if err := row.Scan(&totalTxs); err != nil {
var feeUSD float64
if err := row.Scan(&feeUSD); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if feeUSD < l2db.minFeeUSD { if totalTxs >= l2db.maxTxs {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)", return tracerr.New(
feeUSD, l2db.minFeeUSD)) "The pool is at full capacity. More transactions are not accepted currently",
)
} }
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
if err != nil {
return err
}
valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
if err != nil {
return err
}
values, err := meddler.Default.Values(tx, false)
if err != nil {
return err
}
q := fmt.Sprintf(
`INSERT INTO tx_pool (%s)
SELECT %s
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
namesPart, valuesPart,
len(values)+1, len(values)+2) //nolint:gomnd
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
res, err := l2db.dbWrite.Exec(q, values...)
if err != nil {
return tracerr.Wrap(err)
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return tracerr.Wrap(err)
}
if rowsAffected == 0 {
return tracerr.Wrap(errPoolFull)
}
return nil
} }
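One side of this hunk folds the capacity check into the INSERT itself (INSERT ... SELECT ... WHERE (SELECT COUNT(*) ...) < maxTxs) and reports a full pool when RowsAffected is zero, instead of issuing a separate COUNT first. A minimal sketch of that idiom against a hypothetical jobs table:

```
package example

import (
	"database/sql"
	"errors"
)

var errQueueFull = errors.New("queue is at full capacity")

// enqueue inserts a job only while fewer than maxPending jobs are pending.
// The capacity check rides inside the INSERT, so check and insert happen in a
// single statement and round-trip, and a full queue shows up as zero affected
// rows. Table and column names are illustrative.
func enqueue(db *sql.DB, payload string, maxPending int) error {
	res, err := db.Exec(
		`INSERT INTO jobs (payload, state)
		 SELECT $1, 'pending'
		 WHERE (SELECT COUNT(*) FROM jobs WHERE state = 'pending') < $2;`,
		payload, maxPending)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if n == 0 {
		return errQueueFull
	}
	return nil
}
```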
// selectPoolTxAPI select part of queries to get PoolL2TxRead // selectPoolTxAPI select part of queries to get PoolL2TxRead
@@ -118,7 +78,7 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI) tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, tx, l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;", selectPoolTxAPI+"WHERE tx_id = $1;",
txID, txID,
)) ))


@@ -21,12 +21,10 @@ import (
// L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant // L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant
// due to them being forged or invalid after a safety period // due to them being forged or invalid after a safety period
type L2DB struct { type L2DB struct {
dbRead *sqlx.DB db *sqlx.DB
dbWrite *sqlx.DB
safetyPeriod common.BatchNum safetyPeriod common.BatchNum
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
@@ -34,20 +32,17 @@ type L2DB struct {
// To create it, a db connection is needed, along with the safety period expressed in batches, // the maxTxs that the DB should have, and the TTL (time to live) for pending txs.
// maxTxs that the DB should have and TTL (time to live) for pending txs. // maxTxs that the DB should have and TTL (time to live) for pending txs.
func NewL2DB( func NewL2DB(
dbRead, dbWrite *sqlx.DB, db *sqlx.DB,
safetyPeriod common.BatchNum, safetyPeriod common.BatchNum,
maxTxs uint32, maxTxs uint32,
minFeeUSD float64,
TTL time.Duration, TTL time.Duration,
apiConnCon *db.APIConnectionController, apiConnCon *db.APIConnectionController,
) *L2DB { ) *L2DB {
return &L2DB{ return &L2DB{
dbRead: dbRead, db: db,
dbWrite: dbWrite,
safetyPeriod: safetyPeriod, safetyPeriod: safetyPeriod,
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
apiConnCon: apiConnCon, apiConnCon: apiConnCon,
} }
} }
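For the variant of NewL2DB that takes separate read and write handles, a caller can point reads at a replica while writes stay on the primary, or simply pass the same handle twice. A hedged wiring sketch; the DSNs, pool parameters and the lib/pq driver import are assumptions, not taken from this diff:

```
package main

import (
	"log"
	"time"

	"github.com/hermeznetwork/hermez-node/db/l2db"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical wiring; the DSNs are placeholders.
	dbWrite, err := sqlx.Connect("postgres", "host=primary dbname=hermez user=hermez password=secret")
	if err != nil {
		log.Fatal(err)
	}
	dbRead, err := sqlx.Connect("postgres", "host=replica dbname=hermez user=hermez password=secret")
	if err != nil {
		log.Fatal(err)
	}
	// Safety period of 10 batches, up to 1000 pending txs, a 0.05 USD minimum
	// fee, 24h TTL and no API connection limiter.
	pool := l2db.NewL2DB(dbRead, dbWrite, 10, 1000, 0.05, 24*time.Hour, nil)
	_ = pool
}
```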
@@ -55,18 +50,12 @@ func NewL2DB(
// DB returns a pointer to the L2DB.db. This method should be used only for // DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes. // internal testing purposes.
func (l2db *L2DB) DB() *sqlx.DB { func (l2db *L2DB) DB() *sqlx.DB {
return l2db.dbWrite return l2db.db
}
// MinFeeUSD returns the minimum fee in USD that is required to accept txs into
// the pool
func (l2db *L2DB) MinFeeUSD() float64 {
return l2db.minFeeUSD
} }
// AddAccountCreationAuth inserts an account creation authorization into the DB // AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
_, err := l2db.dbWrite.Exec( _, err := l2db.db.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature) `INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`, VALUES ($1, $2, $3);`,
auth.EthAddr, auth.BJJ, auth.Signature, auth.EthAddr, auth.BJJ, auth.Signature,
@@ -78,12 +67,30 @@ func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error
func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) { func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) {
auth := new(common.AccountCreationAuth) auth := new(common.AccountCreationAuth)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, auth, l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
} }
// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
// UpdateTxsInfo updates the parameter Info of the pool transactions // UpdateTxsInfo updates the parameter Info of the pool transactions
func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error { func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
if len(txs) == 0 { if len(txs) == 0 {
@@ -107,7 +114,7 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
WHERE tx_pool.tx_id = tx_update.id; WHERE tx_pool.tx_id = tx_update.id;
` `
if len(txUpdates) > 0 { if len(txUpdates) > 0 {
if _, err := sqlx.NamedExec(l2db.dbWrite, query, txUpdates); err != nil { if _, err := sqlx.NamedExec(l2db.db, query, txUpdates); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -115,8 +122,9 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
return nil return nil
} }
// NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx // AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite { // but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
// transform tx from *common.PoolL2Tx to PoolL2TxWrite // transform tx from *common.PoolL2Tx to PoolL2TxWrite
insertTx := &PoolL2TxWrite{ insertTx := &PoolL2TxWrite{
TxID: tx.TxID, TxID: tx.TxID,
@@ -158,15 +166,8 @@ func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
f := new(big.Float).SetInt(tx.Amount) f := new(big.Float).SetInt(tx.Amount)
amountF, _ := f.Float64() amountF, _ := f.Float64()
insertTx.AmountFloat = amountF insertTx.AmountFloat = amountF
return insertTx
}
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
// but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
// insert tx // insert tx
return tracerr.Wrap(meddler.Insert(l2db.dbWrite, "tx_pool", insertTx)) return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
} }
// selectPoolTxCommon select part of queries to get common.PoolL2Tx // selectPoolTxCommon select part of queries to get common.PoolL2Tx
@@ -175,15 +176,14 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx, tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount, rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type, tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
(fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) / fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update
(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id ` FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
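The fee_usd expression that divides by 10^decimals prices the fee over whole tokens rather than base units: fee_percentage(fee) * token.usd * amount_f / 10^decimals. A worked sketch with the values these tests use (3 decimals, a 1.0 USD token price, fee index 126, roughly 10%):

```
package main

import (
	"fmt"
	"math"
)

// feeUSD mirrors the fee_usd SQL expression: fee percentage times the token
// price times the amount in base units, divided by 10^decimals.
func feeUSD(amountBaseUnits, feePct, tokenUSD float64, decimals uint64) float64 {
	return feePct * tokenUSD * amountBaseUnits / math.Pow(10, float64(decimals))
}

func main() {
	// Test values: 6000 base units, fee index 126 (~10%), 1.0 USD token, 3 decimals.
	fmt.Printf("%.2f USD\n", feeUSD(6000, 0.10, 1.0, 3)) // 0.60 USD
}
```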
// GetTx return the specified Tx in common.PoolL2Tx format // GetTx return the specified Tx in common.PoolL2Tx format
func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) { func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
tx := new(common.PoolL2Tx) tx := new(common.PoolL2Tx)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.dbRead, tx, l2db.db, tx,
selectPoolTxCommon+"WHERE tx_id = $1;", selectPoolTxCommon+"WHERE tx_id = $1;",
txID, txID,
)) ))
@@ -193,7 +193,7 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) { func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx var txs []*common.PoolL2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
l2db.dbRead, &txs, l2db.db, &txs,
selectPoolTxCommon+"WHERE state = $1", selectPoolTxCommon+"WHERE state = $1",
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
) )
@@ -218,8 +218,8 @@ func (l2db *L2DB) StartForging(txIDs []common.TxID, batchNum common.BatchNum) er
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -241,8 +241,8 @@ func (l2db *L2DB) DoneForging(txIDs []common.TxID, batchNum common.BatchNum) err
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -263,8 +263,8 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.dbWrite.Rebind(query) query = l2db.db.Rebind(query)
_, err = l2db.dbWrite.Exec(query, args...) _, err = l2db.db.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -272,7 +272,7 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
// of unique FromIdx // of unique FromIdx
func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) { func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) {
var idxs []common.Idx var idxs []common.Idx
rows, err := l2db.dbRead.Query(`SELECT DISTINCT from_idx FROM tx_pool rows, err := l2db.db.Query(`SELECT DISTINCT from_idx FROM tx_pool
WHERE state = $1;`, common.PoolL2TxStatePending) WHERE state = $1;`, common.PoolL2TxStatePending)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -313,7 +313,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// named query which works with slices, and doesn't handle an extra // individual argument.
// individual argument. // individual argument.
query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum) query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil { if _, err := sqlx.NamedExec(l2db.db, query, updatedAccounts); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
@@ -322,7 +322,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// Reorg updates the state of txs that were updated in a batch that has been discarted due to a blockchain reorg. // Reorg updates the state of txs that were updated in a batch that has been discarted due to a blockchain reorg.
// The state of the affected txs can change form Forged -> Pending or from Invalid -> Pending // The state of the affected txs can change form Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error { func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.dbWrite.Exec( _, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1 `UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`, WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
@@ -338,7 +338,7 @@ func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
// it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded // it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded
func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) { func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
now := time.Now().UTC().Unix() now := time.Now().UTC().Unix()
_, err = l2db.dbWrite.Exec( _, err = l2db.db.Exec(
`DELETE FROM tx_pool WHERE ( `DELETE FROM tx_pool WHERE (
batch_num < $1 AND (state = $2 OR state = $3) batch_num < $1 AND (state = $2 OR state = $3)
) OR ( ) OR (
@@ -354,14 +354,3 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
) )
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// PurgeByExternalDelete deletes all pending transactions marked with true in
// the `external_delete` column. An external process can set this column to
// true to instruct the coordinator to delete the tx when possible.
func (l2db *L2DB) PurgeByExternalDelete() error {
_, err := l2db.dbWrite.Exec(
`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
common.PoolL2TxStatePending,
)
return tracerr.Wrap(err)
}
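PurgeByExternalDelete is the coordinator's half of the contract; the other half is an external process setting external_delete = true on the rows it wants dropped. A hedged sketch of that external side; the DSN and the account index are placeholders:

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Placeholder DSN; any client with write access to the pool DB can do this.
	db, err := sql.Open("postgres",
		"host=localhost dbname=hermez user=hermez password=secret sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Flag every pool tx from a given account index (256 is a placeholder);
	// the coordinator's PurgeByExternalDelete then drops the ones still pending.
	if _, err := db.Exec(
		`UPDATE tx_pool SET external_delete = true WHERE from_idx = $1;`, 256,
	); err != nil {
		log.Fatal(err)
	}
}
```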


@@ -1,8 +1,8 @@
package l2db package l2db
import ( import (
"database/sql" "math"
"fmt" "math/big"
"os" "os"
"testing" "testing"
"time" "time"
@@ -20,14 +20,12 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var decimals = uint64(3)
var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
var l2DB *L2DB var l2DB *L2DB
var l2DBWithACC *L2DB var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB var historyDB *historydb.HistoryDB
var tc *til.Context var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD var tokens map[common.TokenID]historydb.TokenWithUSD
var tokensValue map[common.TokenID]float64
var accs map[common.Idx]common.Account var accs map[common.Idx]common.Account
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@@ -37,11 +35,11 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil) l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil) historyDB = historydb.NewHistoryDB(db, nil)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -60,10 +58,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
AddToken(1) AddToken(1)
AddToken(2) AddToken(2)
CreateAccountDeposit(1) A: 20000 CreateAccountDeposit(1) A: 2000
CreateAccountDeposit(2) A: 20000 CreateAccountDeposit(2) A: 2000
CreateAccountDeposit(1) B: 10000 CreateAccountDeposit(1) B: 1000
CreateAccountDeposit(2) B: 10000 CreateAccountDeposit(2) B: 1000
> batchL1 > batchL1
> batchL1 > batchL1
> block > block
@@ -84,23 +82,15 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for i := range blocks {
block := &blocks[i]
for j := range block.Rollup.AddedTokens {
token := &block.Rollup.AddedTokens[j]
token.Name = fmt.Sprintf("Token %d", token.TokenID)
token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
token.Decimals = decimals
}
}
tokens = make(map[common.TokenID]historydb.TokenWithUSD) tokens = make(map[common.TokenID]historydb.TokenWithUSD)
// tokensValue = make(map[common.TokenID]float64) tokensValue = make(map[common.TokenID]float64)
accs = make(map[common.Idx]common.Account) accs = make(map[common.Idx]common.Account)
value := 5 * 5.389329
now := time.Now().UTC() now := time.Now().UTC()
// Add all blocks except for the last one // Add all blocks except for the last one
for i := range blocks[:len(blocks)-1] { for i := range blocks[:len(blocks)-1] {
if err := historyDB.AddBlockSCData(&blocks[i]); err != nil { err = historyDB.AddBlockSCData(&blocks[i])
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for _, batch := range blocks[i].Rollup.Batches { for _, batch := range blocks[i].Rollup.Batches {
@@ -116,38 +106,39 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
Name: token.Name, Name: token.Name,
Symbol: token.Symbol, Symbol: token.Symbol,
Decimals: token.Decimals, Decimals: token.Decimals,
USD: &tokenValue,
USDUpdate: &now,
} }
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
readToken.USDUpdate = &now
readToken.USD = &value
tokens[token.TokenID] = readToken tokens[token.TokenID] = readToken
// Set value to the tokens }
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD) // Set value to the tokens (tokens have no symbol)
tokenSymbol := ""
err := historyDB.UpdateTokenValue(tokenSymbol, value)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
}
return nil return nil
} }
func generatePoolL2Txs() ([]common.PoolL2Tx, error) { func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
// Fee = 126 corresponds to ~10%
setPool := ` setPool := `
Type: PoolL2 Type: PoolL2
PoolTransfer(1) A-B: 6000 (126) PoolTransfer(1) A-B: 6 (4)
PoolTransfer(2) A-B: 3000 (126) PoolTransfer(2) A-B: 3 (1)
PoolTransfer(1) B-A: 5000 (126) PoolTransfer(1) B-A: 5 (2)
PoolTransfer(2) B-A: 10000 (126) PoolTransfer(2) B-A: 10 (3)
PoolTransfer(1) A-B: 7000 (126) PoolTransfer(1) A-B: 7 (2)
PoolTransfer(2) A-B: 2000 (126) PoolTransfer(2) A-B: 2 (1)
PoolTransfer(1) B-A: 8000 (126) PoolTransfer(1) B-A: 8 (2)
PoolTransfer(2) B-A: 1000 (126) PoolTransfer(2) B-A: 1 (1)
PoolTransfer(1) A-B: 3000 (126) PoolTransfer(1) A-B: 3 (1)
PoolTransferToEthAddr(2) B-A: 5000 (126) PoolTransferToEthAddr(2) B-A: 5 (2)
PoolTransferToBJJ(2) B-A: 5000 (126) PoolTransferToBJJ(2) B-A: 5 (2)
PoolExit(1) A: 5000 (126) PoolExit(1) A: 5 (2)
PoolExit(2) B: 3000 (126) PoolExit(2) B: 3 (1)
` `
poolL2Txs, err := tc.GeneratePoolL2Txs(setPool) poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
if err != nil { if err != nil {
@@ -162,74 +153,25 @@ func TestAddTxTest(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx) assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone() nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone) assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset) assert.Equal(t, 0, offset)
} }
} }
func TestAddTxAPI(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
oldMaxTxs := l2DBWithACC.maxTxs
// set max number of pending txs that can be kept in the pool to 5
l2DBWithACC.maxTxs = 5
poolL2Txs, err := generatePoolL2Txs()
txs := make([]*PoolL2TxWrite, len(poolL2Txs))
for i := range poolL2Txs {
txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
}
require.NoError(t, err)
require.GreaterOrEqual(t, len(poolL2Txs), 8)
for i := range txs[:5] {
err := l2DBWithACC.AddTxAPI(txs[i])
require.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
err = l2DBWithACC.AddTxAPI(txs[5])
assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
// reset maxTxs to original value
l2DBWithACC.maxTxs = oldMaxTxs
// set minFeeUSD to a high value than the tx feeUSD to test the error
// of inserting a tx with lower than min fee
oldMinFeeUSD := l2DBWithACC.minFeeUSD
tx := txs[5]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
// set minFeeUSD higher than the tx fee to trigger the error
l2DBWithACC.minFeeUSD = feeAmountUSD + 1
err = l2DBWithACC.AddTxAPI(tx)
require.Error(t, err)
assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
// reset minFeeUSD to original value
l2DBWithACC.minFeeUSD = oldMinFeeUSD
}
func TestUpdateTxsInfo(t *testing.T) { func TestUpdateTxsInfo(t *testing.T) {
err := prepareHistoryDB(historyDB) err := prepareHistoryDB(historyDB)
if err != nil { if err != nil {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) require.NoError(t, err)
@@ -243,7 +185,7 @@ func TestUpdateTxsInfo(t *testing.T) {
for i := range poolL2Txs { for i := range poolL2Txs {
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "test", fetchedTx.Info) assert.Equal(t, "test", fetchedTx.Info)
} }
} }
@@ -261,8 +203,9 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix()) assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
// Set expected fee // Set expected fee
amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD) f := new(big.Float).SetInt(expected.Amount)
expected.AbsoluteFee = amountUSD * expected.Fee.Percentage() amountF, _ := f.Float64()
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee) test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
} }
assert.Equal(t, expected, actual) assert.Equal(t, expected, actual)
@@ -287,28 +230,19 @@ func TestGetPending(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var pendingTxs []*common.PoolL2Tx var pendingTxs []*common.PoolL2Tx
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
pendingTxs = append(pendingTxs, &poolL2Txs[i]) pendingTxs = append(pendingTxs, &poolL2Txs[i])
} }
fetchedTxs, err := l2DB.GetPendingTxs() fetchedTxs, err := l2DB.GetPendingTxs()
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs)) assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i := range fetchedTxs { for i := range fetchedTxs {
assertTx(t, pendingTxs[i], &fetchedTxs[i]) assertTx(t, pendingTxs[i], &fetchedTxs[i])
} }
// Check AbsoluteFee amount
for i := range fetchedTxs {
tx := &fetchedTxs[i]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount,
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
}
} }
func TestStartForging(t *testing.T) { func TestStartForging(t *testing.T) {
@@ -319,13 +253,13 @@ func TestStartForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -333,11 +267,11 @@ func TestStartForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs { for _, id := range startForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -351,13 +285,13 @@ func TestDoneForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -365,7 +299,7 @@ func TestDoneForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -377,12 +311,12 @@ func TestDoneForging(t *testing.T) {
} }
// Done forging txs // Done forging txs
err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs { for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -396,13 +330,13 @@ func TestInvalidate(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var invalidTxIDs []common.TxID var invalidTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 { if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
randomizer++ randomizer++
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
@@ -410,11 +344,11 @@ func TestInvalidate(t *testing.T) {
} }
// Invalidate txs // Invalidate txs
err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -428,7 +362,7 @@ func TestInvalidateOldNonces(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// Update Accounts currentNonce // Update Accounts currentNonce
var updateAccounts []common.IdxNonce var updateAccounts []common.IdxNonce
var currentNonce = common.Nonce(1) var currentNonce = common.Nonce(1)
@@ -445,13 +379,13 @@ func TestInvalidateOldNonces(t *testing.T) {
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
} }
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
} }
// sanity check // sanity check
require.Greater(t, len(invalidTxIDs), 0) require.Greater(t, len(invalidTxIDs), 0)
err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum) err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
@@ -473,7 +407,7 @@ func TestReorg(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -484,7 +418,7 @@ func TestReorg(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -496,7 +430,7 @@ func TestReorg(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -521,22 +455,22 @@ func TestReorg(t *testing.T) {
// Invalidate txs BEFORE reorgBatch --> nonReorg // Invalidate txs BEFORE reorgBatch --> nonReorg
err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch) err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs in reorgBatch --> Reorg // Done forging txs in reorgBatch --> Reorg
err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch) err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -553,7 +487,7 @@ func TestReorg2(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -564,7 +498,7 @@ func TestReorg2(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -576,7 +510,7 @@ func TestReorg2(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -598,22 +532,22 @@ func TestReorg2(t *testing.T) {
} }
// Done forging txs BEFORE reorgBatch --> nonReorg // Done forging txs BEFORE reorgBatch --> nonReorg
err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch) err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs in reorgBatch --> Reorg // Invalidate txs in reorgBatch --> Reorg
err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch) err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -629,7 +563,7 @@ func TestPurge(t *testing.T) {
var poolL2Tx []common.PoolL2Tx var poolL2Tx []common.PoolL2Tx
for i := 0; i < generateTx; i++ { for i := 0; i < generateTx; i++ {
poolL2TxAux, err := generatePoolL2Txs() poolL2TxAux, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
poolL2Tx = append(poolL2Tx, poolL2TxAux...) poolL2Tx = append(poolL2Tx, poolL2TxAux...)
} }
@@ -656,39 +590,39 @@ func TestPurge(t *testing.T) {
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID) deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
} }
err := l2DB.AddTxTest(&tx) err := l2DB.AddTxTest(&tx)
require.NoError(t, err) assert.NoError(t, err)
} }
// Set batchNum keeped txs // Set batchNum keeped txs
for i := range keepedIDs { for i := range keepedIDs {
_, err = l2DB.dbWrite.Exec( _, err = l2DB.db.Exec(
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;", "UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
safeBatchNum, keepedIDs[i], safeBatchNum, keepedIDs[i],
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Start forging txs and set batchNum // Start forging txs and set batchNum
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs and set batchNum // Done forging txs and set batchNum
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs and set batchNum // Invalidate txs and set batchNum
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Update timestamp of afterTTL txs // Update timestamp of afterTTL txs
deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0) deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
for _, id := range afterTTLIDs { for _, id := range afterTTLIDs {
// Set timestamp // Set timestamp
_, err = l2DB.dbWrite.Exec( _, err = l2DB.db.Exec(
"UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;", "UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
deleteTimestamp, common.PoolL2TxStatePending, id, deleteTimestamp, common.PoolL2TxStatePending, id,
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Purge txs // Purge txs
err = l2DB.Purge(safeBatchNum) err = l2DB.Purge(safeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Check results // Check results
for _, id := range deletedIDs { for _, id := range deletedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
@@ -696,7 +630,7 @@ func TestPurge(t *testing.T) {
} }
for _, id := range keepedIDs { for _, id := range keepedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
require.NoError(t, err) assert.NoError(t, err)
} }
} }
@@ -710,10 +644,10 @@ func TestAuth(t *testing.T) {
for i := 0; i < len(auths); i++ { for i := 0; i < len(auths); i++ {
// Add to the DB // Add to the DB
err := l2DB.AddAccountCreationAuth(auths[i]) err := l2DB.AddAccountCreationAuth(auths[i])
require.NoError(t, err) assert.NoError(t, err)
// Fetch from DB // Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr) auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
require.NoError(t, err) assert.NoError(t, err)
// Check fetched vs generated // Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr) assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ) assert.Equal(t, auths[i].BJJ, auth.BJJ)
@@ -731,7 +665,7 @@ func TestAddGet(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// We will work with only 3 txs // We will work with only 3 txs
require.GreaterOrEqual(t, len(poolL2Txs), 3) require.GreaterOrEqual(t, len(poolL2Txs), 3)
@@ -767,56 +701,3 @@ func TestAddGet(t *testing.T) {
assert.Equal(t, txs[i], *dbTx) assert.Equal(t, txs[i], *dbTx)
} }
} }
func TestPurgeByExternalDelete(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
txs, err := generatePoolL2Txs()
require.NoError(t, err)
// We will work with 8 txs
require.GreaterOrEqual(t, len(txs), 8)
txs = txs[:8]
for i := range txs {
require.NoError(t, l2DB.AddTxTest(&txs[i]))
}
// We will recreate this scenario:
// tx index, status , external_delete
// 0 , pending, false
// 1 , pending, false
// 2 , pending, true // will be deleted
// 3 , pending, true // will be deleted
// 4 , fging , false
// 5 , fging , false
// 6 , fging , true
// 7 , fging , true
require.NoError(t, l2DB.StartForging(
[]common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
1))
_, err = l2DB.dbWrite.Exec(
`UPDATE tx_pool SET external_delete = true WHERE
tx_id IN ($1, $2, $3, $4)
;`,
txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
)
require.NoError(t, err)
require.NoError(t, l2DB.PurgeByExternalDelete())
// Query txs that have not been deleted
for _, i := range []int{0, 1, 4, 5, 6, 7} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.NoError(t, err)
}
// Query txs that have been deleted
for _, i := range []int{2, 3} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}
}
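A quick aside on the two assertion flavours shown side by side throughout the test file above: with testify, `assert` failures are recorded but the test keeps executing, while `require` calls `t.FailNow` and stops the test immediately, which matters whenever the next statement would dereference the failed result. A minimal, self-contained sketch (not code from this repository):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func load() (*string, error) { return nil, errors.New("boom") }

func TestWithAssert(t *testing.T) {
	v, err := load()
	assert.NoError(t, err) // marks the test failed but keeps going...
	if v != nil {          // ...so later code must guard against nil itself
		_ = *v
	}
}

func TestWithRequire(t *testing.T) {
	v, err := load()
	require.NoError(t, err) // calls t.FailNow: the test stops right here
	_ = *v                  // never reached when load() fails
}
```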

View File

@@ -34,7 +34,6 @@ type PoolL2TxWrite struct {
RqFee *common.FeeSelector `meddler:"rq_fee"` RqFee *common.FeeSelector `meddler:"rq_fee"`
RqNonce *common.Nonce `meddler:"rq_nonce"` RqNonce *common.Nonce `meddler:"rq_nonce"`
Type common.TxType `meddler:"tx_type"` Type common.TxType `meddler:"tx_type"`
ClientIP string `meddler:"client_ip"`
} }
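A note on the meddler tags in the struct above: meddler builds its SQL from those tags, so a tagged field such as `ClientIP` (`meddler:"client_ip"`) only round-trips if a matching `client_ip` column exists in `tx_pool`, which is why this struct change travels together with the schema change further down. A hedged sketch of the pattern, using a made-up table and struct rather than the node's real types:

```go
package example

import (
	"database/sql"

	"github.com/russross/meddler"
)

// exampleRow is a hypothetical struct, not one from hermez-node; the
// meddler tags name the columns each field is read from and written to.
type exampleRow struct {
	ID       int    `meddler:"id,pk"`
	ClientIP string `meddler:"client_ip"`
}

func insertExample(db *sql.DB, r *exampleRow) error {
	// meddler.Insert builds the INSERT from the tags, so adding or
	// removing a tagged field must go hand in hand with a migration
	// that adds or drops the matching column.
	return meddler.Insert(db, "example", r)
}
```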
// PoolTxAPI represents a L2 Tx pool with extra metadata used by the API // PoolTxAPI represents a L2 Tx pool with extra metadata used by the API
@@ -95,6 +94,7 @@ func (tx PoolTxAPI) MarshalJSON() ([]byte, error) {
"info": tx.Info, "info": tx.Info,
"signature": tx.Signature, "signature": tx.Signature,
"timestamp": tx.Timestamp, "timestamp": tx.Timestamp,
"batchNum": tx.BatchNum,
"requestFromAccountIndex": tx.RqFromIdx, "requestFromAccountIndex": tx.RqFromIdx,
"requestToAccountIndex": tx.RqToIdx, "requestToAccountIndex": tx.RqToIdx,
"requestToHezEthereumAddress": tx.RqToEthAddr, "requestToHezEthereumAddress": tx.RqToEthAddr,

View File

@@ -47,7 +47,7 @@ CREATE TABLE token (
name VARCHAR(20) NOT NULL, name VARCHAR(20) NOT NULL,
symbol VARCHAR(10) NOT NULL, symbol VARCHAR(10) NOT NULL,
decimals INT NOT NULL, decimals INT NOT NULL,
usd NUMERIC, -- value of a normalized token (1 token = 10^decimals units) usd NUMERIC,
usd_update TIMESTAMP WITHOUT TIME ZONE usd_update TIMESTAMP WITHOUT TIME ZONE
); );
@@ -100,15 +100,6 @@ CREATE TABLE account (
eth_addr BYTEA NOT NULL eth_addr BYTEA NOT NULL
); );
CREATE TABLE account_update (
item_id SERIAL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance BYTEA NOT NULL
);
CREATE TABLE exit_tree ( CREATE TABLE exit_tree (
item_id SERIAL PRIMARY KEY, item_id SERIAL PRIMARY KEY,
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
@@ -627,9 +618,7 @@ CREATE TABLE tx_pool (
rq_amount BYTEA, rq_amount BYTEA,
rq_fee SMALLINT, rq_fee SMALLINT,
rq_nonce BIGINT, rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL, tx_type VARCHAR(40) NOT NULL
client_ip VARCHAR,
external_delete BOOLEAN NOT NULL DEFAULT false
); );
-- +migrate StatementBegin -- +migrate StatementBegin
@@ -661,47 +650,35 @@ CREATE TABLE account_creation_auth (
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now()) timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
); );
CREATE TABLE node_info (
item_id SERIAL PRIMARY KEY,
state BYTEA, -- object returned by GET /state
config BYTEA, -- Node config
-- max_pool_txs BIGINT, -- L2DB config
-- min_fee NUMERIC, -- L2DB config
constants BYTEA -- info of the network that is constant
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
-- +migrate Down -- +migrate Down
-- triggers -- drop triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token; DROP TRIGGER trigger_token_usd_update ON token;
DROP TRIGGER IF EXISTS trigger_set_tx ON tx; DROP TRIGGER trigger_set_tx ON tx;
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch; DROP TRIGGER trigger_forge_l1_txs ON batch;
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool; DROP TRIGGER trigger_set_pool_tx ON tx_pool;
-- functions -- drop functions
DROP FUNCTION IF EXISTS hez_idx; DROP FUNCTION hez_idx;
DROP FUNCTION IF EXISTS set_token_usd_update; DROP FUNCTION set_token_usd_update;
DROP FUNCTION IF EXISTS fee_percentage; DROP FUNCTION fee_percentage;
DROP FUNCTION IF EXISTS set_tx; DROP FUNCTION set_tx;
DROP FUNCTION IF EXISTS forge_l1_user_txs; DROP FUNCTION forge_l1_user_txs;
DROP FUNCTION IF EXISTS set_pool_tx; DROP FUNCTION set_pool_tx;
-- drop tables IF EXISTS -- drop tables
DROP TABLE IF EXISTS node_info; DROP TABLE account_creation_auth;
DROP TABLE IF EXISTS account_creation_auth; DROP TABLE tx_pool;
DROP TABLE IF EXISTS tx_pool; DROP TABLE auction_vars;
DROP TABLE IF EXISTS auction_vars; DROP TABLE rollup_vars;
DROP TABLE IF EXISTS rollup_vars; DROP TABLE escape_hatch_withdrawal;
DROP TABLE IF EXISTS escape_hatch_withdrawal; DROP TABLE bucket_update;
DROP TABLE IF EXISTS bucket_update; DROP TABLE token_exchange;
DROP TABLE IF EXISTS token_exchange; DROP TABLE wdelayer_vars;
DROP TABLE IF EXISTS wdelayer_vars; DROP TABLE tx;
DROP TABLE IF EXISTS tx; DROP TABLE exit_tree;
DROP TABLE IF EXISTS exit_tree; DROP TABLE account;
DROP TABLE IF EXISTS account_update; DROP TABLE token;
DROP TABLE IF EXISTS account; DROP TABLE bid;
DROP TABLE IF EXISTS token; DROP TABLE batch;
DROP TABLE IF EXISTS bid; DROP TABLE coordinator;
DROP TABLE IF EXISTS batch; DROP TABLE block;
DROP TABLE IF EXISTS coordinator; -- drop sequences
DROP TABLE IF EXISTS block; DROP SEQUENCE tx_item_id;
-- sequences
DROP SEQUENCE IF EXISTS tx_item_id;
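One practical effect of the `IF EXISTS` variants in the Down migration above is that a rollback can be re-run after a partial failure without aborting on objects that are already gone. A minimal sketch of driving these migrations with rubenv/sql-migrate; the `"postgres"` dialect and the bare `*sql.DB` handle here are assumptions for illustration, not the node's actual wiring:

```go
package example

import (
	"database/sql"
	"log"

	migrate "github.com/rubenv/sql-migrate"
)

func runMigrations(db *sql.DB, src migrate.MigrationSource) {
	// Apply everything that is pending.
	applied, err := migrate.Exec(db, "postgres", src, migrate.Up)
	if err != nil {
		log.Fatalf("migrate up: %v", err)
	}
	log.Printf("applied %d migrations", applied)

	// Rolling back runs the Down section; with DROP ... IF EXISTS the
	// statements are idempotent, so a retry after a partial rollback
	// does not fail on already-dropped tables, triggers or sequences.
	if _, err := migrate.Exec(db, "postgres", src, migrate.Down); err != nil {
		log.Fatalf("migrate down: %v", err)
	}
}
```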

View File

@@ -13,9 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate" migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler" "github.com/russross/meddler"
"golang.org/x/sync/semaphore"
) )
var migrations *migrate.PackrMigrationSource var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
// APIConnectionController is used to limit the SQL open connections used by the API // APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct { type APIConnectionController struct {
smphr *semaphore.Weighted smphr semaphore.Semaphore
timeout time.Duration timeout time.Duration
} }
// NewAPICnnectionController initialize APIConnectionController // NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController { func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{ return &APIConnectionController{
smphr: semaphore.NewWeighted(int64(maxConnections)), smphr: semaphore.New(maxConnections),
timeout: timeout, timeout: timeout,
} }
} }
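Both semaphore packages that appear above implement the same idea: cap the number of concurrent API queries against the SQL pool, and give up after a timeout instead of queueing forever. A self-contained sketch of that pattern with golang.org/x/sync/semaphore; the names below are illustrative, not the actual APIConnectionController API:

```go
package example

import (
	"context"
	"time"

	"golang.org/x/sync/semaphore"
)

type connLimiter struct {
	sem     *semaphore.Weighted
	timeout time.Duration
}

func newConnLimiter(maxConns int64, timeout time.Duration) *connLimiter {
	return &connLimiter{sem: semaphore.NewWeighted(maxConns), timeout: timeout}
}

// acquire blocks until a slot is free or the timeout expires, so a burst
// of API requests cannot exhaust the SQL connection pool.
func (l *connLimiter) acquire(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, l.timeout)
	defer cancel()
	return l.sem.Acquire(ctx, 1)
}

func (l *connLimiter) release() { l.sem.Release(1) }
```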

View File

@@ -324,6 +324,5 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
Value: tx.Value(), Value: tx.Value(),
Data: tx.Data(), Data: tx.Data(),
} }
result, err := c.client.CallContract(ctx, msg, blockNum) return c.client.CallContract(ctx, msg, blockNum)
return result, tracerr.Wrap(err)
} }

View File

@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
} }
consts, err := c.RollupConstants() consts, err := c.RollupConstants()
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err)) return nil, tracerr.Wrap(err)
} }
c.consts = consts c.consts = consts
return c, nil return c, nil
@@ -327,7 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
if auth == nil { if auth == nil {
auth, err = c.client.NewAuth() auth, err = c.client.NewAuth()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, err
} }
auth.GasLimit = 1000000 auth.GasLimit = 1000000
} }
@@ -393,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch, l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
args.ProofA, args.ProofB, args.ProofC) args.ProofA, args.ProofB, args.ProofC)
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err))
} }
return tx, nil return tx, nil
} }
@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
} }
fromIdxBig := big.NewInt(fromIdx) fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx) toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount) depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountF, err := common.NewFloat40(amount) amountF, err := common.NewFloat16(amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
} }
fromIdxBig := big.NewInt(fromIdx) fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx) toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount) depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountF, err := common.NewFloat40(amount) amountF, err := common.NewFloat16(amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -939,9 +939,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
FeeIdxCoordinator: []common.Idx{}, FeeIdxCoordinator: []common.Idx{},
} }
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1) lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
l1UserTxsData := []byte{} l1UserTxsData := []byte{}
@@ -968,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx) rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
} }
for i := 0; i < numTxsL1Coord; i++ { for i := 0; i < numTxsL1Coord; i++ {
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes] bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
var signature []byte var signature []byte
v := bytesL1Coordinator[0] v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33] s := bytesL1Coordinator[1:33]
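The two variants of the `lenL1L2TxsBytes` formula above differ only in how the amount is encoded: reading the formula, each packed transaction carries two account indexes of `nLevels/8` bytes each, the amount, and one fee byte, with Float16 taking 2 bytes and Float40 taking 5 (this is an inference from the constants shown, not a statement from the repository). Taking `nLevels = 32` as a worked example, the per-tx size goes from 11 to 14 bytes:

```go
package main

import "fmt"

func main() {
	nLevels := 32
	idxBytes := (nLevels / 8) * 2 // two account indexes
	feeBytes := 1

	float16Len := idxBytes + 2 + feeBytes // 8 + 2 + 1 = 11 bytes per tx
	float40Len := idxBytes + 5 + feeBytes // 8 + 5 + 1 = 14 bytes per tx
	fmt.Println(float16Len, float40Len)   // 11 14
}
```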

View File

@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified. args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001") l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
require.NoError(t, err) require.NoError(t, err)
numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
for i := 0; i < numTxsL1; i++ { for i := 0; i < numTxsL1; i++ {
bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes] bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
var signature []byte var signature []byte
v := bytesL1Coordinator[0] v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33] s := bytesL1Coordinator[1:33]

go.mod (6 changes)
View File

@@ -6,6 +6,7 @@ require (
github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml v0.3.1
github.com/dghubble/sling v1.3.0 github.com/dghubble/sling v1.3.0
github.com/ethereum/go-ethereum v1.9.25 github.com/ethereum/go-ethereum v1.9.25
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/getkin/kin-openapi v0.22.0 github.com/getkin/kin-openapi v0.22.0
github.com/gin-contrib/cors v1.3.1 github.com/gin-contrib/cors v1.3.1
github.com/gin-gonic/gin v1.5.0 github.com/gin-gonic/gin v1.5.0
@@ -17,18 +18,19 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0 github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0 github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/mapstructure v1.3.0 github.com/mitchellh/mapstructure v1.3.0
github.com/prometheus/client_golang v1.3.0
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/russross/meddler v1.0.0 github.com/russross/meddler v1.0.0
github.com/sirupsen/logrus v1.5.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
github.com/stretchr/testify v1.6.1 github.com/stretchr/testify v1.6.1
github.com/urfave/cli/v2 v2.2.0 github.com/urfave/cli/v2 v2.2.0
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
gopkg.in/go-playground/validator.v9 v9.29.1 gopkg.in/go-playground/validator.v9 v9.29.1
) )

go.sum (9 changes)
View File

@@ -68,7 +68,6 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
@@ -422,6 +421,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -445,7 +447,6 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
@@ -546,27 +547,23 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=

View File

@@ -67,11 +67,6 @@ func Init(levelStr string, outputs []string) {
func sprintStackTrace(st []tracerr.Frame) string { func sprintStackTrace(st []tracerr.Frame) string {
builder := strings.Builder{} builder := strings.Builder{}
// Skip deepest frame because it belongs to the go runtime and we don't
// care about it.
if len(st) > 0 {
st = st[:len(st)-1]
}
for _, f := range st { for _, f := range st {
builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func)) builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
} }
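For reference, the frames handled above come from tracerr-wrapped errors, and the deepest frame is the call into the Go runtime, which is why the variant that trims it loses nothing useful. A rough usage sketch; the trimming mirrors the logic shown above and is not part of tracerr itself:

```go
package example

import (
	"errors"
	"fmt"
	"strings"

	"github.com/hermeznetwork/tracerr"
)

func formatTrace(err error) string {
	frames := tracerr.StackTrace(err)
	// Drop the deepest frame: it belongs to the Go runtime and adds noise.
	if len(frames) > 0 {
		frames = frames[:len(frames)-1]
	}
	b := strings.Builder{}
	for _, f := range frames {
		b.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
	}
	return b.String()
}

func example() string {
	err := tracerr.Wrap(errors.New("boom"))
	return formatTrace(err)
}
```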

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"net"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -54,7 +53,6 @@ const (
// Node is the Hermez Node // Node is the Hermez Node
type Node struct { type Node struct {
nodeAPI *NodeAPI nodeAPI *NodeAPI
stateAPIUpdater *api.StateAPIUpdater
debugAPI *debugapi.DebugAPI debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater priceUpdater *priceupdater.PriceUpdater
// Coordinator // Coordinator
@@ -66,9 +64,7 @@ type Node struct {
// General // General
cfg *config.Node cfg *config.Node
mode Mode mode Mode
sqlConnRead *sqlx.DB sqlConn *sqlx.DB
sqlConnWrite *sqlx.DB
historyDB *historydb.HistoryDB
ctx context.Context ctx context.Context
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
@@ -78,34 +74,15 @@ type Node struct {
func NewNode(mode Mode, cfg *config.Node) (*Node, error) { func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
meddler.Debug = cfg.Debug.MeddlerLogs meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection // Establish DB connection
dbWrite, err := dbUtils.InitSQLDB( db, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite, cfg.PostgreSQL.Port,
cfg.PostgreSQL.HostWrite, cfg.PostgreSQL.Host,
cfg.PostgreSQL.UserWrite, cfg.PostgreSQL.User,
cfg.PostgreSQL.PasswordWrite, cfg.PostgreSQL.Password,
cfg.PostgreSQL.NameWrite, cfg.PostgreSQL.Name,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err)) return nil, tracerr.Wrap(err)
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
} }
var apiConnCon *dbUtils.APIConnectionController var apiConnCon *dbUtils.APIConnectionController
if cfg.API.Explorer || mode == ModeCoordinator { if cfg.API.Explorer || mode == ModeCoordinator {
@@ -115,7 +92,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
) )
} }
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon) historyDB := historydb.NewHistoryDB(db, apiConnCon)
ethClient, err := ethclient.Dial(cfg.Web3.URL) ethClient, err := ethclient.Dial(cfg.Web3.URL)
if err != nil { if err != nil {
@@ -126,8 +103,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
var keyStore *ethKeystore.KeyStore var keyStore *ethKeystore.KeyStore
if mode == ModeCoordinator { if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{ ethCfg = eth.EthereumConfig{
CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit, CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv, GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv,
} }
scryptN := ethKeystore.StandardScryptN scryptN := ethKeystore.StandardScryptN
@@ -139,23 +116,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path, keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
scryptN, scryptP) scryptN, scryptP)
balance, err := ethClient.BalanceAt(context.TODO(), cfg.Coordinator.ForgerAddress, nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
minForgeBalance := cfg.Coordinator.MinimumForgeAddressBalance
if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
return nil, tracerr.Wrap(fmt.Errorf(
"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
balance.Int64(), minForgeBalance))
}
log.Infow("forger ethereum account balance",
"addr", cfg.Coordinator.ForgerAddress,
"balance", balance.Int64(),
"minForgeBalance", minForgeBalance.Int64(),
)
// Unlock Coordinator ForgerAddr in the keystore to make calls // Unlock Coordinator ForgerAddr in the keystore to make calls
// to ForgeBatch in the smart contract // to ForgeBatch in the smart contract
if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) { if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
@@ -231,42 +191,19 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
initSCVars := sync.SCVars() initSCVars := sync.SCVars()
scConsts := common.SCConsts{ scConsts := synchronizer.SCConsts{
Rollup: *sync.RollupConstants(), Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(), Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(), WDelayer: *sync.WDelayerConstants(),
} }
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := api.NewStateAPIUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
var l2DB *l2db.L2DB var l2DB *l2db.L2DB
if mode == ModeCoordinator { if mode == ModeCoordinator {
l2DB = l2db.NewL2DB( l2DB = l2db.NewL2DB(
dbRead, dbWrite, db,
cfg.Coordinator.L2DB.SafetyPeriod, cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration, cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon, apiConnCon,
) )
@@ -308,6 +245,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if err != nil {
return nil, tracerr.Wrap(err)
}
serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs)) serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs))
for i, serverProofCfg := range cfg.Coordinator.ServerProofs { for i, serverProofCfg := range cfg.Coordinator.ServerProofs {
serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL, serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
@@ -359,16 +299,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ConfirmBlocks: cfg.Coordinator.ConfirmBlocks, ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc, L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration, ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration, SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
EthClientAttempts: cfg.Coordinator.EthClient.Attempts, EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration, EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce, EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration, EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice, MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration, TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
DebugBatchPath: cfg.Coordinator.Debug.BatchPath, DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
Purger: coordinator.PurgerCfg{ Purger: coordinator.PurgerCfg{
@@ -377,7 +313,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay, PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay, InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
}, },
ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
VerifierIdx: uint8(verifierIdx), VerifierIdx: uint8(verifierIdx),
TxProcessorConfig: txProcessorCfg, TxProcessorConfig: txProcessorCfg,
}, },
@@ -388,7 +323,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs, serverProofs,
client, client,
&scConsts, &scConsts,
initSCVars, &synchronizer.SCVariables{
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -396,11 +335,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
var nodeAPI *NodeAPI var nodeAPI *NodeAPI
if cfg.API.Address != "" { if cfg.API.Address != "" {
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
if cfg.API.UpdateMetricsInterval.Duration == 0 { if cfg.API.UpdateMetricsInterval.Duration == 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v", return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
cfg.API.UpdateMetricsInterval.Duration)) cfg.API.UpdateMetricsInterval.Duration))
@@ -420,11 +354,22 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
coord, cfg.API.Explorer, coord, cfg.API.Explorer,
server, server,
historyDB, historyDB,
stateDB,
l2DB, l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
} }
var debugAPI *debugapi.DebugAPI var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" { if cfg.Debug.APIAddress != "" {
@@ -437,7 +382,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
return &Node{ return &Node{
stateAPIUpdater: stateAPIUpdater,
nodeAPI: nodeAPI, nodeAPI: nodeAPI,
debugAPI: debugAPI, debugAPI: debugAPI,
priceUpdater: priceUpdater, priceUpdater: priceUpdater,
@@ -445,130 +389,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
sync: sync, sync: sync,
cfg: cfg, cfg: cfg,
mode: mode, mode: mode,
sqlConnRead: dbRead, sqlConn: db,
sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
}, nil }, nil
} }
// APIServer is a server that only runs the API
type APIServer struct {
nodeAPI *NodeAPI
mode Mode
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
}
// NewAPIServer creates a new APIServer
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
apiConnCon := dbUtils.NewAPICnnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
0,
apiConnCon,
)
}
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = cfg.Coordinator.API.Coordinator
}
nodeAPI, err := NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
l2DB,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &APIServer{
nodeAPI: nodeAPI,
mode: mode,
ctx: ctx,
cancel: cancel,
}, nil
}
// Start the APIServer
func (s *APIServer) Start() {
log.Infow("Starting api server...", "mode", s.mode)
log.Info("Starting NodeAPI...")
s.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
s.wg.Done()
}()
if err := s.nodeAPI.Run(s.ctx); err != nil {
if s.ctx.Err() != nil {
return
}
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Stop the APIServer
func (s *APIServer) Stop() {
log.Infow("Stopping NodeAPI...")
s.cancel()
s.wg.Wait()
}
// NodeAPI holds the node http API // NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint type NodeAPI struct { //nolint:golint
api *api.API api *api.API
@@ -588,7 +414,9 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints bool, coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine, server *gin.Engine,
hdb *historydb.HistoryDB, hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *api.Config,
) (*NodeAPI, error) { ) (*NodeAPI, error) {
engine := gin.Default() engine := gin.Default()
engine.NoRoute(handleNoRoute) engine.NoRoute(handleNoRoute)
@@ -597,7 +425,9 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints, coordinatorEndpoints, explorerEndpoints,
engine, engine,
hdb, hdb,
sdb,
l2db, l2db,
config,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -613,20 +443,16 @@ func NewNodeAPI(
// cancelation. // cancelation.
func (a *NodeAPI) Run(ctx context.Context) error { func (a *NodeAPI) Run(ctx context.Context) error {
server := &http.Server{ server := &http.Server{
Addr: a.addr,
Handler: a.engine, Handler: a.engine,
// TODO: Figure out best parameters for production // TODO: Figure out best parameters for production
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("NodeAPI is ready at %v", a.addr)
go func() { go func() {
if err := server.Serve(listener); err != nil && log.Infof("NodeAPI is ready at %v", a.addr)
tracerr.Unwrap(err) != http.ErrServerClosed { if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
@@ -642,57 +468,64 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil return nil
} }
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr, func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
batches []common.BatchData) error { batches []common.BatchData) {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{ n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats, Stats: *stats,
Vars: *vars, Vars: vars,
Batches: batches, Batches: batches,
}) })
} }
n.stateAPIUpdater.SetSCVars(vars) if n.nodeAPI != nil {
if vars.Rollup != nil {
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
}
if vars.Auction != nil {
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
}
if vars.WDelayer != nil {
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
}
if stats.Synced() { if stats.Synced() {
if err := n.stateAPIUpdater.UpdateNetworkInfo( if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum), common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum, stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil { ); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err) log.Errorw("API.UpdateNetworkInfo", "err", err)
} }
} else { } else {
n.stateAPIUpdater.UpdateNetworkInfoBlock( n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
) )
} }
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
} }
return nil
} }
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
vars *common.SCVariables) error {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{ n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats, Stats: *stats,
Vars: *vars.AsPtr(), Vars: vars,
}) })
} }
n.stateAPIUpdater.SetSCVars(vars.AsPtr()) if n.nodeAPI != nil {
n.stateAPIUpdater.UpdateNetworkInfoBlock( vars := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
) )
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
} }
return nil
} }
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we // TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around. // don't have to pass it around.
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) { func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
blockData, discarded, err := n.sync.Sync(ctx, lastBlock) blockData, discarded, err := n.sync.Sync2(ctx, lastBlock)
stats := n.sync.Stats() stats := n.sync.Stats()
if err != nil { if err != nil {
// case: error // case: error
@@ -701,20 +534,16 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg // case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded) log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars() vars := n.sync.SCVars()
if err := n.handleReorg(ctx, stats, vars); err != nil { n.handleReorg(ctx, stats, vars)
return nil, time.Duration(0), tracerr.Wrap(err)
}
return nil, time.Duration(0), nil return nil, time.Duration(0), nil
} else if blockData != nil { } else if blockData != nil {
// case: new block // case: new block
vars := common.SCVariablesPtr{ vars := synchronizer.SCVariablesPtr{
Rollup: blockData.Rollup.Vars, Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars, Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars, WDelayer: blockData.WDelayer.Vars,
} }
if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil { n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
return nil, time.Duration(0), tracerr.Wrap(err)
}
return &blockData.Block, time.Duration(0), nil return &blockData.Block, time.Duration(0), nil
} else { } else {
// case: no block // case: no block
@@ -733,9 +562,7 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized // the last synced one) is synchronized
stats := n.sync.Stats() stats := n.sync.Stats()
vars := n.sync.SCVars() vars := n.sync.SCVars()
if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil { n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
log.Fatalw("Node.handleNewBlock", "err", err)
}
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
@@ -756,8 +583,6 @@ func (n *Node) StartSynchronizer() {
} }
if errors.Is(err, eth.ErrBlockHashMismatchEvent) { if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
log.Warnw("Synchronizer.Sync", "err", err) log.Warnw("Synchronizer.Sync", "err", err)
} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
log.Warnw("Synchronizer.Sync", "err", err)
} else { } else {
log.Errorw("Synchronizer.Sync", "err", err) log.Errorw("Synchronizer.Sync", "err", err)
} }
@@ -821,26 +646,15 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("ApiStateUpdater.UpdateMetrics loop done") log.Info("API.UpdateMetrics loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration): case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil { if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err) log.Errorw("API.UpdateMetrics", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }
@@ -848,26 +662,15 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done") log.Info("API.UpdateRecommendedFee loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration): case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil { if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err) log.Errorw("API.UpdateRecommendedFee", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }

View File

@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
assert.NoError(t, err) assert.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, nil)
// Clean DB // Clean DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
// Populate DB // Populate DB

View File

@@ -1,44 +0,0 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}
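
These collectors are registered with the default Prometheus registry, so they only become visible once an HTTP handler serves that registry; the debug API change elsewhere in this compare mounts `promhttp.Handler()` for exactly that. A minimal standalone sketch of the same wiring (the port, gauge value and update site are illustrative, not taken from the node):

```
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var metricSyncedLastBlockNum = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "sync_synced_last_block_num",
	Help: "last synchronized block number",
})

func main() {
	prometheus.MustRegister(metricSyncedLastBlockNum)
	// In the node this would be set from the sync loop; the value is illustrative.
	metricSyncedLastBlockNum.Set(42)

	// Serve the default registry so Prometheus can scrape the gauges above.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9090", nil)
}
```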

View File

@@ -18,19 +18,6 @@ import (
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
const (
// errStrUnknownBlock is the string returned by geth when querying an
// unknown block
errStrUnknownBlock = "unknown block"
)
var (
// ErrUnknownBlock is the error returned by the Synchronizer when a
// block is queried by hash but the ethereum node doesn't find it due
// to it being discarded from a reorg.
ErrUnknownBlock = fmt.Errorf("unknown block")
)
// Stats of the synchronizer // Stats of the synchronizer
type Stats struct { type Stats struct {
Eth struct { Eth struct {
@@ -183,26 +170,26 @@ type StartBlockNums struct {
} }
// SCVariables joins all the smart contract variables in a single struct // SCVariables joins all the smart contract variables in a single struct
// type SCVariables struct { type SCVariables struct {
// Rollup common.RollupVariables `validate:"required"` Rollup common.RollupVariables `validate:"required"`
// Auction common.AuctionVariables `validate:"required"` Auction common.AuctionVariables `validate:"required"`
// WDelayer common.WDelayerVariables `validate:"required"` WDelayer common.WDelayerVariables `validate:"required"`
// } }
//
// // SCVariablesPtr joins all the smart contract variables as pointers in a single // SCVariablesPtr joins all the smart contract variables as pointers in a single
// // struct // struct
// type SCVariablesPtr struct { type SCVariablesPtr struct {
// Rollup *common.RollupVariables `validate:"required"` Rollup *common.RollupVariables `validate:"required"`
// Auction *common.AuctionVariables `validate:"required"` Auction *common.AuctionVariables `validate:"required"`
// WDelayer *common.WDelayerVariables `validate:"required"` WDelayer *common.WDelayerVariables `validate:"required"`
// } }
//
// // SCConsts joins all the smart contract constants in a single struct // SCConsts joins all the smart contract constants in a single struct
// type SCConsts struct { type SCConsts struct {
// Rollup common.RollupConstants Rollup common.RollupConstants
// Auction common.AuctionConstants Auction common.AuctionConstants
// WDelayer common.WDelayerConstants WDelayer common.WDelayerConstants
// } }
// Config is the Synchronizer configuration // Config is the Synchronizer configuration
type Config struct { type Config struct {
@@ -213,13 +200,13 @@ type Config struct {
// Synchronizer implements the Synchronizer type // Synchronizer implements the Synchronizer type
type Synchronizer struct { type Synchronizer struct {
ethClient eth.ClientInterface ethClient eth.ClientInterface
consts common.SCConsts consts SCConsts
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
stateDB *statedb.StateDB stateDB *statedb.StateDB
cfg Config cfg Config
initVars common.SCVariables initVars SCVariables
startBlockNum int64 startBlockNum int64
vars common.SCVariables vars SCVariables
stats *StatsHolder stats *StatsHolder
resetStateFailed bool resetStateFailed bool
} }
@@ -242,7 +229,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w", return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err)) err))
} }
consts := common.SCConsts{ consts := SCConsts{
Rollup: *rollupConstants, Rollup: *rollupConstants,
Auction: *auctionConstants, Auction: *auctionConstants,
WDelayer: *wDelayerConstants, WDelayer: *wDelayerConstants,
@@ -307,11 +294,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
} }
// SCVars returns a copy of the Smart Contract Variables // SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() *common.SCVariables { func (s *Synchronizer) SCVars() SCVariablesPtr {
return &common.SCVariables{ return SCVariablesPtr{
Rollup: *s.vars.Rollup.Copy(), Rollup: s.vars.Rollup.Copy(),
Auction: *s.vars.Auction.Copy(), Auction: s.vars.Auction.Copy(),
WDelayer: *s.vars.WDelayer.Copy(), WDelayer: s.vars.WDelayer.Copy(),
} }
} }
@@ -503,13 +490,13 @@ func (s *Synchronizer) resetIntermediateState() error {
return nil return nil
} }
// Sync attempts to synchronize an ethereum block starting from lastSavedBlock. // Sync2 attempts to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB. // If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB.
// If a block is synched, it will be returned and also stored in the DB. If a // If a block is synched, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no // reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made. // synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations // TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync(ctx context.Context, func (s *Synchronizer) Sync2(ctx context.Context,
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) { lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed { if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil { if err := s.resetIntermediateState(); err != nil {
@@ -661,22 +648,18 @@ func (s *Synchronizer) Sync(ctx context.Context,
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num,
)
for _, batchData := range rollupData.Batches { for _, batchData := range rollupData.Batches {
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
log.Debugw("Synced batch", log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum, "syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum), "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum, "ethLastBatch", s.stats.Eth.LastBatchNum,
) )
} }
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num,
)
return blockData, nil, nil return blockData, nil, nil
} }
@@ -724,7 +707,7 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
} }
func getInitialVariables(ethClient eth.ClientInterface, func getInitialVariables(ethClient eth.ClientInterface,
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) { consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err)) return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
@@ -740,7 +723,7 @@ func getInitialVariables(ethClient eth.ClientInterface,
rollupVars := rollupInit.RollupVariables() rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding) auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables() wDelayerVars := wDelayerInit.WDelayerVariables()
return &common.SCVariables{ return &SCVariables{
Rollup: *rollupVars, Rollup: *rollupVars,
Auction: *auctionVars, Auction: *auctionVars,
WDelayer: *wDelayerVars, WDelayer: *wDelayerVars,
@@ -828,9 +811,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
// Get rollup events in the block, and make sure the block hash matches // Get rollup events in the block, and make sure the block hash matches
// the expected one. // the expected one.
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash) rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
@@ -997,19 +978,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
} }
batchData.CreatedAccounts = processTxsOut.CreatedAccounts batchData.CreatedAccounts = processTxsOut.CreatedAccounts
batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
len(processTxsOut.UpdatedAccounts))
for _, acc := range processTxsOut.UpdatedAccounts {
batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
common.AccountUpdate{
EthBlockNum: blockNum,
BatchNum: batchNum,
Idx: acc.Idx,
Nonce: acc.Nonce,
Balance: acc.Balance,
})
}
slotNum := int64(0) slotNum := int64(0)
if ethBlock.Num >= s.consts.Auction.GenesisBlockNum { if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) / slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
@@ -1153,9 +1121,7 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
// Get auction events in the block // Get auction events in the block
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash) auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
@@ -1252,9 +1218,7 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
// Get wDelayer events in the block // Get wDelayer events in the block
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash) wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
} }
// No events in this block // No events in this block

View File

@@ -17,6 +17,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
@@ -171,8 +172,6 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
*exit = syncBatch.ExitTree[j] *exit = syncBatch.ExitTree[j]
} }
assert.Equal(t, batch.Batch, syncBatch.Batch) assert.Equal(t, batch.Batch, syncBatch.Batch)
// Ignore updated accounts
syncBatch.UpdatedAccounts = nil
assert.Equal(t, batch, syncBatch) assert.Equal(t, batch, syncBatch)
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
@@ -315,21 +314,13 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, nil)
// Clear DB // Clear DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
return stateDB, historyDB return stateDB, historyDB
} }
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestSyncGeneral(t *testing.T) { func TestSyncGeneral(t *testing.T) {
// //
// Setup // Setup
@@ -348,6 +339,7 @@ func TestSyncGeneral(t *testing.T) {
s, err := NewSynchronizer(client, historyDB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
log.Error(err)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@@ -359,7 +351,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
// Test Sync for rollup genesis block // Test Sync for rollup genesis block
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -372,9 +364,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num) assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num) assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
vars := s.SCVars() vars := s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
dbBlocks, err := s.historyDB.GetAllBlocks() dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err) require.NoError(t, err)
@@ -382,7 +374,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), dbBlocks[1].Num) assert.Equal(t, int64(1), dbBlocks[1].Num)
// Sync again and expect no new blocks // Sync again and expect no new blocks
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.Nil(t, syncBlock) require.Nil(t, syncBlock)
@@ -442,22 +434,12 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs)) require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
// blocks 1 (blockNum=3) // blocks 1 (blockNum=3)
i = 1 i = 1
require.Equal(t, 3, int(blocks[i].Block.Num)) require.Equal(t, 3, int(blocks[i].Block.Num))
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
// Generate extra required data // Generate extra required data
ethAddTokens(blocks, client) ethAddTokens(blocks, client)
@@ -479,7 +461,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 2 // Block 2
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -496,7 +478,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 3 // Block 3
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
@@ -520,7 +502,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -533,9 +515,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num) assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num) assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
dbExits, err := s.historyDB.GetAllExits() dbExits, err := s.historyDB.GetAllExits()
require.NoError(t, err) require.NoError(t, err)
@@ -571,7 +553,7 @@ func TestSyncGeneral(t *testing.T) {
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -632,12 +614,6 @@ func TestSyncGeneral(t *testing.T) {
blocks, err = tc.GenerateBlocks(set2) blocks, err = tc.GenerateBlocks(set2)
require.NoError(t, err) require.NoError(t, err)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
client.CtlRollback() client.CtlRollback()
} }
@@ -656,7 +632,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// First sync detects the reorg and discards 4 blocks // First sync detects the reorg and discards 4 blocks
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
expectedDiscards := int64(4) expectedDiscards := int64(4)
require.Equal(t, &expectedDiscards, discards) require.Equal(t, &expectedDiscards, discards)
@@ -665,9 +641,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num) assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
// At this point, the DB only has data up to block 1 // At this point, the DB only has data up to block 1
dbBlock, err := s.historyDB.GetLastBlock() dbBlock, err := s.historyDB.GetLastBlock()
@@ -684,7 +660,7 @@ func TestSyncGeneral(t *testing.T) {
// Sync blocks 2-6 // Sync blocks 2-6
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
syncBlock, discards, err = s.Sync(ctx, nil) syncBlock, discards, err = s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -704,9 +680,9 @@ func TestSyncGeneral(t *testing.T) {
} }
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
} }
dbBlock, err = s.historyDB.GetLastBlock() dbBlock, err = s.historyDB.GetLastBlock()
@@ -807,7 +783,7 @@ func TestSyncForgerCommitment(t *testing.T) {
// be in sync // be in sync
for { for {
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -826,7 +802,7 @@ func TestSyncForgerCommitment(t *testing.T) {
err = client.CtlAddBlocks([]common.BlockData{block}) err = client.CtlAddBlocks([]common.BlockData{block})
require.NoError(t, err) require.NoError(t, err)
syncBlock, discards, err := s.Sync(ctx, nil) syncBlock, discards, err := s.Sync2(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {

View File

@@ -2,7 +2,6 @@ package debugapi
import ( import (
"context" "context"
"net"
"net/http" "net/http"
"time" "time"
@@ -13,7 +12,6 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
func handleNoRoute(c *gin.Context) { func handleNoRoute(c *gin.Context) {
@@ -109,8 +107,6 @@ func (a *DebugAPI) Run(ctx context.Context) error {
api.Use(cors.Default()) api.Use(cors.Default())
debugAPI := api.Group("/debug") debugAPI := api.Group("/debug")
debugAPI.GET("/metrics", gin.WrapH(promhttp.Handler()))
debugAPI.GET("sdb/batchnum", a.handleCurrentBatch) debugAPI.GET("sdb/batchnum", a.handleCurrentBatch)
debugAPI.GET("sdb/mtroot", a.handleMTRoot) debugAPI.GET("sdb/mtroot", a.handleMTRoot)
// Accounts returned by these endpoints will always have BatchNum = 0, // Accounts returned by these endpoints will always have BatchNum = 0,
@@ -122,20 +118,16 @@ func (a *DebugAPI) Run(ctx context.Context) error {
debugAPI.GET("sync/stats", a.handleSyncStats) debugAPI.GET("sync/stats", a.handleSyncStats)
debugAPIServer := &http.Server{ debugAPIServer := &http.Server{
Addr: a.addr,
Handler: api, Handler: api,
// Use some hardcoded numbers that are suitable for testing // Use some hardcoded numberes that are suitable for testing
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("DebugAPI is ready at %v", a.addr)
go func() { go func() {
if err := debugAPIServer.Serve(listener); err != nil && log.Infof("DebugAPI is ready at %v", a.addr)
tracerr.Unwrap(err) != http.ErrServerClosed { if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
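
This change (and the analogous one in the prover mock server further down) splits `ListenAndServe` into an explicit `net.Listen` followed by `Serve` in a goroutine, so that a failure to bind the address is returned to the caller instead of only surfacing inside the goroutine. A minimal sketch of that pattern, with illustrative names and address:

```
package main

import (
	"net"
	"net/http"
)

// runServer binds addr synchronously, so errors such as "address already in
// use" are returned to the caller, and then serves in a background goroutine.
func runServer(addr string, handler http.Handler) error {
	srv := &http.Server{Handler: handler}
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	go func() {
		// ErrServerClosed is the expected result of a clean shutdown.
		if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
			panic(err)
		}
	}()
	return nil
}

func main() {
	if err := runServer("127.0.0.1:8080", http.NewServeMux()); err != nil {
		panic(err)
	}
	select {} // keep the example process alive
}
```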

View File

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
cpy := c.nextBlock().copy() cpy := c.nextBlock().copy()
defer func() { c.revertIfErr(err, cpy) }() defer func() { c.revertIfErr(err, cpy) }()
_, err = common.NewFloat40(amount) _, err = common.NewFloat16(amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
_, err = common.NewFloat40(depositAmount) _, err = common.NewFloat16(depositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -146,7 +145,7 @@ const longWaitDuration = 999 * time.Hour
// const provingDuration = 2 * time.Second // const provingDuration = 2 * time.Second
func (s *Mock) runProver(ctx context.Context) { func (s *Mock) runProver(ctx context.Context) {
timer := time.NewTimer(longWaitDuration) waitDuration := longWaitDuration
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -154,27 +153,21 @@ func (s *Mock) runProver(ctx context.Context) {
case msg := <-s.msgCh: case msg := <-s.msgCh:
switch msg.value { switch msg.value {
case "cancel": case "cancel":
if !timer.Stop() { waitDuration = longWaitDuration
<-timer.C
}
timer.Reset(longWaitDuration)
s.Lock() s.Lock()
if !s.status.IsReady() { if !s.status.IsReady() {
s.status = prover.StatusCodeAborted s.status = prover.StatusCodeAborted
} }
s.Unlock() s.Unlock()
case "prove": case "prove":
if !timer.Stop() { waitDuration = s.provingDuration
<-timer.C
}
timer.Reset(s.provingDuration)
s.Lock() s.Lock()
s.status = prover.StatusCodeBusy s.status = prover.StatusCodeBusy
s.Unlock() s.Unlock()
} }
msg.ackCh <- true msg.ackCh <- true
case <-timer.C: case <-time.After(waitDuration):
timer.Reset(longWaitDuration) waitDuration = longWaitDuration
s.Lock() s.Lock()
if s.status != prover.StatusCodeBusy { if s.status != prover.StatusCodeBusy {
s.Unlock() s.Unlock()
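
One side of this hunk replaces the per-iteration `time.After` with a single `time.Timer` and the stop-and-drain idiom, which avoids allocating a new timer on every loop turn and avoids reading a stale expiry after `Reset`. A standalone sketch of the idiom (durations are illustrative):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(time.Hour)

	// Before Reset, Stop the timer; if Stop reports it already fired, drain
	// the channel so the stale expiry is not read as the new one.
	if !timer.Stop() {
		<-timer.C
	}
	timer.Reset(100 * time.Millisecond)

	<-timer.C
	fmt.Println("fired after the reset duration")
}
```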
@@ -209,20 +202,16 @@ func (s *Mock) Run(ctx context.Context) error {
apiGroup.POST("/cancel", s.handleCancel) apiGroup.POST("/cancel", s.handleCancel)
debugAPIServer := &http.Server{ debugAPIServer := &http.Server{
Addr: s.addr,
Handler: api, Handler: api,
// Use some hardcoded numbers that are suitable for testing // Use some hardcoded numbers that are suitable for testing
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", s.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("prover.MockServer is ready at %v", s.addr)
go func() { go func() {
if err := debugAPIServer.Serve(listener); err != nil && log.Infof("prover.MockServer is ready at %v", s.addr)
tracerr.Unwrap(err) != http.ErrServerClosed { if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()

View File

@@ -142,12 +142,10 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
// same values than in the js test // same values than in the js test
users = GenerateJsUsers(t) users = GenerateJsUsers(t)
depositAmount, err := common.Float40(10400).BigInt()
require.Nil(t, err)
l1UserTxs = []common.L1Tx{ l1UserTxs = []common.L1Tx{
{ {
FromIdx: 0, FromIdx: 0,
DepositAmount: depositAmount, DepositAmount: big.NewInt(16000000),
Amount: big.NewInt(0), Amount: big.NewInt(0),
TokenID: 1, TokenID: 1,
FromBJJ: users[0].BJJ.Public().Compress(), FromBJJ: users[0].BJJ.Public().Compress(),

View File

@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
}) })
} }
hdb := historydb.NewHistoryDB(db, db, nil) hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{ assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1, Num: 1,
})) }))
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB") dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData() h, err := zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String()) assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)
// batch3 // batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData() h, err = zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String()) assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0]) assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String()) assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)

View File

@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
os.Exit(exitVal) os.Exit(exitVal)
} }
const MaxTx = 352 const MaxTx = 376
const NLevels = 32 const NLevels = 32
const MaxL1Tx = 256 const MaxL1Tx = 256
const MaxFeeTx = 64 const MaxFeeTx = 64
@@ -61,7 +61,6 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
return return
} }
log.Infof("sending proof to %s", proofServerURL)
// Store zkinputs json for debugging purposes // Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(zki) zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err) require.NoError(t, err)

View File

@@ -27,20 +27,15 @@ type TxProcessor struct {
// AccumulatedFees contains the accumulated fees for each token (Coord // AccumulatedFees contains the accumulated fees for each token (Coord
// Idx) in the processed batch // Idx) in the processed batch
AccumulatedFees map[common.Idx]*big.Int AccumulatedFees map[common.Idx]*big.Int
// updatedAccounts stores the last version of the account when it has
// been created/updated by any of the processed transactions.
updatedAccounts map[common.Idx]*common.Account
config Config config Config
} }
// Config contains the TxProcessor configuration parameters // Config contains the TxProcessor configuration parameters
type Config struct { type Config struct {
NLevels uint32 NLevels uint32
// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
MaxFeeTx uint32 MaxFeeTx uint32
MaxTx uint32 MaxTx uint32
MaxL1Tx uint32 MaxL1Tx uint32
// ChainID of the blockchain
ChainID uint16 ChainID uint16
} }
@@ -58,9 +53,6 @@ type ProcessTxOutput struct {
CreatedAccounts []common.Account CreatedAccounts []common.Account
CoordinatorIdxsMap map[common.TokenID]common.Idx CoordinatorIdxsMap map[common.TokenID]common.Idx
CollectedFees map[common.TokenID]*big.Int CollectedFees map[common.TokenID]*big.Int
// UpdatedAccounts returns the current state of each account
// created/updated by any of the processed transactions.
UpdatedAccounts map[common.Idx]*common.Account
} }
func newErrorNotEnoughBalance(tx common.Tx) error { func newErrorNotEnoughBalance(tx common.Tx) error {
@@ -133,10 +125,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx)) return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx))
} }
if tp.s.Type() == statedb.TypeSynchronizer {
tp.updatedAccounts = make(map[common.Idx]*common.Account)
}
exits := make([]processedExit, nTx) exits := make([]processedExit, nTx)
if tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeBatchBuilder {
@@ -208,7 +196,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
} }
} }
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil && exitAccount != nil { if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{ exits[tp.i] = processedExit{
exit: true, exit: true,
newExit: newExit, newExit: newExit,
@@ -392,7 +380,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr) tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr)
} }
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee) accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
pFee, err := tp.updateAccount(idx, accCoord) pFee, err := tp.s.UpdateAccount(idx, accCoord)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -417,7 +405,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, nil return nil, nil
} }
if tp.s.Type() == statedb.TypeSynchronizer {
// once all txs processed (exitTree root frozen), for each Exit, // once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data // generate common.ExitInfo data
var exitInfos []common.ExitInfo var exitInfos []common.ExitInfo
@@ -449,7 +436,8 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
} }
} }
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will if tp.s.Type() == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum // be able to store it into HistoryDB for the concrete BatchNum
return &ProcessTxOutput{ return &ProcessTxOutput{
ZKInputs: nil, ZKInputs: nil,
@@ -457,7 +445,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
CreatedAccounts: createdAccounts, CreatedAccounts: createdAccounts,
CoordinatorIdxsMap: coordIdxsMap, CoordinatorIdxsMap: coordIdxsMap,
CollectedFees: collectedFees, CollectedFees: collectedFees,
UpdatedAccounts: tp.updatedAccounts,
}, nil }, nil
} }
@@ -514,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
tp.zki.OnChain[tp.i] = big.NewInt(1) tp.zki.OnChain[tp.i] = big.NewInt(1)
// L1Txs // L1Txs
depositAmountF40, err := common.NewFloat40(tx.DepositAmount) depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
if err != nil { if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err) return nil, nil, false, nil, tracerr.Wrap(err)
} }
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40)) tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr) tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
if tx.FromBJJ != common.EmptyBJJComp { if tx.FromBJJ != common.EmptyBJJComp {
tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ) tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)
@@ -528,20 +515,6 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1) tp.zki.ISOnChain[tp.i] = big.NewInt(1)
} }
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// in the cases where at L1Tx there is usage of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
} }
switch tx.Type { switch tx.Type {
@@ -605,7 +578,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
// execute exit flow // execute exit flow
// coordIdxsMap is 'nil', as at L1Txs there are no L2 fees // coordIdxsMap is 'nil', as at L1Txs there are no L2 fees
exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx(), tx.Amount) exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, nil, false, nil, tracerr.Wrap(err) return nil, nil, false, nil, tracerr.Wrap(err)
@@ -684,11 +657,6 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr) tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
tp.zki.OnChain[tp.i] = big.NewInt(0) tp.zki.OnChain[tp.i] = big.NewInt(0)
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
tp.zki.NewAccount[tp.i] = big.NewInt(0) tp.zki.NewAccount[tp.i] = big.NewInt(0)
// L2Txs // L2Txs
@@ -730,7 +698,7 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
} }
case common.TxTypeExit: case common.TxTypeExit:
// execute exit flow // execute exit flow
exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx(), tx.Amount) exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx())
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, nil, false, tracerr.Wrap(err) return nil, nil, false, tracerr.Wrap(err)
@@ -752,7 +720,7 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
EthAddr: tx.FromEthAddr, EthAddr: tx.FromEthAddr,
} }
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), account) p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), account)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -787,28 +755,6 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1) return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1)
} }
// createAccount is a wrapper over the StateDB.CreateAccount method that also
// stores the created account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) createAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.CreateAccount(idx, account)
}
// updateAccount is a wrapper over the StateDB.UpdateAccount method that also
// stores the updated account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) updateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.UpdateAccount(idx, account)
}
// applyDeposit updates the balance in the account of the depositor, if // andTransfer parameter is set to true, the method will also apply the
// andTransfer parameter is set to true, the method will also apply the // andTransfer parameter is set to true, the method will also apply the
// Transfer of the L1Tx/DepositTransfer // Transfer of the L1Tx/DepositTransfer
@@ -839,7 +785,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
} }
// update sender account in localStateDB // update sender account in localStateDB
p, err := tp.updateAccount(tx.FromIdx, accSender) p, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -876,7 +822,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err := tp.updateAccount(tx.ToIdx, accReceiver) p, err := tp.s.UpdateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -959,7 +905,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
} }
// update sender account in localStateDB // update sender account in localStateDB
pSender, err := tp.updateAccount(tx.FromIdx, accSender) pSender, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -998,7 +944,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
// update receiver account in localStateDB // update receiver account in localStateDB
pReceiver, err := tp.updateAccount(auxToIdx, accReceiver) pReceiver, err := tp.s.UpdateAccount(auxToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1041,7 +987,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
} }
// create Account of the Sender // create Account of the Sender
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), accSender) p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1089,7 +1035,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err = tp.updateAccount(tx.ToIdx, accReceiver) p, err = tp.s.UpdateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1104,7 +1050,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
// new Leaf in the ExitTree. // new Leaf in the ExitTree.
func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx, func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree, collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree,
tx common.Tx, originalAmount *big.Int) (*common.Account, bool, error) { tx common.Tx) (*common.Account, bool, error) {
// 0. subtract tx.Amount from current Account in StateMT // 0. subtract tx.Amount from current Account in StateMT
// add the tx.Amount into the Account (tx.FromIdx) in the ExitMT // add the tx.Amount into the Account (tx.FromIdx) in the ExitMT
acc, err := tp.s.GetAccount(tx.FromIdx) acc, err := tp.s.GetAccount(tx.FromIdx)
@@ -1163,7 +1109,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
} }
p, err := tp.updateAccount(tx.FromIdx, acc) p, err := tp.s.UpdateAccount(tx.FromIdx, acc)
if err != nil { if err != nil {
return nil, false, tracerr.Wrap(err) return nil, false, tracerr.Wrap(err)
} }
@@ -1174,21 +1120,6 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
if exitTree == nil { if exitTree == nil {
return nil, false, nil return nil, false, nil
} }
// Do not add the Exit when Amount=0 (which is different from
// EffectiveAmount=0). In this function, tx.Amount is in reality the
// EffectiveAmount; that's why the originalAmount parameter is used here,
// since it carries the real value of tx.Amount (not tx.EffectiveAmount).
// This is a particularity of the circuit's approach: the plan is to
// eventually update the circuit so that when Amount>0 but
// EffectiveAmount=0 the Exit is not added to the Exits MerkleTree, but
// for now the Go code is adapted to the circuit.
if originalAmount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
// if the Exit Amount==0, the Exit is not added to the ExitTree
return nil, false, nil
}
exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx) exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx)
if tracerr.Unwrap(err) == db.ErrNotFound { if tracerr.Unwrap(err) == db.ErrNotFound {
// 1a. if idx does not exist in exitTree: // 1a. if idx does not exist in exitTree:
@@ -1197,8 +1128,6 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
exitAccount := &common.Account{ exitAccount := &common.Account{
TokenID: acc.TokenID, TokenID: acc.TokenID,
Nonce: common.Nonce(0), Nonce: common.Nonce(0),
// as this is a common.Tx, the tx.Amount is already an
// EffectiveAmount
Balance: tx.Amount, Balance: tx.Amount,
BJJ: acc.BJJ, BJJ: acc.BJJ,
EthAddr: acc.EthAddr, EthAddr: acc.EthAddr,
@@ -1212,9 +1141,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
// Balance2 contains the ExitLeaf Balance before the tp.zki.Balance2[tp.i] = tx.Amount
// leaf update, which is 0
tp.zki.Balance2[tp.i] = big.NewInt(0)
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
// as Leaf didn't exist in the ExitTree, set NewExit[i]=1 // as Leaf didn't exist in the ExitTree, set NewExit[i]=1
tp.zki.NewExit[tp.i] = big.NewInt(1) tp.zki.NewExit[tp.i] = big.NewInt(1)
@@ -1248,9 +1175,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
// Balance2 contains the ExitLeaf Balance before the leaf tp.zki.Balance2[tp.i] = tx.Amount
// update
tp.zki.Balance2[tp.i] = exitAccount.Balance
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
} }
@@ -1268,7 +1193,6 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
tp.zki.OldKey2[tp.i] = p.OldKey.BigInt() tp.zki.OldKey2[tp.i] = p.OldKey.BigInt()
tp.zki.OldValue2[tp.i] = p.OldValue.BigInt() tp.zki.OldValue2[tp.i] = p.OldValue.BigInt()
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
} }
return exitAccount, false, nil return exitAccount, false, nil

View File

@@ -1,10 +1,11 @@
package txprocessor package txprocessor
import ( import (
"encoding/binary"
"encoding/hex"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"os" "os"
"sort"
"testing" "testing"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
@@ -641,16 +642,17 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
users := txsets.GenerateJsUsers(t) users := txsets.GenerateJsUsers(t)
daMaxF40 := common.Float40(0xFFFFFFFFFF) daMaxHex, err := hex.DecodeString("FFFF")
daMaxBI, err := daMaxF40.BigInt()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String()) daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
daMaxBI := daMaxF16.BigInt()
assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
daMax1F40 := common.Float40(0xFFFFFFFFFE) daMax1Hex, err := hex.DecodeString("FFFE")
require.NoError(t, err) require.NoError(t, err)
daMax1BI, err := daMax1F40.BigInt() daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
require.NoError(t, err) daMax1BI := daMax1F16.BigInt()
assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String()) assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
l1Txs := []common.L1Tx{ l1Txs := []common.L1Tx{
{ {
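
The expected decimal strings asserted above come from expanding the compact amount encodings into big integers. The sketch below reproduces the asserted maxima; the bit layouts (Float40: 5-bit base-10 exponent plus 35-bit mantissa; Float16: 5-bit exponent, a half flag and a 10-bit mantissa) are inferred from those asserted values rather than taken from the `common` package, so treat it as illustrative:

```
package main

import (
	"fmt"
	"math/big"
)

// decodeFloat40 assumes: top 5 bits are a base-10 exponent, lower 35 bits the
// mantissa, value = mantissa * 10^exp.
func decodeFloat40(f uint64) *big.Int {
	exp := f >> 35
	mant := f & 0x7FFFFFFFF
	v := new(big.Int).SetUint64(mant)
	return v.Mul(v, new(big.Int).Exp(big.NewInt(10), new(big.Int).SetUint64(exp), nil))
}

// decodeFloat16 assumes: 5-bit exponent, 1 half-flag bit, 10-bit mantissa,
// value = (mantissa + half/2) * 10^exp; computed in halves to stay integral.
func decodeFloat16(f uint16) *big.Int {
	exp := uint64(f >> 11)
	half := uint64((f >> 10) & 1)
	mant := uint64(f & 0x3FF)
	v := new(big.Int).SetUint64(2*mant + half)
	v.Mul(v, new(big.Int).Exp(big.NewInt(10), new(big.Int).SetUint64(exp), nil))
	return v.Div(v, big.NewInt(2))
}

func main() {
	fmt.Println(decodeFloat40(0xFFFFFFFFFF)) // 343597383670000000000000000000000000000000
	fmt.Println(decodeFloat16(0xFFFF))       // 10235000000000000000000000000000000
}
```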
@@ -795,8 +797,7 @@ func TestMultipleCoordIdxForTokenID(t *testing.T) {
checkBalanceByIdx(t, tp.s, 259, "0") // Coord0 checkBalanceByIdx(t, tp.s, 259, "0") // Coord0
} }
func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOutput, func TestTwoExits(t *testing.T) {
[]*ProcessTxOutput, []*ProcessTxOutput) {
// In the first part we generate a batch with two force exits for the // In the first part we generate a batch with two force exits for the
// same account of 20 each. The txprocessor output should be a single // same account of 20 each. The txprocessor output should be a single
// exitInfo with balance of 40. // exitInfo with balance of 40.
@@ -804,9 +805,8 @@ func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOu
require.NoError(t, err) require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir)) defer assert.NoError(t, os.RemoveAll(dir))
nLevels := 16
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: stateDBType, NLevels: nLevels}) Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err) assert.NoError(t, err)
chainID := uint16(1) chainID := uint16(1)
@@ -842,10 +842,10 @@ func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOu
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs)) require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
config := Config{ config := Config{
NLevels: uint32(nLevels), NLevels: 32,
MaxTx: 3, MaxFeeTx: 64,
MaxL1Tx: 2, MaxTx: 512,
MaxFeeTx: 2, MaxL1Tx: 16,
ChainID: chainID, ChainID: chainID,
} }
tp := NewTxProcessor(sdb, config) tp := NewTxProcessor(sdb, config)
@@ -858,6 +858,8 @@ func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOu
} }
} }
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
acc, err := sdb.GetAccount(256) acc, err := sdb.GetAccount(256)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, big.NewInt(60), acc.Balance) assert.Equal(t, big.NewInt(60), acc.Balance)
@@ -872,7 +874,7 @@ func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOu
defer assert.NoError(t, os.RemoveAll(dir2)) defer assert.NoError(t, os.RemoveAll(dir2))
sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128, sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
Type: stateDBType, NLevels: nLevels}) Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err) assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx) tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
@@ -910,261 +912,5 @@ func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOu
} }
} }
// In the third part we start a fresh statedb and generate a batch with
// two force exits for the same account as before, but where the 1st Exit
// takes the full amount and the 2nd Exit asks for more than
// the available balance. The txprocessor output should be a single
// exitInfo with balance of 40, and the exit merkle tree proof should
// be equal to the previous ones.
dir3, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir3))
sdb3, err := statedb.NewStateDB(statedb.Config{Path: dir3, Keep: 128,
Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
// Single exit with balance of both exits in previous set. The exit
// root should match.
set3 := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
ForceExit(0) A: 40
ForceExit(0) A: 100
> batchL1 // freeze L1User{2}
> batchL1 // forge L1User{2}
> block
`
blocks, err = tc.GenerateBlocks(set3)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
tp = NewTxProcessor(sdb3, config)
ptOuts3 := []*ProcessTxOutput{}
for _, block := range blocks {
for _, batch := range block.Rollup.Batches {
ptOut, err := tp.ProcessTxs(nil, batch.L1UserTxs, nil, nil)
require.NoError(t, err)
ptOuts3 = append(ptOuts3, ptOut)
}
}
return ptOuts, ptOuts2, ptOuts3
}
func TestTwoExitsSynchronizer(t *testing.T) {
ptOuts, ptOuts2, ptOuts3 := testTwoExits(t, statedb.TypeSynchronizer)
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts3[3].ExitInfos[0].MerkleProof)
}
func TestExitOf0Amount(t *testing.T) {
// Test to check that when doing an Exit with amount 0 the Exit Root
// does not change (as there is no new Exit Leaf created)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)
chainID := uint16(1)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
set := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 100
> batchL1 // batch1: freeze L1User{2}
> batchL1 // batch2: forge L1User{2}
ForceExit(0) A: 10
ForceExit(0) B: 0
> batchL1 // batch3: freeze L1User{2}
> batchL1 // batch4: forge L1User{2}
ForceExit(0) A: 10
> batchL1 // batch5: freeze L1User{1}
> batchL1 // batch6: forge L1User{1}
ForceExit(0) A: 0
> batchL1 // batch7: freeze L1User{1}
> batchL1 // batch8: forge L1User{1}
> block
`
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
// Sanity check
require.Equal(t, 2, len(blocks[0].Rollup.Batches[1].L1UserTxs))
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
require.Equal(t, big.NewInt(10), blocks[0].Rollup.Batches[3].L1UserTxs[0].Amount)
require.Equal(t, big.NewInt(0), blocks[0].Rollup.Batches[3].L1UserTxs[1].Amount)
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
// For this test are only processed the batches with transactions:
// - Batch2, equivalent to Batches[1]
// - Batch4, equivalent to Batches[3]
// - Batch6, equivalent to Batches[5]
// - Batch8, equivalent to Batches[7]
// process Batch2:
_, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[1].L1UserTxs, nil, nil)
require.NoError(t, err)
// process Batch4:
ptOut, err := tp.ProcessTxs(nil, blocks[0].Rollup.Batches[3].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
exitRootBatch4 := ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String()
// process Batch6:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[5].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// Expect that the ExitRoot for the Batch6 will be equal than for the
// Batch4, as the Batch4 & Batch6 have the same tx with Exit Amount=10,
// and Batch4 has a 2nd tx with Exit Amount=0.
assert.Equal(t, exitRootBatch4, ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// For the Batch8, as there is only 1 exit with Amount=0, the ExitRoot
// should be 0.
// process Batch8:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[7].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
}
func TestUpdatedAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)
set := `
Type: Blockchain
AddToken(1)
CreateAccountCoordinator(0) Coord // 256
CreateAccountCoordinator(1) Coord // 257
> batch // 1
CreateAccountDeposit(0) A: 50 // 258
CreateAccountDeposit(0) B: 60 // 259
CreateAccountDeposit(1) A: 70 // 260
CreateAccountDeposit(1) B: 80 // 261
> batchL1 // 2
> batchL1 // 3
Transfer(0) A-B: 5 (126)
> batch // 4
Exit(1) B: 5 (126)
> batch // 5
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
tc.FillBlocksL1UserTxsBatchNum(blocks)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
require.Equal(t, 5, len(blocks[0].Rollup.Batches))
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
sortedKeys := func(m map[common.Idx]*common.Account) []int {
keys := make([]int, 0)
for k := range m {
keys = append(keys, int(k))
}
sort.Ints(keys)
return keys
}
for _, batch := range blocks[0].Rollup.Batches {
l2Txs := common.L2TxsToPoolL2Txs(batch.L2Txs)
ptOut, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator, batch.L1UserTxs,
batch.L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
switch batch.Batch.BatchNum {
case 1:
assert.Equal(t, 2, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 257}, sortedKeys(ptOut.UpdatedAccounts))
case 2:
assert.Equal(t, 0, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{}, sortedKeys(ptOut.UpdatedAccounts))
case 3:
assert.Equal(t, 4, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{258, 259, 260, 261}, sortedKeys(ptOut.UpdatedAccounts))
case 4:
assert.Equal(t, 2+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 258, 259}, sortedKeys(ptOut.UpdatedAccounts))
case 5:
assert.Equal(t, 1+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{257, 261}, sortedKeys(ptOut.UpdatedAccounts))
}
for idx, updAcc := range ptOut.UpdatedAccounts {
acc, err := sdb.GetAccount(idx)
require.NoError(t, err)
// If acc.Balance is 0, set it to 0 with big.NewInt so
// that the comparison succeeds. Without this, the
// comparison will not succeed because acc.Balance is
// set from a slice, and thus the internal big.Int
// buffer is not nil (big.Int.abs)
if acc.Balance.BitLen() == 0 {
acc.Balance = big.NewInt(0)
}
assert.Equal(t, acc, updAcc)
}
}
}
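The comment in the final loop above (about setting a zero balance with big.NewInt) points at a general gotcha: two numerically equal big.Int values can still fail assert.Equal, because testify falls back to reflect.DeepEqual and the internal word slice is nil for big.NewInt(0) but non-nil (length 0) when the value was decoded from bytes. A minimal standalone illustration, not part of the diff:

```go
package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	a := big.NewInt(0)                    // internal abs slice is nil
	b := new(big.Int).SetBytes([]byte{0}) // abs is a non-nil, zero-length slice

	fmt.Println(a.Cmp(b) == 0)           // true: numerically equal
	fmt.Println(reflect.DeepEqual(a, b)) // false: internal representation differs
}
```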

File diff suppressed because one or more lines are too long

View File

@@ -1,53 +0,0 @@
package txselector
import "github.com/prometheus/client_golang/prometheus"
var (
metricGetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l2_txselecton_total",
Help: "",
},
)
metricGetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l1_l2_txselecton_total",
Help: "",
},
)
metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_coordinator_txs",
Help: "",
},
)
metricSelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_user_txs",
Help: "",
},
)
metricSelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l2_txs",
Help: "",
},
)
metricDiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_discarded_l2_txs",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricGetL2TxSelection)
prometheus.MustRegister(metricGetL1L2TxSelection)
prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
prometheus.MustRegister(metricSelectedL1UserTxs)
prometheus.MustRegister(metricSelectedL2Txs)
prometheus.MustRegister(metricDiscardedL2Txs)
}
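For reference, the counters and gauges removed here were bumped from the selector itself (visible further down on the old side of this diff, e.g. metricGetL2TxSelection.Inc() and metricSelectedL2Txs.Set(...)). A minimal sketch of that pattern with the standard Prometheus client, reusing one of the gauge names above and a hypothetical recordSelection helper:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var selectedL2Txs = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "txsel_selected_l2_txs",
	Help: "number of L2 txs selected for the current batch",
})

func init() {
	// MustRegister panics on duplicate registration, hence the single init() above.
	prometheus.MustRegister(selectedL2Txs)
}

// recordSelection is a hypothetical helper: gauges are set to the size of the
// latest selection, while *_total counters would only ever be incremented.
func recordSelection(selected []int) {
	selectedL2Txs.Set(float64(len(selected)))
}

func main() {
	recordSelection([]int{1, 2, 3})
}
```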

View File

@@ -3,6 +3,7 @@ package txselector
// current: very simple version of TxSelector
import (
"bytes"
"fmt"
"math/big"
"sort"
@@ -18,6 +19,19 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// txs implements the interface Sort for an array of Tx
type txs []common.PoolL2Tx
func (t txs) Len() int {
return len(t)
}
func (t txs) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t txs) Less(i, j int) bool {
return t[i].AbsoluteFee > t[j].AbsoluteFee
}
// CoordAccount contains the data of the Coordinator account, that will be used
// to create new transactions of CreateAccountDeposit type to add new TokenID
// accounts for the Coordinator to receive the fees.
@@ -133,11 +147,9 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// included in the next batch.
func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]common.Idx,
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL2TxSelection.Inc()
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(selectionConfig, []common.L1Tx{})
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err)
}
// GetL1L2TxSelection returns the selection of L1 + L2 txs.
@@ -149,16 +161,6 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]c
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL1L2TxSelection.Inc()
coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
}
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The
@@ -189,16 +191,14 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
}
// discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch
var discardedL2Txs []common.PoolL2Tx
var l1CoordinatorTxs []common.L1Tx
positionL1 := len(l1UserTxs)
var accAuths [][]byte
// sort l2TxsRaw (cropping at MaxTx at this point)
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
for i := range discardedL2Txs {
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee"
}
l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
noncesMap := make(map[common.Idx]common.Nonce)
var l2Txs []common.PoolL2Tx
@@ -232,8 +232,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue
}
@@ -258,9 +257,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -272,8 +269,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -291,31 +287,18 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
log.Debug(err)
// Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
err.Error())
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
if l1CoordinatorTx != nil {
if accAuth != nil && l1CoordinatorTx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
// contain a valid ethereum address.
// Otherwise only create the account if we have
// the corresponding authorization
if validL2Tx.ToEthAddr == common.FFAddr {
accAuths = append(accAuths, common.EmptyEthSignature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
} else if accAuth != nil {
accAuths = append(accAuths, accAuth.Signature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}
}
if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx)
}
@@ -327,8 +310,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
"ToIdx", l2Txs[i].ToIdx) "ToIdx", l2Txs[i].ToIdx)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
"ToIdx: %d", l2Txs[i].ToIdx) l2Txs[i].ToIdx)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -340,9 +323,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
"does not correspond to the Account.EthAddr. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -356,9 +337,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
"does not correspond to the Account.BJJ. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -432,7 +411,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
log.Error(err)
// Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
continue
}
@@ -464,11 +443,6 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
}
@@ -493,7 +467,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
// case: ToEthAddr != 0x00 neither 0xff
if l2Tx.ToBJJ != common.EmptyBJJComp {
// case: ToBJJ!=0:
@@ -549,7 +524,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
DepositAmount: big.NewInt(0),
Type: common.TxTypeCreateAccountDeposit,
}
} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
l2Tx.ToBJJ != common.EmptyBJJComp {
// if idx exist for EthAddr&BJJ use it
_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
l2Tx.TokenID)
@@ -575,8 +551,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
}
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
// L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
}
return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -585,7 +560,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr &&
if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
l1CoordinatorTxs[i].TokenID == tokenID &&
l1CoordinatorTxs[i].FromBJJ == bjj {
return true
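The two comparisons shown in this hunk (== on the addresses versus bytes.Equal on their byte slices) are equivalent as far as equality goes: go-ethereum's common.Address is a fixed-size [20]byte array, so == compares by value, while bytes.Equal works on freshly allocated copies. A small standalone sketch, using an address value that appears in the tests below purely as an illustration:

```go
package main

import (
	"bytes"
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
)

func main() {
	a := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
	b := ethCommon.HexToAddress("0xc344e203a046da13b0b4467eb7b3629d0c99f6e6")

	fmt.Println(a == b)                            // true: [20]byte value comparison
	fmt.Println(bytes.Equal(a.Bytes(), b.Bytes())) // true: same bytes, two extra allocations
}
```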
@@ -595,33 +570,21 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
}
// getL2Profitable returns the profitable selection of L2Txssorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
[]common.PoolL2Tx) {
// First sort by nonce so that txs from the same account are sorted so
// that they could be applied in succession.
sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
// Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
})
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
}
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
sort.Sort(txs(l2Txs))
if len(l2Txs) < int(max) {
return l2Txs
}
l2Txs = l2Txs[:max]
// sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each
// Account is sorted, but the l2Txs can not be grouped by sender Account
// neither by Fee. This is because later on the Nonces will need to be
// sequential for the zkproof generation.
sort.Slice(l2Txs, func(i, j int) bool {
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
return l2Txs, discardedL2Txs
return l2Txs
}
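Both versions of getL2Profitable shown above do the same job: keep the highest-fee pool txs up to MaxTx and hand them back ordered by nonce, since the nonces of each account must end up sequential for the ZK proof. A condensed, self-contained sketch of the left-hand (two-pass sort) approach, using a simplified tx type instead of common.PoolL2Tx:

```go
package main

import (
	"fmt"
	"sort"
)

// tx is a simplified stand-in for common.PoolL2Tx.
type tx struct {
	Sender string
	Nonce  int
	Fee    int
}

// selectProfitable keeps the max highest-fee txs and returns them sorted by
// nonce, mirroring the nonce-sort + stable fee-sort + crop + nonce-sort
// sequence of getL2Profitable.
func selectProfitable(l2Txs []tx, max int) (selected, discarded []tx) {
	// 1) sort by nonce so same-sender txs are in application order
	sort.Slice(l2Txs, func(i, j int) bool { return l2Txs[i].Nonce < l2Txs[j].Nonce })
	// 2) stable sort by fee: equal-fee txs keep their nonce order
	sort.SliceStable(l2Txs, func(i, j int) bool { return l2Txs[i].Fee > l2Txs[j].Fee })
	if len(l2Txs) > max {
		selected, discarded = l2Txs[:max], l2Txs[max:]
	} else {
		selected = l2Txs
	}
	// 3) re-sort the selection by nonce so nonces come out sequential again
	sort.Slice(selected, func(i, j int) bool { return selected[i].Nonce < selected[j].Nonce })
	return selected, discarded
}

func main() {
	pool := []tx{{"A", 2, 5}, {"A", 0, 2}, {"A", 1, 9}, {"B", 5, 1}}
	sel, disc := selectProfitable(pool, 3)
	fmt.Println(sel)  // [{A 0 2} {A 1 9} {A 2 5}]
	fmt.Println(disc) // [{B 5 1}]
}
```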

View File

@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
@@ -48,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
BJJ: coordUser.BJJ.Public().Compress(),
AccountCreationAuth: nil,
}
// fmt.Printf("%v\n", coordAccount)
fmt.Printf("%v", coordAccount)
auth := common.AccountCreationAuth{
EthAddr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(),
@@ -106,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
})
}
hdb := historydb.NewHistoryDB(db, db, nil)
hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
@@ -424,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
// batch2
// prepare the PoolL2Txs
@@ -497,215 +497,3 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestTransferToBjj(t *testing.T) {
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
CreateAccountDeposit(1) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
addTokens(t, tc, txsel.l2db.DB())
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx.
batchPoolL2 := `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// Now the BJJ-only account for B is already created, so the transfer
// happens without an L1CoordTx that creates the user account.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// The transfer now is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx. Since it's a
// transfer of a token for which the coordinator doesn't have a fee
// account, another L1CoordTx will be created for the coordinator to
// receive the fees.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(1) B-A: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
// to receive the fees by the coordinator and another one for the
// recipient of the l2tx
assert.Equal(t, 2, len(oL1CoordTxs))
// [0] Coordinator account cration for token 1
assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
// [1] User A BJJ-only account creation for token 1
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestTransferManyFromSameAccount(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// 8 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126) // 1
PoolTransfer(0) A-B: 10 (126) // 2
PoolTransfer(0) A-B: 10 (126) // 3
PoolTransfer(0) A-B: 10 (126) // 4
PoolTransfer(0) A-B: 10 (126) // 5
PoolTransfer(0) A-B: 10 (126) // 6
PoolTransfer(0) A-B: 10 (126) // 7
PoolTransfer(0) A-B: 10 (126) // 8
PoolTransfer(0) A-B: 10 (126) // 9
PoolTransfer(0) A-B: 10 (126) // 10
PoolTransfer(0) A-B: 10 (126) // 11
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// reorder poolL2Txs so that nonces are not sorted
poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
poolL2Txs[1], poolL2Txs[10] = poolL2Txs[10], poolL2Txs[1]
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 7, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}