Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 11:26:44 +01:00)

Compare commits: feature/sq...feature/se (105 commits)
````diff
@@ -4,6 +4,10 @@ Go implementation of the Hermez node.
 ## Developing
 
+### Go version
+
+The `hermez-node` has been tested with go version 1.14
+
 ### Unit testing
 
 Running the unit tests requires a connection to a PostgreSQL database. You can
@@ -11,7 +15,7 @@ start PostgreSQL with docker easily this way (where `yourpasswordhere` should
 be your password):
 
 ```
-POSTGRES_PASS=yourpasswordhere sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
+POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
 ```
 
 Afterwards, run the tests with the password as env var:
````
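The semicolon in the docker one-liner is the actual fix: without it, `"$POSTGRES_PASS"` is expanded by the invoking shell before the prefix assignment takes effect (that prefix only reaches the environment of the `sudo docker` process), so the container was created with an empty password. With the semicolon, the assignment runs first as a normal shell statement, the expansion then sees the value, and `POSTGRES_PASS` also stays set for the test run afterwards. A minimal sketch of how the test suite consumes it, mirroring the `os.Getenv("POSTGRES_PASS")` and `db.ConnectSQLDB` calls that appear in the `api/api_test.go` diff below (the connection parameters match the `-e` flags above):

```go
package main

import (
	"os"

	"github.com/hermeznetwork/hermez-node/db"
)

func main() {
	// POSTGRES_PASS is the shell variable exported by the command above.
	pass := os.Getenv("POSTGRES_PASS")
	// ConnectSQLDB is the helper renamed from InitSQLDB in this changeset;
	// port, host, user and database name match the docker -e flags.
	database, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := database.Close(); err != nil {
			panic(err)
		}
	}()
}
```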
```diff
@@ -4,10 +4,7 @@ import (
 	"net/http"
 
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
-	"github.com/hermeznetwork/tracerr"
 )
 
 func (a *API) getAccount(c *gin.Context) {
@@ -23,16 +20,6 @@ func (a *API) getAccount(c *gin.Context) {
 		return
 	}
 
-	// Get balance from stateDB
-	account, err := a.s.LastGetAccount(*idx)
-	if err != nil {
-		retSQLErr(err, c)
-		return
-	}
-
-	apiAccount.Balance = apitypes.NewBigIntStr(account.Balance)
-	apiAccount.Nonce = account.Nonce
-
 	c.JSON(http.StatusOK, apiAccount)
 }
 
@@ -57,26 +44,6 @@ func (a *API) getAccounts(c *gin.Context) {
 		return
 	}
 
-	// Get balances from stateDB
-	if err := a.s.LastRead(func(sdb *statedb.Last) error {
-		for x, apiAccount := range apiAccounts {
-			idx, err := stringToIdx(string(apiAccount.Idx), "Account Idx")
-			if err != nil {
-				return tracerr.Wrap(err)
-			}
-			account, err := sdb.GetAccount(*idx)
-			if err != nil {
-				return tracerr.Wrap(err)
-			}
-			apiAccounts[x].Balance = apitypes.NewBigIntStr(account.Balance)
-			apiAccounts[x].Nonce = account.Nonce
-		}
-		return nil
-	}); err != nil {
-		retSQLErr(err, c)
-		return
-	}
-
 	// Build succesfull response
 	type accountResponse struct {
 		Accounts []historydb.AccountAPI `json:"accounts"`
```
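With the StateDB dependency gone, both account handlers now return the `historydb.AccountAPI` rows as-is, so the HistoryDB itself has to carry each account's Balance and Nonce. The `api/api_test.go` changes below seed those values through `common.AccountUpdate` records; a minimal sketch of that path (the block and batch numbers and the balance are illustrative only):

```go
package main

import (
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/historydb"
)

// seedBalance mirrors the AddAccountUpdates call added to TestMain below:
// after this change, account responses take Balance and Nonce from these
// HistoryDB rows instead of from the StateDB.
func seedBalance(hdb *historydb.HistoryDB, idx common.Idx) error {
	update := common.AccountUpdate{
		EthBlockNum: 0, // illustrative values
		BatchNum:    1,
		Idx:         idx,
		Nonce:       0,
		Balance:     big.NewInt(1000),
	}
	return hdb.AddAccountUpdates([]common.AccountUpdate{update})
}
```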
api/api.go (41 changed lines)

```diff
@@ -2,41 +2,19 @@ package api
 
 import (
 	"errors"
-	"sync"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/tracerr"
 )
 
-// TODO: Add correct values to constants
-const (
-	createAccountExtraFeePercentage         float64 = 2
-	createAccountInternalExtraFeePercentage float64 = 2.5
-)
-
-// Status define status of the network
-type Status struct {
-	sync.RWMutex
-	Network           Network                       `json:"network"`
-	Metrics           historydb.Metrics             `json:"metrics"`
-	Rollup            historydb.RollupVariablesAPI  `json:"rollup"`
-	Auction           historydb.AuctionVariablesAPI `json:"auction"`
-	WithdrawalDelayer common.WDelayerVariables      `json:"withdrawalDelayer"`
-	RecommendedFee    common.RecommendedFee         `json:"recommendedFee"`
-}
-
 // API serves HTTP requests to allow external interaction with the Hermez node
 type API struct {
 	h             *historydb.HistoryDB
 	cg            *configAPI
-	s             *statedb.StateDB
 	l2            *l2db.L2DB
-	status        Status
 	chainID       uint16
 	hermezAddress ethCommon.Address
 }
@@ -46,9 +24,7 @@ func NewAPI(
 	coordinatorEndpoints, explorerEndpoints bool,
 	server *gin.Engine,
 	hdb *historydb.HistoryDB,
-	sdb *statedb.StateDB,
 	l2db *l2db.L2DB,
-	config *Config,
 ) (*API, error) {
 	// Check input
 	// TODO: is stateDB only needed for explorer endpoints or for both?
@@ -58,19 +34,20 @@ func NewAPI(
 	if explorerEndpoints && hdb == nil {
 		return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
 	}
+	consts, err := hdb.GetConstants()
+	if err != nil {
+		return nil, err
+	}
 	a := &API{
 		h: hdb,
 		cg: &configAPI{
-			RollupConstants:   *newRollupConstants(config.RollupConstants),
-			AuctionConstants:  config.AuctionConstants,
-			WDelayerConstants: config.WDelayerConstants,
+			RollupConstants:   *newRollupConstants(consts.Rollup),
+			AuctionConstants:  consts.Auction,
+			WDelayerConstants: consts.WDelayer,
 		},
-		s:             sdb,
 		l2:            l2db,
-		status:        Status{},
-		chainID:       config.ChainID,
-		hermezAddress: config.HermezAddress,
+		chainID:       consts.ChainID,
+		hermezAddress: consts.HermezAddress,
 	}
 
 	// Add coordinator endpoints
```
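The constructor no longer receives the StateDB or a `*Config`: the rollup, auction and withdrawal-delayer constants, the chain ID and the contract address all come from `hdb.GetConstants()`, so whoever boots the API must store them first (TestMain below does so with `hdb.SetConstants`). A sketch of the post-change call shape, assuming the gin engine and both DB handles are initialized elsewhere:

```go
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/api"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/hermez-node/db/l2db"
)

// newAPIServer sketches the reduced NewAPI signature: no StateDB, no *Config.
// NewAPI fails unless the constants are already in the HistoryDB
// (hdb.SetConstants), exactly as TestMain below arranges.
func newAPIServer(server *gin.Engine, hdb *historydb.HistoryDB, l2DB *l2db.L2DB) (*api.API, error) {
	return api.NewAPI(
		true, // serve coordinator endpoints
		true, // serve explorer endpoints
		server,
		hdb,
		l2DB,
	)
}
```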
api/api_test.go (139 changed lines)

```diff
@@ -8,6 +8,7 @@ import (
 	"io"
 	"io/ioutil"
 	"math/big"
+	"net"
 	"net/http"
 	"os"
 	"strconv"
@@ -22,7 +23,6 @@ import (
 	"github.com/hermeznetwork/hermez-node/db"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/test"
 	"github.com/hermeznetwork/hermez-node/test/til"
@@ -39,8 +39,8 @@ type Pendinger interface {
 	New() Pendinger
 }
 
-const apiPort = ":4010"
-const apiURL = "http://localhost" + apiPort + "/"
+const apiAddr = ":4010"
+const apiURL = "http://localhost" + apiAddr + "/"
 
 var SetBlockchain = `
 Type: Blockchain
@@ -180,12 +180,13 @@ type testCommon struct {
 	auctionVars  common.AuctionVariables
 	rollupVars   common.RollupVariables
 	wdelayerVars common.WDelayerVariables
-	nextForgers  []NextForger
+	nextForgers  []historydb.NextForgerAPI
 }
 
 var tc testCommon
 var config configAPI
 var api *API
+var stateAPIUpdater *StateAPIUpdater
 
 // TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
 // emulating the task of the synchronizer in order to have data to be returned
@@ -202,26 +203,12 @@ func TestMain(m *testing.M) {
 		panic(err)
 	}
 	apiConnCon := db.NewAPICnnectionController(1, time.Second)
-	hdb := historydb.NewHistoryDB(database, apiConnCon)
-	if err != nil {
-		panic(err)
-	}
-	// StateDB
-	dir, err := ioutil.TempDir("", "tmpdb")
-	if err != nil {
-		panic(err)
-	}
-	defer func() {
-		if err := os.RemoveAll(dir); err != nil {
-			panic(err)
-		}
-	}()
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
+	hdb := historydb.NewHistoryDB(database, database, apiConnCon)
 	if err != nil {
 		panic(err)
 	}
 	// L2DB
-	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
+	l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
 	test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
 	// Config (smart contract constants)
 	chainID := uint16(0)
@@ -234,29 +221,53 @@ func TestMain(m *testing.M) {
 
 	// API
 	apiGin := gin.Default()
+	// Reset DB
+	test.WipeDB(hdb.DB())
+
+	constants := &historydb.Constants{
+		SCConsts: common.SCConsts{
+			Rollup:   _config.RollupConstants,
+			Auction:  _config.AuctionConstants,
+			WDelayer: _config.WDelayerConstants,
+		},
+		ChainID:       chainID,
+		HermezAddress: _config.HermezAddress,
+	}
+	if err := hdb.SetConstants(constants); err != nil {
+		panic(err)
+	}
+	nodeConfig := &historydb.NodeConfig{
+		MaxPoolTxs: 10,
+		MinFeeUSD:  0,
+	}
+	if err := hdb.SetNodeConfig(nodeConfig); err != nil {
+		panic(err)
+	}
+
 	api, err = NewAPI(
 		true,
 		true,
 		apiGin,
 		hdb,
-		sdb,
 		l2DB,
-		&_config,
 	)
 	if err != nil {
+		log.Error(err)
 		panic(err)
 	}
 	// Start server
-	server := &http.Server{Addr: apiPort, Handler: apiGin}
+	listener, err := net.Listen("tcp", apiAddr) //nolint:gosec
+	if err != nil {
+		panic(err)
+	}
+	server := &http.Server{Handler: apiGin}
 	go func() {
-		if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+		if err := server.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			panic(err)
 		}
 	}()
 
-	// Reset DB
-	test.WipeDB(api.h.DB())
-
 	// Genratre blockchain data with til
 	tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
 	tilCfgExtra := til.ConfigExtra{
@@ -350,19 +361,6 @@ func TestMain(m *testing.M) {
 		}
 	}
 
-	// lastBlockNum2 := blocksData[len(blocksData)-1].Block.EthBlockNum
-
-	// Add accounts to StateDB
-	for i := 0; i < len(commonAccounts); i++ {
-		if _, err := api.s.CreateAccount(commonAccounts[i].Idx, &commonAccounts[i]); err != nil {
-			panic(err)
-		}
-	}
-	// Make a checkpoint to make the accounts available in Last
-	if err := api.s.MakeCheckpoint(); err != nil {
-		panic(err)
-	}
-
 	// Generate Coordinators and add them to HistoryDB
 	const nCoords = 10
 	commonCoords := test.GenCoordinators(nCoords, commonBlocks)
@@ -470,19 +468,19 @@ func TestMain(m *testing.M) {
 	if err = api.h.AddBids(bids); err != nil {
 		panic(err)
 	}
-	bootForger := NextForger{
+	bootForger := historydb.NextForgerAPI{
 		Coordinator: historydb.CoordinatorAPI{
 			Forger: auctionVars.BootCoordinator,
 			URL:    auctionVars.BootCoordinatorURL,
 		},
 	}
 	// Set next forgers: set all as boot coordinator then replace the non boot coordinators
-	nextForgers := []NextForger{}
+	nextForgers := []historydb.NextForgerAPI{}
 	var initBlock int64 = 140
 	var deltaBlocks int64 = 40
 	for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
 		fromBlock := initBlock + deltaBlocks*int64(i-1)
-		bootForger.Period = Period{
+		bootForger.Period = historydb.Period{
 			SlotNum:   int64(i),
 			FromBlock: fromBlock,
 			ToBlock:   fromBlock + deltaBlocks - 1,
@@ -522,6 +520,12 @@ func TestMain(m *testing.M) {
 		WithdrawalDelay: uint64(3000),
 	}
 
+	stateAPIUpdater = NewStateAPIUpdater(hdb, nodeConfig, &common.SCVariables{
+		Rollup:   rollupVars,
+		Auction:  auctionVars,
+		WDelayer: wdelayerVars,
+	}, constants)
+
 	// Generate test data, as expected to be received/sended from/to the API
 	testCoords := genTestCoordinators(commonCoords)
 	testBids := genTestBids(commonBlocks, testCoords, bids)
@@ -529,13 +533,41 @@ func TestMain(m *testing.M) {
 	testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks)
 	testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs)
 	poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts)
+	// Add balance and nonce to historyDB
+	accounts := genTestAccounts(commonAccounts, testTokens)
+	accUpdates := []common.AccountUpdate{}
+	for i := 0; i < len(accounts); i++ {
+		balance := new(big.Int)
+		balance.SetString(string(*accounts[i].Balance), 10)
+		idx, err := stringToIdx(string(accounts[i].Idx), "foo")
+		if err != nil {
+			panic(err)
+		}
+		accUpdates = append(accUpdates, common.AccountUpdate{
+			EthBlockNum: 0,
+			BatchNum:    1,
+			Idx:         *idx,
+			Nonce:       0,
+			Balance:     balance,
+		})
+		accUpdates = append(accUpdates, common.AccountUpdate{
+			EthBlockNum: 0,
+			BatchNum:    1,
+			Idx:         *idx,
+			Nonce:       accounts[i].Nonce,
+			Balance:     balance,
+		})
+	}
+	if err := api.h.AddAccountUpdates(accUpdates); err != nil {
+		panic(err)
+	}
 	tc = testCommon{
 		blocks:       commonBlocks,
 		tokens:       testTokens,
 		batches:      testBatches,
 		fullBatches:  testFullBatches,
 		coordinators: testCoords,
-		accounts:     genTestAccounts(commonAccounts, testTokens),
+		accounts:     accounts,
 		txs:          testTxs,
 		exits:        testExits,
 		poolTxsToSend: poolTxsToSend,
@@ -571,21 +603,18 @@ func TestMain(m *testing.M) {
 	if err := database.Close(); err != nil {
 		panic(err)
 	}
-	if err := os.RemoveAll(dir); err != nil {
-		panic(err)
-	}
 	os.Exit(result)
 }
 
 func TestTimeout(t *testing.T) {
 	pass := os.Getenv("POSTGRES_PASS")
-	databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
+	databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)
 	apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
-	hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
+	hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
 	require.NoError(t, err)
 	// L2DB
-	l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
+	l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO)
 
 	// API
 	apiGinTO := gin.Default()
@@ -600,21 +629,21 @@ func TestTimeout(t *testing.T) {
 		<-finishWait
 	})
 	// Start server
-	serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
+	serverTO := &http.Server{Handler: apiGinTO}
+	listener, err := net.Listen("tcp", ":4444") //nolint:gosec
+	require.NoError(t, err)
 	go func() {
-		if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+		if err := serverTO.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			require.NoError(t, err)
 		}
 	}()
-	_config := getConfigTest(0)
 	_, err = NewAPI(
 		true,
 		true,
 		apiGinTO,
 		hdbTO,
-		nil,
 		l2DBTO,
-		&_config,
 	)
 	require.NoError(t, err)
 
```
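Both servers in this file switch from `ListenAndServe` to an explicit `net.Listen` followed by `Serve(listener)`. The point is ordering: the port is bound synchronously in TestMain, so by the time the goroutine enters `Serve` the socket already exists and the first test request cannot race the bind. A standalone sketch of the pattern (the mux and port are placeholders):

```go
package main

import (
	"net"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// Bind first, synchronously: a bind failure surfaces immediately, and once
	// Listen returns, clients can connect even before Serve is entered.
	listener, err := net.Listen("tcp", ":4010")
	if err != nil {
		panic(err)
	}
	server := &http.Server{Handler: mux}
	go func() {
		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
			panic(err)
		}
	}()
	// ... issue requests against http://localhost:4010/, then shut down.
	_ = server.Close()
}
```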
```diff
@@ -10,6 +10,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/lib/pq"
+	"github.com/russross/meddler"
 )
 
 const (
@@ -46,24 +47,33 @@ var (
 func retSQLErr(err error, c *gin.Context) {
 	log.Warnw("HTTP API SQL request error", "err", err)
 	errMsg := tracerr.Unwrap(err).Error()
+	retDupKey := func(errCode pq.ErrorCode) {
+		// https://www.postgresql.org/docs/current/errcodes-appendix.html
+		if errCode == "23505" {
+			c.JSON(http.StatusInternalServerError, errorMsg{
+				Message: errDuplicatedKey,
+			})
+		} else {
+			c.JSON(http.StatusInternalServerError, errorMsg{
+				Message: errMsg,
+			})
+		}
+	}
 	if errMsg == errCtxTimeout {
 		c.JSON(http.StatusServiceUnavailable, errorMsg{
 			Message: errSQLTimeout,
 		})
 	} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
-		// https://www.postgresql.org/docs/current/errcodes-appendix.html
-		if sqlErr.Code == "23505" {
-			c.JSON(http.StatusInternalServerError, errorMsg{
-				Message: errDuplicatedKey,
-			})
-		}
+		retDupKey(sqlErr.Code)
+	} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok {
+		retDupKey(sqlErr.(*pq.Error).Code)
 	} else if tracerr.Unwrap(err) == sql.ErrNoRows {
 		c.JSON(http.StatusNotFound, errorMsg{
-			Message: err.Error(),
+			Message: errMsg,
 		})
 	} else {
 		c.JSON(http.StatusInternalServerError, errorMsg{
-			Message: err.Error(),
+			Message: errMsg,
 		})
 	}
 }
```
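Previously only errors that type-asserted directly to `*pq.Error` were recognized as duplicate-key violations; errors wrapped by meddler failed that assertion and fell through to the generic branch. The new `meddler.DriverErr` branch unwraps those first. One review-worthy detail: the diff asserts `sqlErr.(*pq.Error)` without the two-value form, which would panic on a non-pq driver error. A defensive sketch of the same idea (an alternative, not what the diff ships):

```go
package main

import (
	"github.com/hermeznetwork/tracerr"
	"github.com/lib/pq"
	"github.com/russross/meddler"
)

// pqCode extracts the PostgreSQL error code from a possibly meddler-wrapped
// error. meddler.DriverErr returns the underlying driver error when err came
// from meddler; the two-value assertion avoids a panic on non-pq drivers.
func pqCode(err error) (pq.ErrorCode, bool) {
	unwrapped := tracerr.Unwrap(err)
	if pqErr, ok := unwrapped.(*pq.Error); ok {
		return pqErr.Code, true
	}
	if driverErr, ok := meddler.DriverErr(unwrapped); ok {
		if pqErr, ok := driverErr.(*pq.Error); ok {
			return pqErr.Code, true
		}
	}
	return "", false
}
```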
```diff
@@ -99,7 +99,9 @@ func TestGetSlot(t *testing.T) {
 			nil, &fetchedSlot,
 		),
 	)
-	emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
+	// ni, err := api.h.GetNodeInfoAPI()
+	// assert.NoError(t, err)
+	emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
 	assertSlot(t, emptySlot, fetchedSlot)
 
 	// Invalid slotNum
@@ -127,8 +129,10 @@ func TestGetSlots(t *testing.T) {
 	err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
 	assert.NoError(t, err)
 	allSlots := tc.slots
+	// ni, err := api.h.GetNodeInfoAPI()
+	// assert.NoError(t, err)
 	for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
-		emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
+		emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
 		allSlots = append(allSlots, emptySlot)
 	}
 	assertSlots(t, allSlots, fetchedSlots)
```
api/state.go (353 changed lines)

```diff
@@ -2,305 +2,160 @@ package api
 
 import (
 	"database/sql"
-	"fmt"
-	"math/big"
 	"net/http"
-	"time"
+	"sync"
 
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/tracerr"
 )
 
-// Network define status of the network
-type Network struct {
-	LastEthBlock  int64               `json:"lastEthereumBlock"`
-	LastSyncBlock int64               `json:"lastSynchedBlock"`
-	LastBatch     *historydb.BatchAPI `json:"lastBatch"`
-	CurrentSlot   int64               `json:"currentSlot"`
-	NextForgers   []NextForger        `json:"nextForgers"`
-}
-
-// NextForger is a representation of the information of a coordinator and the period will forge
-type NextForger struct {
-	Coordinator historydb.CoordinatorAPI `json:"coordinator"`
-	Period      Period                   `json:"period"`
-}
-
-// Period is a representation of a period
-type Period struct {
-	SlotNum       int64     `json:"slotNum"`
-	FromBlock     int64     `json:"fromBlock"`
-	ToBlock       int64     `json:"toBlock"`
-	FromTimestamp time.Time `json:"fromTimestamp"`
-	ToTimestamp   time.Time `json:"toTimestamp"`
-}
-
 func (a *API) getState(c *gin.Context) {
-	// TODO: There are no events for the buckets information, so now this information will be 0
-	a.status.RLock()
-	status := a.status //nolint
-	a.status.RUnlock()
-	c.JSON(http.StatusOK, status) //nolint
+	stateAPI, err := a.h.GetStateAPI()
+	if err != nil {
+		retBadReq(err, c)
+		return
+	}
+	c.JSON(http.StatusOK, stateAPI)
 }
 
-// SC Vars
-
-// SetRollupVariables set Status.Rollup variables
-func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
-	a.status.Lock()
-	var rollupVAPI historydb.RollupVariablesAPI
-	rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
-	rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
-	rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
-	rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
-
-	for i, bucket := range rollupVariables.Buckets {
-		var apiBucket historydb.BucketParamsAPI
-		apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
-		apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
-		apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
-		apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
-		rollupVAPI.Buckets[i] = apiBucket
-	}
-
-	rollupVAPI.SafeMode = rollupVariables.SafeMode
-	a.status.Rollup = rollupVAPI
-	a.status.Unlock()
+// StateAPIUpdater is an utility object to facilitate updating the StateAPI
+type StateAPIUpdater struct {
+	hdb    *historydb.HistoryDB
+	state  historydb.StateAPI
+	config historydb.NodeConfig
+	vars   common.SCVariablesPtr
+	consts historydb.Constants
+	rw     sync.RWMutex
+}
+
+// NewStateAPIUpdater creates a new StateAPIUpdater
+func NewStateAPIUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
+	consts *historydb.Constants) *StateAPIUpdater {
+	u := StateAPIUpdater{
+		hdb:    hdb,
+		config: *config,
+		consts: *consts,
+	}
+	u.SetSCVars(vars.AsPtr())
+	return &u
 }
 
-// SetWDelayerVariables set Status.WithdrawalDelayer variables
-func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
-	a.status.Lock()
-	a.status.WithdrawalDelayer = wDelayerVariables
-	a.status.Unlock()
+// Store the State in the HistoryDB
+func (u *StateAPIUpdater) Store() error {
+	u.rw.RLock()
+	defer u.rw.RUnlock()
+	return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
 }
 
-// SetAuctionVariables set Status.Auction variables
-func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
-	a.status.Lock()
-	var auctionAPI historydb.AuctionVariablesAPI
-
-	auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
-	auctionAPI.DonationAddress = auctionVariables.DonationAddress
-	auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
-	auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
-	auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
-	auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
-	auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
-	auctionAPI.Outbidding = auctionVariables.Outbidding
-	auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
-
-	for i, slot := range auctionVariables.DefaultSlotSetBid {
-		auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
-	}
-
-	for i, ratio := range auctionVariables.AllocationRatio {
-		auctionAPI.AllocationRatio[i] = ratio
-	}
-
-	a.status.Auction = auctionAPI
-	a.status.Unlock()
+// SetSCVars sets the smart contract vars (ony updates those that are not nil)
+func (u *StateAPIUpdater) SetSCVars(vars *common.SCVariablesPtr) {
+	u.rw.Lock()
+	defer u.rw.Unlock()
+	if vars.Rollup != nil {
+		u.vars.Rollup = vars.Rollup
+		rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
+		u.state.Rollup = *rollupVars
+	}
+	if vars.Auction != nil {
+		u.vars.Auction = vars.Auction
+		auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
+		u.state.Auction = *auctionVars
+	}
+	if vars.WDelayer != nil {
+		u.vars.WDelayer = vars.WDelayer
+		u.state.WithdrawalDelayer = *u.vars.WDelayer
+	}
+}
+
+// UpdateRecommendedFee update Status.RecommendedFee information
+func (u *StateAPIUpdater) UpdateRecommendedFee() error {
+	recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	u.rw.Lock()
+	u.state.RecommendedFee = *recommendedFee
+	u.rw.Unlock()
+	return nil
+}
+
+// UpdateMetrics update Status.Metrics information
+func (u *StateAPIUpdater) UpdateMetrics() error {
+	u.rw.RLock()
+	lastBatch := u.state.Network.LastBatch
+	u.rw.RUnlock()
+	if lastBatch == nil {
+		return nil
+	}
+	lastBatchNum := lastBatch.BatchNum
+	metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	u.rw.Lock()
+	u.state.Metrics = *metrics
+	u.rw.Unlock()
+	return nil
 }
 
-// Network
-
 // UpdateNetworkInfoBlock update Status.Network block related information
-func (a *API) UpdateNetworkInfoBlock(
-	lastEthBlock, lastSyncBlock common.Block,
-) {
-	a.status.Network.LastSyncBlock = lastSyncBlock.Num
-	a.status.Network.LastEthBlock = lastEthBlock.Num
+func (u *StateAPIUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
+	u.rw.Lock()
+	u.state.Network.LastSyncBlock = lastSyncBlock.Num
+	u.state.Network.LastEthBlock = lastEthBlock.Num
+	u.rw.Unlock()
 }
 
 // UpdateNetworkInfo update Status.Network information
-func (a *API) UpdateNetworkInfo(
+func (u *StateAPIUpdater) UpdateNetworkInfo(
 	lastEthBlock, lastSyncBlock common.Block,
 	lastBatchNum common.BatchNum, currentSlot int64,
 ) error {
-	lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
+	// Get last batch in API format
+	lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		lastBatch = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
-	lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
-	nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
+	u.rw.RLock()
+	auctionVars := u.vars.Auction
+	u.rw.RUnlock()
+	// Get next forgers
+	lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
+	nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
+		lastSyncBlock, currentSlot, lastClosedSlot)
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		nextForgers = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
-	a.status.Lock()
-	a.status.Network.LastSyncBlock = lastSyncBlock.Num
-	a.status.Network.LastEthBlock = lastEthBlock.Num
-	a.status.Network.LastBatch = lastBatch
-	a.status.Network.CurrentSlot = currentSlot
-	a.status.Network.NextForgers = nextForgers
 
-	// Update buckets withdrawals
-	bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
-	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		bucketsUpdate = nil
+	bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
+	if err == sql.ErrNoRows {
+		bucketUpdates = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
 
-	for i, bucketParams := range a.status.Rollup.Buckets {
-		for _, bucketUpdate := range bucketsUpdate {
+	u.rw.Lock()
+	// Update NodeInfo struct
+	for i, bucketParams := range u.state.Rollup.Buckets {
+		for _, bucketUpdate := range bucketUpdates {
 			if bucketUpdate.NumBucket == i {
 				bucketParams.Withdrawals = bucketUpdate.Withdrawals
-				a.status.Rollup.Buckets[i] = bucketParams
+				u.state.Rollup.Buckets[i] = bucketParams
 				break
 			}
 		}
 	}
-	a.status.Unlock()
-	return nil
-}
-
-// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
-func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
-	var slots [6]*big.Int
-
-	for i, slot := range defaultSlotSetBid {
-		bigInt, ok := new(big.Int).SetString(string(*slot), 10)
-		if !ok {
-			return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
-		}
-		slots[i] = bigInt
-	}
-
-	return slots, nil
-}
-
-// getNextForgers returns next forgers
-func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
-	secondsPerBlock := int64(15) //nolint:gomnd
-	// currentSlot and lastClosedSlot included
-	limit := uint(lastClosedSlot - currentSlot + 1)
-	bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
-	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-		return nil, tracerr.Wrap(err)
-	}
-	nextForgers := []NextForger{}
-	// Get min bid info
-	var minBidInfo []historydb.MinBidInfo
-	if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
-		// All min bids can be calculated with the last update of AuctionVariables
-		bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
-		if err != nil {
-			return nil, tracerr.Wrap(err)
-		}
-
-		minBidInfo = []historydb.MinBidInfo{{
-			DefaultSlotSetBid:        bigIntSlots,
-			DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
-		}}
-	} else {
-		// Get all the relevant updates from the DB
-		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
-		if err != nil {
-			return nil, tracerr.Wrap(err)
-		}
-	}
-	// Create nextForger for each slot
-	for i := currentSlot; i <= lastClosedSlot; i++ {
-		fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
-		toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
-		nextForger := NextForger{
-			Period: Period{
-				SlotNum:       i,
-				FromBlock:     fromBlock,
-				ToBlock:       toBlock,
-				FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
-				ToTimestamp:   lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
-			},
-		}
-		foundForger := false
-		// If there is a bid for a slot, get forger (coordinator)
-		for j := range bids {
-			slotNum := bids[j].SlotNum
-			if slotNum == i {
-				// There's a bid for the slot
-				// Check if the bid is greater than the minimum required
-				for i := 0; i < len(minBidInfo); i++ {
-					// Find the most recent update
-					if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
-						// Get min bid
-						minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
-						minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
-						// Check if the bid has beaten the minimum
-						bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
-						if !ok {
-							return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
-						}
-						if minBid.Cmp(bid) == 1 {
-							// Min bid is greater than bid, the slot will be forged by boot coordinator
-							break
-						}
-						foundForger = true
-						break
-					}
-				}
-				if !foundForger { // There is no bid or it's smaller than the minimum
-					break
-				}
-				coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
-				if err != nil {
-					return nil, tracerr.Wrap(err)
-				}
-				nextForger.Coordinator = *coordinator
-				break
-			}
-		}
-		// If there is no bid, the coordinator that will forge is boot coordinator
-		if !foundForger {
-			nextForger.Coordinator = historydb.CoordinatorAPI{
-				Forger: a.status.Auction.BootCoordinator,
-				URL:    a.status.Auction.BootCoordinatorURL,
-			}
-		}
-		nextForgers = append(nextForgers, nextForger)
-	}
-	return nextForgers, nil
-}
-
-// Metrics
-
-// UpdateMetrics update Status.Metrics information
-func (a *API) UpdateMetrics() error {
-	a.status.RLock()
-	if a.status.Network.LastBatch == nil {
-		a.status.RUnlock()
-		return nil
-	}
-	batchNum := a.status.Network.LastBatch.BatchNum
-	a.status.RUnlock()
-	metrics, err := a.h.GetMetricsAPI(batchNum)
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	a.status.Lock()
-	a.status.Metrics = *metrics
-	a.status.Unlock()
-	return nil
-}
-
-// Recommended fee
-
-// UpdateRecommendedFee update Status.RecommendedFee information
-func (a *API) UpdateRecommendedFee() error {
-	feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	a.status.Lock()
-	a.status.RecommendedFee.ExistingAccount = feeExistingAccount
-	a.status.RecommendedFee.CreatesAccount = createAccountExtraFeePercentage * feeExistingAccount
-	a.status.RecommendedFee.CreatesAccountAndRegister = createAccountInternalExtraFeePercentage * feeExistingAccount
-	a.status.Unlock()
+	u.state.Network.LastSyncBlock = lastSyncBlock.Num
+	u.state.Network.LastEthBlock = lastEthBlock.Num
+	u.state.Network.LastBatch = lastBatch
+	u.state.Network.CurrentSlot = currentSlot
+	u.state.Network.NextForgers = nextForgers
+	u.rw.Unlock()
+
 	return nil
 }
```
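The `/state` payload is no longer assembled in memory under the `Status` mutex; a `StateAPIUpdater` recomputes each piece and persists the result to the HistoryDB, and the handler simply reads it back with `GetStateAPI()`. The intended cycle, reconstructed from the test changes below (a sketch; the caller is whatever drives synchronization):

```go
package main

import (
	"github.com/hermeznetwork/hermez-node/api"
	"github.com/hermeznetwork/hermez-node/common"
)

// refreshState sketches the update cycle the tests below exercise: mutate the
// updater's in-memory copy, then Store persists it so GetStateAPI (and thus
// GET /state) can serve it.
func refreshState(u *api.StateAPIUpdater, lastEth, lastSync common.Block,
	lastBatch common.BatchNum, slot int64, vars *common.SCVariablesPtr) error {
	u.SetSCVars(vars) // only the non-nil fields are applied
	if err := u.UpdateNetworkInfo(lastEth, lastSync, lastBatch, slot); err != nil {
		return err
	}
	if err := u.UpdateMetrics(); err != nil {
		return err
	}
	if err := u.UpdateRecommendedFee(); err != nil {
		return err
	}
	return u.Store() // write the assembled state into the HistoryDB
}
```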
```diff
@@ -13,7 +13,7 @@ import (
 
 type testStatus struct {
 	Network           testNetwork                   `json:"network"`
-	Metrics           historydb.Metrics             `json:"metrics"`
+	Metrics           historydb.MetricsAPI          `json:"metrics"`
 	Rollup            historydb.RollupVariablesAPI  `json:"rollup"`
 	Auction           historydb.AuctionVariablesAPI `json:"auction"`
 	WithdrawalDelayer common.WDelayerVariables      `json:"withdrawalDelayer"`
@@ -25,14 +25,15 @@ type testNetwork struct {
 	LastSyncBlock int64                     `json:"lastSynchedBlock"`
 	LastBatch     testBatch                 `json:"lastBatch"`
 	CurrentSlot   int64                     `json:"currentSlot"`
-	NextForgers   []NextForger              `json:"nextForgers"`
+	NextForgers   []historydb.NextForgerAPI `json:"nextForgers"`
 }
 
 func TestSetRollupVariables(t *testing.T) {
-	rollupVars := &common.RollupVariables{}
-	assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true)
-	api.SetRollupVariables(tc.rollupVars)
-	assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true)
+	stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
+	require.NoError(t, stateAPIUpdater.Store())
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
 }
 
 func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -51,17 +52,19 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
 }
 
 func TestSetWDelayerVariables(t *testing.T) {
-	wdelayerVars := &common.WDelayerVariables{}
-	assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
-	api.SetWDelayerVariables(tc.wdelayerVars)
-	assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
+	stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
+	require.NoError(t, stateAPIUpdater.Store())
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
 }
 
 func TestSetAuctionVariables(t *testing.T) {
-	auctionVars := &common.AuctionVariables{}
-	assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
-	api.SetAuctionVariables(tc.auctionVars)
-	assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
+	stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
+	require.NoError(t, stateAPIUpdater.Store())
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
 }
 
 func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -85,11 +88,6 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
 }
 
 func TestUpdateNetworkInfo(t *testing.T) {
-	status := &Network{}
-	assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
-	assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
-	assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
-	assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
 	lastBlock := tc.blocks[3]
 	lastBatchNum := common.BatchNum(3)
 	currentSlotNum := int64(1)
@@ -118,62 +116,79 @@ func TestUpdateNetworkInfo(t *testing.T) {
 	err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
 	require.NoError(t, err)
 
-	err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
-	assert.NoError(t, err)
-	assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
-	assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
-	assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
-	assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
-	assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
-	assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
+	// stateAPIUpdater := NewStateAPIUpdater(hdb)
+	err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+	require.NoError(t, err)
+	require.NoError(t, stateAPIUpdater.Store())
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
+	assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
+	assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
+	assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
+	assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
+	assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
 }
 
 func TestUpdateMetrics(t *testing.T) {
 	// Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated
 	lastBlock := tc.blocks[3]
-	lastBatchNum := common.BatchNum(3)
+	lastBatchNum := common.BatchNum(12)
 	currentSlotNum := int64(1)
-	err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
-	assert.NoError(t, err)
+	err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+	require.NoError(t, err)
 
-	err = api.UpdateMetrics()
-	assert.NoError(t, err)
-	assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
-	assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
-	assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
-	assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
-	assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
-	assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
+	err = stateAPIUpdater.UpdateMetrics()
+	require.NoError(t, err)
+	require.NoError(t, stateAPIUpdater.Store())
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
+	assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
+	assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
+	assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
+	assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
+	assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
 }
 
 func TestUpdateRecommendedFee(t *testing.T) {
-	err := api.UpdateRecommendedFee()
-	assert.NoError(t, err)
-	assert.Greater(t, api.status.RecommendedFee.ExistingAccount, float64(0))
-	assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
-		api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
-	assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
-		api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
+	err := stateAPIUpdater.UpdateRecommendedFee()
+	require.NoError(t, err)
+	require.NoError(t, stateAPIUpdater.Store())
+	var minFeeUSD float64
+	if api.l2 != nil {
+		minFeeUSD = api.l2.MinFeeUSD()
+	}
+	ni, err := api.h.GetNodeInfoAPI()
+	require.NoError(t, err)
+	assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
+	// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
+	// 	ni.StateAPI.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
+	// assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountAndRegister,
+	// 	ni.StateAPI.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
 }
 
 func TestGetState(t *testing.T) {
 	lastBlock := tc.blocks[3]
-	lastBatchNum := common.BatchNum(3)
+	lastBatchNum := common.BatchNum(12)
 	currentSlotNum := int64(1)
-	api.SetRollupVariables(tc.rollupVars)
-	api.SetWDelayerVariables(tc.wdelayerVars)
-	api.SetAuctionVariables(tc.auctionVars)
-	err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
-	assert.NoError(t, err)
-	err = api.UpdateMetrics()
-	assert.NoError(t, err)
-	err = api.UpdateRecommendedFee()
-	assert.NoError(t, err)
+	stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
+		Rollup:   &tc.rollupVars,
+		Auction:  &tc.auctionVars,
+		WDelayer: &tc.wdelayerVars,
+	})
+	err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+	require.NoError(t, err)
+	err = stateAPIUpdater.UpdateMetrics()
+	require.NoError(t, err)
+	err = stateAPIUpdater.UpdateRecommendedFee()
+	require.NoError(t, err)
+	require.NoError(t, stateAPIUpdater.Store())
 
 	endpoint := apiURL + "state"
 	var status testStatus
 
-	assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
+	require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
 
 	// SC vars
 	// UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -200,13 +215,13 @@ func TestGetState(t *testing.T) {
 	// Recommended fee
 	// TODO: perform real asserts (not just greater than 0)
 	assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
-	assert.Equal(t, status.RecommendedFee.CreatesAccount,
-		status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
-	assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
-		status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
+	// assert.Equal(t, status.RecommendedFee.CreatesAccount,
+	// 	status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
+	// assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
+	// 	status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
 }
 
-func assertNextForgers(t *testing.T, expected, actual []NextForger) {
+func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
 	assert.Equal(t, len(expected), len(actual))
 	for i := range expected {
 		// ignore timestamps and other metadata
```
|||||||
@@ -1329,13 +1329,6 @@ components:
          type: string
          description: Moment in which the transaction was added to the pool.
          format: date-time
-        batchNum:
-          type: integer
-          description: Identifier of a batch. Every new forged batch increases by one the batchNum, starting at 0.
-          minimum: 0
-          maximum: 4294967295
-          nullable: true
-          example: null
        requestFromAccountIndex:
          type: string
          description: >-
@@ -1390,7 +1383,6 @@ components:
          $ref: '#/components/schemas/Token'
      example:
        amount: '100000000000000'
-        batchNum:
        fee: 0
        fromAccountIndex: hez:SCC:256
        fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn
@@ -1438,7 +1430,6 @@ components:
        - info
        - signature
        - timestamp
-        - batchNum
        - requestFromAccountIndex
        - requestToAccountIndex
        - requestToHezEthereumAddress
@@ -2578,6 +2569,21 @@ components:
          description: List of next coordinators to forge.
          items:
            $ref: '#/components/schemas/NextForger'
+    NodeConfig:
+      type: object
+      description: Configuration of the coordinator node. Note that this is specific for each coordinator.
+      properties:
+        forgeDelay:
+          type: number
+          description: |
+            Delay in seconds after which a batch is forged if the slot is
+            already committed. If set to 0s, the coordinator will continuously
+            forge at the maximum rate. Note that this is a configuration parameter of a node,
+            so each coordinator may have a different value.
+          example: 193.4
+      additionalProperties: false
+      required:
+        - forgeDelay
    State:
      type: object
      description: Gobal variables of the network
@@ -2594,6 +2600,8 @@ components:
          $ref: '#/components/schemas/StateWithdrawDelayer'
        recommendedFee:
          $ref: '#/components/schemas/RecommendedFee'
+        nodeConfig:
+          $ref: '#/components/schemas/NodeConfig'
      additionalProperties: false
      required:
        - network
@@ -2602,6 +2610,7 @@ components:
        - auction
        - withdrawalDelayer
        - recommendedFee
+        - nodeConfig
    StateNetwork:
      type: object
      description: Gobal statistics of the network
@@ -2812,6 +2821,10 @@ components:
          type: number
          description: Average fee percentage paid for L2 transactions in the last 24 hours.
          example: 1.54
+        estimatedTimeToForgeL1:
+          type: number
+          description: Estimated time needed to forge a L1 transaction, from the time it's added on the smart contract, until it's actualy forged. In seconds.
+          example: 193.4
      additionalProperties: false
      required:
        - transactionsPerBatch
@@ -2820,6 +2833,7 @@ components:
        - totalAccounts
        - totalBJJs
        - avgTransactionFee
+        - estimatedTimeToForgeL1
    PendingItems:
      type: integer
      description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with same filters. When the value is 0 it means that all items have been sent.
@@ -2916,7 +2930,7 @@ components:
          example: 101
        l1UserTotalBytes:
          type: integer
-          description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
+          description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
          example: 72
        maxL1UserTx:
          type: integer
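Note on the schema additions: both new fields surface in the `GET /state` response. A minimal sketch of a Go client decoding just the new keys; only the JSON key names (`nodeConfig.forgeDelay`, `network.estimatedTimeToForgeL1`) come from the schema above, the struct layout is illustrative.

```go
package sketch

import "encoding/json"

// stateSubset picks out only the fields added in this diff.
type stateSubset struct {
	Network struct {
		EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1"`
	} `json:"network"`
	NodeConfig struct {
		ForgeDelay float64 `json:"forgeDelay"`
	} `json:"nodeConfig"`
}

// parseState decodes a GET /state response body into the subset above.
func parseState(body []byte) (*stateSubset, error) {
	var s stateSubset
	if err := json.Unmarshal(body, &s); err != nil {
		return nil, err
	}
	return &s, nil
}
```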
@@ -2,6 +2,7 @@ package api
 
 import (
 	"errors"
+	"fmt"
 	"math/big"
 	"net/http"
 
@@ -27,6 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
 		retBadReq(err, c)
 		return
 	}
+	writeTx.ClientIP = c.ClientIP()
 	// Insert to DB
 	if err := a.l2.AddTxAPI(writeTx); err != nil {
 		retSQLErr(err, c)
@@ -169,16 +171,21 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	// Get public key
-	account, err := a.s.LastGetAccount(poolTx.FromIdx)
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
 	// Validate feeAmount
 	_, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
+	// Get public key
+	account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	// Validate TokenID
+	if poolTx.TokenID != account.TokenID {
+		return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
+			poolTx.TokenID, account.TokenID))
+	}
 	// Check signature
 	if !poolTx.VerifySignature(a.chainID, account.BJJ) {
 		return tracerr.Wrap(errors.New("wrong signature"))
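Note on the new `TokenID` check above: a pool L2 transaction must move the same token its source account holds, and a mismatch is rejected before the signature is even verified. A standalone sketch of the same rule with simplified types:

```go
package sketch

import "fmt"

// validateTokenID mirrors the consistency rule added above: the tx and its
// source account must reference the same token. Types are simplified; the
// real code compares common.TokenID values.
func validateTokenID(txTokenID, accountTokenID uint32) error {
	if txTokenID != accountTokenID {
		return fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
			txTokenID, accountTokenID)
	}
	return nil
}
```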
@@ -10,6 +10,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/iden3/go-iden3-crypto/babyjub"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // testPoolTxReceive is a struct to be used to assert the response
@@ -170,9 +171,9 @@ func TestPoolTxs(t *testing.T) {
 	fetchedTxID := common.TxID{}
 	for _, tx := range tc.poolTxsToSend {
 		jsonTxBytes, err := json.Marshal(tx)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		jsonTxReader := bytes.NewReader(jsonTxBytes)
-		assert.NoError(
+		require.NoError(
 			t, doGoodReq(
 				"POST",
 				endpoint,
@@ -187,42 +188,42 @@ func TestPoolTxs(t *testing.T) {
 	badTx.Amount = "99950000000000000"
 	badTx.Fee = 255
 	jsonTxBytes, err := json.Marshal(badTx)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	jsonTxReader := bytes.NewReader(jsonTxBytes)
 	err = doBadReq("POST", endpoint, jsonTxReader, 400)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Wrong signature
 	badTx = tc.poolTxsToSend[0]
 	badTx.FromIdx = "hez:foo:1000"
 	jsonTxBytes, err = json.Marshal(badTx)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	jsonTxReader = bytes.NewReader(jsonTxBytes)
 	err = doBadReq("POST", endpoint, jsonTxReader, 400)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Wrong to
 	badTx = tc.poolTxsToSend[0]
 	ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
 	badTx.ToEthAddr = &ethAddr
 	badTx.ToIdx = nil
 	jsonTxBytes, err = json.Marshal(badTx)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	jsonTxReader = bytes.NewReader(jsonTxBytes)
 	err = doBadReq("POST", endpoint, jsonTxReader, 400)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Wrong rq
 	badTx = tc.poolTxsToSend[0]
 	rqFromIdx := "hez:foo:30"
 	badTx.RqFromIdx = &rqFromIdx
 	jsonTxBytes, err = json.Marshal(badTx)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	jsonTxReader = bytes.NewReader(jsonTxBytes)
 	err = doBadReq("POST", endpoint, jsonTxReader, 400)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// GET
 	endpoint += "/"
 	for _, tx := range tc.poolTxsToReceive {
 		fetchedTx := testPoolTxReceive{}
-		assert.NoError(
+		require.NoError(
 			t, doGoodReq(
 				"GET",
 				endpoint+tx.TxID.String(),
@@ -233,10 +234,10 @@ func TestPoolTxs(t *testing.T) {
 	}
 	// 400, due invalid TxID
 	err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// 404, due inexistent TxID in DB
 	err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 }
 
 func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {
@@ -2,6 +2,7 @@ package batchbuilder
 
 import (
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/db/kvdb"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/txprocessor"
 	"github.com/hermeznetwork/tracerr"
@@ -28,8 +29,14 @@ type ConfigBatch struct {
 // NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
 // method
 func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
-	localStateDB, err := statedb.NewLocalStateDB(dbpath, 128, synchronizerStateDB,
-		statedb.TypeBatchBuilder, int(nLevels))
+	localStateDB, err := statedb.NewLocalStateDB(
+		statedb.Config{
+			Path:    dbpath,
+			Keep:    kvdb.DefaultKeep,
+			Type:    statedb.TypeBatchBuilder,
+			NLevels: int(nLevels),
+		},
+		synchronizerStateDB)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -47,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
 // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
 // it can just roll back the internal copy.
 func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-	return bb.localStateDB.Reset(batchNum, fromSynchronizer)
+	return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
 }
 
 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
@@ -57,7 +64,10 @@ func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBa
 	tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)
 
 	ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
-	return ptOut.ZKInputs, tracerr.Wrap(err)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	return ptOut.ZKInputs, nil
 }
 
 // LocalStateDB returns the underlying LocalStateDB
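Note: the external `NewBatchBuilder` signature is unchanged by this refactor; only the internal `statedb.NewLocalStateDB` call moves to the config-struct form. A usage sketch (import paths are assumed from the package names; the path and levels are placeholders):

```go
package sketch

import (
	"github.com/hermeznetwork/hermez-node/batchbuilder"
	"github.com/hermeznetwork/hermez-node/db/statedb"
)

// openBatchBuilder shows a caller of NewBatchBuilder after the refactor above;
// batchNum 0 and nLevels 32 are placeholder values.
func openBatchBuilder(dbpath string, syncDB *statedb.StateDB) (*batchbuilder.BatchBuilder, error) {
	return batchbuilder.NewBatchBuilder(dbpath, syncDB, 0, 32)
}
```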
@@ -15,7 +15,7 @@ func TestBatchBuilder(t *testing.T) {
 	require.Nil(t, err)
 	defer assert.Nil(t, os.RemoveAll(dir))
 
-	synchDB, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 0)
+	synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeBatchBuilder, NLevels: 0})
 	assert.Nil(t, err)
 
 	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
1 cli/node/.gitignore vendored
@@ -1,2 +1,3 @@
 cfg.example.secret.toml
 cfg.toml
+node
@@ -2,6 +2,10 @@
 
 This is the main cli for the node
 
+## Go version
+
+The `hermez-node` has been tested with go version 1.14
+
 ## Usage
 
 ```
@@ -65,29 +69,64 @@ when running the coordinator in sync mode
 - The node requires a PostgreSQL database. The parameters of the server and
   database must be set in the `PostgreSQL` section.
 
+## Building
+
+*All commands assume you are at the `cli/node` directory.*
+
+Building the node requires using the packr utility to bundle the database
+migrations inside the resulting binary. Install the packr utility with:
+```
+cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
+```
+
+Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
+not be found.
+
+Now build the node executable:
+```
+cd ../../db && packr2 && cd -
+go build .
+cd ../../db && packr2 clean && cd -
+```
+
+The executable is `node`.
+
 ## Usage Examples
 
+The following commands assume you have built the node previously. You can also
+run the following examples by replacing `./node` with `go run .` and executing
+them in the `cli/node` directory to build from source and run at the same time.
+
 Run the node in mode synchronizer:
 ```
-go run . --mode sync --cfg cfg.buidler.toml run
+./node --mode sync --cfg cfg.buidler.toml run
 ```
 
 Run the node in mode coordinator:
 ```
-go run . --mode coord --cfg cfg.buidler.toml run
+./node --mode coord --cfg cfg.buidler.toml run
 ```
 
 Import an ethereum private key into the keystore:
 ```
-go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
+./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
 ```
 
 Generate a new BabyJubJub key pair:
 ```
-go run . --mode coord --cfg cfg.buidler.toml genbjj
+./node --mode coord --cfg cfg.buidler.toml genbjj
 ```
 
-Wipe the entier SQL database (this will destroy all synchronized and pool data):
+Wipe the entier SQL database (this will destroy all synchronized and pool
+data):
 ```
-go run . --mode coord --cfg cfg.buidler.toml wipesql
+./node --mode coord --cfg cfg.buidler.toml wipesql
+```
+
+Discard all synchronized blocks and associated state up to a given block
+number. This command is useful in case the synchronizer reaches an invalid
+state and you want to roll back a few blocks and try again (maybe with some
+fixes in the code).
+```
+./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
 ```
@@ -14,17 +14,23 @@ Type = "bitfinexV2"
 [Debug]
 APIAddress = "localhost:12345"
 MeddlerLogs = true
+GinDebugMode = true
 
 [StateDB]
 Path = "/tmp/iden3-test/hermez/statedb"
 Keep = 256
 
 [PostgreSQL]
-Port = 5432
-Host = "localhost"
-User = "hermez"
-Password = "yourpasswordhere"
-Name = "hermez"
+PortWrite = 5432
+HostWrite = "localhost"
+UserWrite = "hermez"
+PasswordWrite = "yourpasswordhere"
+NameWrite = "hermez"
+# PortRead = 5432
+# HostRead = "localhost"
+# UserRead = "hermez"
+# PasswordRead = "yourpasswordhere"
+# NameRead = "hermez"
 
 [Web3]
 URL = "http://localhost:8545"
@@ -41,15 +47,22 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
 TokenHEZName = "Hermez Network Token"
 
 [Coordinator]
-# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
 # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
-ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
 # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
+MinimumForgeAddressBalance = 0
 ConfirmBlocks = 10
-L1BatchTimeoutPerc = 0.999
+L1BatchTimeoutPerc = 0.6
+StartSlotBlocksDelay = 2
+ScheduleBatchBlocksAheadCheck = 3
+SendBatchBlocksMarginCheck = 1
 ProofServerPollInterval = "1s"
 ForgeRetryInterval = "500ms"
 SyncRetryInterval = "1s"
+ForgeDelay = "10s"
+ForgeNoTxsDelay = "0s"
+PurgeByExtDelInterval = "1m"
 
 [Coordinator.FeeAccount]
 Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -60,6 +73,7 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
 [Coordinator.L2DB]
 SafetyPeriod = 10
 MaxTxs = 512
+MinFeeUSD = 0.0
 TTL = "24h"
 PurgeBatchDelay = 10
 InvalidateBatchDelay = 20
@@ -80,21 +94,28 @@ MaxTx = 512
 NLevels = 32
 
 [Coordinator.EthClient]
-ReceiptTimeout = "60s"
-ReceiptLoopInterval = "500ms"
 CheckLoopInterval = "500ms"
 Attempts = 4
 AttemptsDelay = "500ms"
-CallGasLimit = 300000
-GasPriceDiv = 100
+TxResendTimeout = "2m"
+NoReuseNonce = false
+MaxGasPrice = "5000000000"
+GasPriceIncPerc = 10
 
 [Coordinator.EthClient.Keystore]
 Path = "/tmp/iden3-test/hermez/ethkeystore"
 Password = "yourpasswordhere"
 
+[Coordinator.EthClient.ForgeBatchGasCost]
+Fixed = 500000
+L1UserTx = 8000
+L1CoordTx = 9000
+L2Tx = 1
+
 [Coordinator.API]
 Coordinator = true
 
 [Coordinator.Debug]
 BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
 LightScrypt = true
+# RollupVerifierIndex = 0
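Note on the new `[Coordinator.EthClient.ForgeBatchGasCost]` section: the field names suggest a linear gas model for `forgeBatch` calls. A sketch under that assumption (the formula is inferred from the field names, not taken from the node source):

```go
package sketch

// forgeBatchGasLimit combines the configured costs linearly: a fixed base plus
// a per-transaction cost for each transaction type. Assumed model.
func forgeBatchGasLimit(fixed, l1UserCost, l1CoordCost, l2Cost uint64,
	nL1User, nL1Coord, nL2 uint64) uint64 {
	return fixed + nL1User*l1UserCost + nL1Coord*l1CoordCost + nL2*l2Cost
}

// With the values from the config above, a batch with 10 L1 user txs, 2
// coordinator txs and 300 L2 txs would get:
//   500000 + 10*8000 + 2*9000 + 300*1 = 598300 gas
```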
183 cli/node/main.go
@@ -11,10 +11,13 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/hermeznetwork/hermez-node/config"
 	dbUtils "github.com/hermeznetwork/hermez-node/db"
+	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/node"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/iden3/go-iden3-crypto/babyjub"
+	"github.com/jmoiron/sqlx"
 	"github.com/urfave/cli/v2"
 )
 
@@ -23,6 +26,7 @@ const (
 	flagMode  = "mode"
 	flagSK    = "privatekey"
 	flagYes   = "yes"
+	flagBlock = "block"
 	modeSync  = "sync"
 	modeCoord = "coord"
 )
@@ -87,11 +91,11 @@ func cmdWipeSQL(c *cli.Context) error {
 		}
 	}
 	db, err := dbUtils.ConnectSQLDB(
-		cfg.PostgreSQL.Port,
-		cfg.PostgreSQL.Host,
-		cfg.PostgreSQL.User,
-		cfg.PostgreSQL.Password,
-		cfg.PostgreSQL.Name,
+		cfg.PostgreSQL.PortWrite,
+		cfg.PostgreSQL.HostWrite,
+		cfg.PostgreSQL.UserWrite,
+		cfg.PostgreSQL.PasswordWrite,
+		cfg.PostgreSQL.NameWrite,
 	)
 	if err != nil {
 		return tracerr.Wrap(err)
@@ -103,17 +107,7 @@ func cmdWipeSQL(c *cli.Context) error {
 	return nil
 }
 
-func cmdRun(c *cli.Context) error {
-	cfg, err := parseCli(c)
-	if err != nil {
-		return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
-	}
-	node, err := node.NewNode(cfg.mode, cfg.node)
-	if err != nil {
-		return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
-	}
-	node.Start()
-
+func waitSigInt() {
 	stopCh := make(chan interface{})
 
 	// catch ^C to send the stop signal
@@ -134,11 +128,101 @@ func cmdRun(c *cli.Context) error {
 		}
 	}()
 	<-stopCh
+}
+
+func cmdRun(c *cli.Context) error {
+	cfg, err := parseCli(c)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
+	}
+	node, err := node.NewNode(cfg.mode, cfg.node)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
+	}
+	node.Start()
+	waitSigInt()
 	node.Stop()
 
 	return nil
 }
 
+func cmdServeAPI(c *cli.Context) error {
+	cfg, err := parseCliAPIServer(c)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
+	}
+	srv, err := node.NewAPIServer(cfg.mode, cfg.server)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
+	}
+	srv.Start()
+	waitSigInt()
+	srv.Stop()
+
+	return nil
+}
+
+func cmdDiscard(c *cli.Context) error {
+	_cfg, err := parseCli(c)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
+	}
+	cfg := _cfg.node
+	blockNum := c.Int64(flagBlock)
+	log.Infof("Discarding all blocks up to block %v...", blockNum)
+
+	dbWrite, err := dbUtils.InitSQLDB(
+		cfg.PostgreSQL.PortWrite,
+		cfg.PostgreSQL.HostWrite,
+		cfg.PostgreSQL.UserWrite,
+		cfg.PostgreSQL.PasswordWrite,
+		cfg.PostgreSQL.NameWrite,
+	)
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+	}
+	var dbRead *sqlx.DB
+	if cfg.PostgreSQL.HostRead == "" {
+		dbRead = dbWrite
+	} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
+		return tracerr.Wrap(fmt.Errorf(
+			"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
+		))
+	} else {
+		dbRead, err = dbUtils.InitSQLDB(
+			cfg.PostgreSQL.PortRead,
+			cfg.PostgreSQL.HostRead,
+			cfg.PostgreSQL.UserRead,
+			cfg.PostgreSQL.PasswordRead,
+			cfg.PostgreSQL.NameRead,
+		)
+		if err != nil {
+			return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+		}
+	}
+	historyDB := historydb.NewHistoryDB(dbRead, dbWrite, nil)
+	if err := historyDB.Reorg(blockNum); err != nil {
+		return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
+	}
+	batchNum, err := historyDB.GetLastBatchNum()
+	if err != nil {
+		return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
+	}
+	l2DB := l2db.NewL2DB(
+		dbRead, dbWrite,
+		cfg.Coordinator.L2DB.SafetyPeriod,
+		cfg.Coordinator.L2DB.MaxTxs,
+		cfg.Coordinator.L2DB.MinFeeUSD,
+		cfg.Coordinator.L2DB.TTL.Duration,
+		nil,
+	)
+	if err := l2DB.Reorg(batchNum); err != nil {
+		return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
+	}
+
+	return nil
+}
+
 // Config is the configuration of the hermez node execution
 type Config struct {
 	mode node.Mode
@@ -160,20 +244,59 @@ func getConfig(c *cli.Context) (*Config, error) {
 	var cfg Config
 	mode := c.String(flagMode)
 	nodeCfgPath := c.String(flagCfg)
-	if nodeCfgPath == "" {
-		return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
-	}
 	var err error
 	switch mode {
 	case modeSync:
 		cfg.mode = node.ModeSynchronizer
-		cfg.node, err = config.LoadNode(nodeCfgPath)
+		cfg.node, err = config.LoadNode(nodeCfgPath, false)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
 	case modeCoord:
 		cfg.mode = node.ModeCoordinator
-		cfg.node, err = config.LoadCoordinator(nodeCfgPath)
+		cfg.node, err = config.LoadNode(nodeCfgPath, true)
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+	default:
+		return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
+	}
+
+	return &cfg, nil
+}
+
+// ConfigAPIServer is the configuration of the api server execution
+type ConfigAPIServer struct {
+	mode   node.Mode
+	server *config.APIServer
+}
+
+func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
+	cfg, err := getConfigAPIServer(c)
+	if err != nil {
+		if err := cli.ShowAppHelp(c); err != nil {
+			panic(err)
+		}
+		return nil, tracerr.Wrap(err)
+	}
+	return cfg, nil
+}
+
+func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
+	var cfg ConfigAPIServer
+	mode := c.String(flagMode)
+	nodeCfgPath := c.String(flagCfg)
+	var err error
+	switch mode {
+	case modeSync:
+		cfg.mode = node.ModeSynchronizer
+		cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+	case modeCoord:
+		cfg.mode = node.ModeCoordinator
+		cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
@@ -239,6 +362,24 @@ func main() {
 			Usage:   "Run the hermez-node in the indicated mode",
 			Action:  cmdRun,
 		},
+		{
+			Name:    "serveapi",
+			Aliases: []string{},
+			Usage:   "Serve the API only",
+			Action:  cmdServeAPI,
+		},
+		{
+			Name:    "discard",
+			Aliases: []string{},
+			Usage:   "Discard blocks up to a specified block number",
+			Action:  cmdDiscard,
+			Flags: []cli.Flag{
+				&cli.Int64Flag{
+					Name:     flagBlock,
+					Usage:    "last block number to keep",
+					Required: false,
+				}},
+		},
 	}
 
 	err := app.Run(os.Args)
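Note on the refactor above: the ^C handling moves out of `cmdRun` into `waitSigInt` so that `cmdServeAPI` can block on the same helper (the full helper wires the signal into a stop channel). A minimal standard-library version of the same pattern:

```go
package sketch

import (
	"os"
	"os/signal"
)

// waitForInterrupt blocks until the process receives an interrupt (^C); the
// same shape as the waitSigInt helper introduced above.
func waitForInterrupt() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch
}
```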
@@ -263,3 +263,13 @@ type IdxNonce struct {
 	Idx   Idx   `db:"idx"`
 	Nonce Nonce `db:"nonce"`
 }
+
+// AccountUpdate represents an account balance and/or nonce update after a
+// processed batch
+type AccountUpdate struct {
+	EthBlockNum int64    `meddler:"eth_block_num"`
+	BatchNum    BatchNum `meddler:"batch_num"`
+	Idx         Idx      `meddler:"idx"`
+	Nonce       Nonce    `meddler:"nonce"`
+	Balance     *big.Int `meddler:"balance,bigint"`
+}
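Note: a hedged sketch of populating the new struct after a processed batch; all concrete values below are placeholders.

```go
package sketch

import (
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

// Example values only: one account update recorded for batch 12.
var update = common.AccountUpdate{
	EthBlockNum: 8061330,
	BatchNum:    common.BatchNum(12),
	Idx:         common.Idx(256),
	Nonce:       common.Nonce(1),
	Balance:     big.NewInt(100000000000000),
}
```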
@@ -1,21 +1,30 @@
 package common
 
 import (
-	"encoding/binary"
-	"strconv"
 	"time"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	ethMath "github.com/ethereum/go-ethereum/common/math"
 	ethCrypto "github.com/ethereum/go-ethereum/crypto"
+	ethSigner "github.com/ethereum/go-ethereum/signer/core"
+	"github.com/hermeznetwork/tracerr"
 	"github.com/iden3/go-iden3-crypto/babyjub"
 )
 
 // AccountCreationAuthMsg is the message that is signed to authorize a Hermez
 // account creation
-const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollup account creation"
+const AccountCreationAuthMsg = "Account creation"
 
-// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
-const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
+// EIP712Version is the used version of the EIP-712
+const EIP712Version = "1"
+
+// EIP712Provider defines the Provider for the EIP-712
+const EIP712Provider = "Hermez Network"
+
+var (
+	// EmptyEthSignature is an ethereum signature of all zeroes
+	EmptyEthSignature = make([]byte, 65)
+)
 
 // AccountCreationAuth authorizations sent by users to the L2DB, to be used for
 // account creations when necessary
@@ -26,27 +35,64 @@ type AccountCreationAuth struct {
 	Timestamp time.Time `meddler:"timestamp,utctime"`
 }
 
+// toHash returns a byte array to be hashed from the AccountCreationAuth, which
+// follows the EIP-712 encoding
 func (a *AccountCreationAuth) toHash(chainID uint16,
-	hermezContractAddr ethCommon.Address) []byte {
-	var chainIDBytes [2]byte
-	binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
-	// [EthPrefix | AccountCreationAuthMsg | compressedBJJ | chainID | hermezContractAddr]
-	var b []byte
-	b = append(b, []byte(AccountCreationAuthMsg)...)
-	b = append(b, SwapEndianness(a.BJJ[:])...) // for js implementation compatibility
-	b = append(b, chainIDBytes[:]...)
-	b = append(b, hermezContractAddr[:]...)
-
-	ethPrefix := EthMsgPrefix + strconv.Itoa(len(b))
-	return append([]byte(ethPrefix), b...)
+	hermezContractAddr ethCommon.Address) ([]byte, error) {
+	chainIDFormatted := ethMath.NewHexOrDecimal256(int64(chainID))
+
+	signerData := ethSigner.TypedData{
+		Types: ethSigner.Types{
+			"EIP712Domain": []ethSigner.Type{
+				{Name: "name", Type: "string"},
+				{Name: "version", Type: "string"},
+				{Name: "chainId", Type: "uint256"},
+				{Name: "verifyingContract", Type: "address"},
+			},
+			"Authorise": []ethSigner.Type{
+				{Name: "Provider", Type: "string"},
+				{Name: "Authorisation", Type: "string"},
+				{Name: "BJJKey", Type: "bytes32"},
+			},
+		},
+		PrimaryType: "Authorise",
+		Domain: ethSigner.TypedDataDomain{
+			Name:              EIP712Provider,
+			Version:           EIP712Version,
+			ChainId:           chainIDFormatted,
+			VerifyingContract: hermezContractAddr.Hex(),
+		},
+		Message: ethSigner.TypedDataMessage{
+			"Provider":      EIP712Provider,
+			"Authorisation": AccountCreationAuthMsg,
+			"BJJKey":        SwapEndianness(a.BJJ[:]),
+		},
+	}
+
+	domainSeparator, err := signerData.HashStruct("EIP712Domain", signerData.Domain.Map())
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	typedDataHash, err := signerData.HashStruct(signerData.PrimaryType, signerData.Message)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+
+	rawData := []byte{0x19, 0x01} // "\x19\x01"
+	rawData = append(rawData, domainSeparator...)
+	rawData = append(rawData, typedDataHash...)
+	return rawData, nil
 }
 
 // HashToSign returns the hash to be signed by the Etherum address to authorize
-// the account creation
+// the account creation, which follows the EIP-712 encoding
 func (a *AccountCreationAuth) HashToSign(chainID uint16,
 	hermezContractAddr ethCommon.Address) ([]byte, error) {
-	b := a.toHash(chainID, hermezContractAddr)
-	return ethCrypto.Keccak256Hash(b).Bytes(), nil
+	b, err := a.toHash(chainID, hermezContractAddr)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	return ethCrypto.Keccak256(b), nil
 }
 
 // Sign signs the account creation authorization message using the provided
@@ -54,16 +100,17 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
 // should do an ethereum signature using the account corresponding to
 // `a.EthAddr`. The `signHash` function is used to make signig flexible: in
 // tests we sign directly using the private key, outside tests we sign using
-// the keystore (which never exposes the private key).
+// the keystore (which never exposes the private key). Sign follows the EIP-712
+// encoding.
 func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
 	chainID uint16, hermezContractAddr ethCommon.Address) error {
 	hash, err := a.HashToSign(chainID, hermezContractAddr)
 	if err != nil {
-		return err
+		return tracerr.Wrap(err)
 	}
 	sig, err := signHash(hash)
 	if err != nil {
-		return err
+		return tracerr.Wrap(err)
 	}
 	sig[64] += 27
 	a.Signature = sig
@@ -72,7 +119,8 @@ func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
 }
 
 // VerifySignature ensures that the Signature is done with the EthAddr, for the
-// chainID and hermezContractAddress passed by parameter
+// chainID and hermezContractAddress passed by parameter. VerifySignature
+// follows the EIP-712 encoding.
 func (a *AccountCreationAuth) VerifySignature(chainID uint16,
 	hermezContractAddr ethCommon.Address) bool {
 	// Calculate hash to be signed
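Note on the new EIP-712 flow: `Sign` takes a `signHash` callback, so tests can sign the `HashToSign` digest with a raw key while production code goes through the keystore. A sketch of that path; the private key is a throwaway placeholder, the chain id and contract address are the values from the first test vector below, and `BJJ` is left at its zero value for brevity:

```go
package sketch

import (
	"log"

	ethCommon "github.com/ethereum/go-ethereum/common"
	ethCrypto "github.com/ethereum/go-ethereum/crypto"

	"github.com/hermeznetwork/hermez-node/common"
)

func signAuthExample() {
	// Throwaway placeholder key; never use a hardcoded key in production.
	ethSk, err := ethCrypto.HexToECDSA(
		"0000000000000000000000000000000000000000000000000000000000000001")
	if err != nil {
		log.Fatal(err)
	}
	auth := &common.AccountCreationAuth{
		EthAddr: ethCrypto.PubkeyToAddress(ethSk.PublicKey),
		// BJJ would be set to the user's compressed BabyJubJub public key.
	}
	hermezContractAddr := ethCommon.HexToAddress(
		"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf")
	// Sign hashes the EIP-712 payload and stores the 65-byte signature in
	// auth.Signature (with the +27 recovery-id adjustment applied above).
	if err := auth.Sign(func(hash []byte) ([]byte, error) {
		return ethCrypto.Sign(hash, ethSk)
	}, uint16(4), hermezContractAddr); err != nil {
		log.Fatal(err)
	}
}
```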
@@ -39,7 +39,7 @@ func TestAccountCreationAuthSignVerify(t *testing.T) {
 	// Hash and sign manually and compare the generated signature
 	hash, err := a.HashToSign(chainID, hermezContractAddr)
 	require.NoError(t, err)
-	assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
+	assert.Equal(t, "9414667457e658dd31949b82996b75c65a055512244c3bbfd22ff56add02ba65",
 		hex.EncodeToString(hash))
 	sig, err := ethCrypto.Sign(hash, ethSk)
 	require.NoError(t, err)
@@ -75,9 +75,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
 		pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7",
 		chainID:   uint16(4),
 		hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
-		toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700047e5f4552091a69125d5dfcb7b8c2659029395bdf",
+		toHashExpected: "190189658bba487e11c7da602676ee32bc90b77d3f32a305b147e4f3c3b35f19672e5d84ccc38d0ab245c469b719549d837113465c2abf9972c49403ca6fd10ed3dc",
-		hashExpected: "39afea52d843a4de905b6b5ebb0ee8c678141f711d96d9b429c4aec10ef9911f",
+		hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2",
-		sigExpected: "73d10d6ecf06ee8a5f60ac90f06b78bef9c650f414ba3ac73e176dc32e896159147457e9c86f0b4bd60fdaf2c0b2aec890a7df993d69a4805e242a6b845ebf231c",
+		sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b",
 	}
 	tv1 := testVector{
 		ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
@@ -85,9 +85,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
 		pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d",
 		chainID:   uint16(0),
 		hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
-		toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00002b5ad5c4795c026514f8317c7a215e218dccd6cf",
+		toHashExpected: "1901dafbc253dedf90d6421dc6e25d5d9efc6985133cb2a8d363d0a081a0e3eddddc65f603a88de36aaeabd3b4cf586538c7f3fd50c94780530a3707c8c14ad9fd11",
-		hashExpected: "89a3895993a4736232212e59566294feb3da227af44375daf3307dcad5451d5d",
+		hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959",
-		sigExpected: "bb4156156c705494ad5f99030342c64657e51e2994750f92125717c40bf56ad632044aa6bd00979feea92c417b552401e65fe5f531f15010d9d1c278da8be1df1b",
+		sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c",
 	}
 	tv2 := testVector{
 		ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
@@ -95,9 +95,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
 		pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52",
 		chainID:   uint16(31337), // =0x7a69
 		hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c",
-		toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b527a69f4e77e5da47ac3125140c470c71cbca77b5c638c",
+		toHashExpected: "190167617949b934d7e01add4009cd3d47415a26727b7d6288e5dce33fb3721d5a1a9ce511b19b694c9aaf8183f4987ed752f24884c54c003d11daa2e98c7547a79e",
-		hashExpected: "4f6ead01278ba4597d4720e37482f585a713497cea994a95209f4c57a963b4a7",
+		hashExpected: "157b570c597e615b8356ce008ac39f43bc9b6d50080bc07d968031b9378acbbb",
-		sigExpected: "43b5818802a137a72a190c1d8d767ca507f7a4804b1b69b5e055abf31f4f2b476c80bb1ba63260d95610f6f831420d32130e7f22fec5d76e16644ddfcedd0d441c",
+		sigExpected: "a0766181102428b5672e523dc4b905c10ddf025c10dbd0b3534ef864632a14652737610041c670b302fc7dca28edd5d6eac42b72d69ce58da8ce21287b244e381b",
 	}
 	tvs = append(tvs, tv0)
 	tvs = append(tvs, tv1)
@@ -122,10 +122,10 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
 			BJJ: pkComp,
 		}
 
-		toHash := a.toHash(chainID, hermezContractAddr)
+		toHash, err := a.toHash(chainID, hermezContractAddr)
+		require.NoError(t, err)
 		assert.Equal(t, tv.toHashExpected,
 			hex.EncodeToString(toHash))
-		assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))
 
 		msg, err := a.HashToSign(chainID, hermezContractAddr)
 		require.NoError(t, err)
@@ -27,6 +27,24 @@ type Batch struct {
 	TotalFeesUSD *float64 `meddler:"total_fees_usd"`
 }
 
+// NewEmptyBatch creates a new empty batch
+func NewEmptyBatch() *Batch {
+	return &Batch{
+		BatchNum:           0,
+		EthBlockNum:        0,
+		ForgerAddr:         ethCommon.Address{},
+		CollectedFees:      make(map[TokenID]*big.Int),
+		FeeIdxsCoordinator: make([]Idx, 0),
+		StateRoot:          big.NewInt(0),
+		NumAccounts:        0,
+		LastIdx:            0,
+		ExitRoot:           big.NewInt(0),
+		ForgeL1TxsNum:      nil,
+		SlotNum:            0,
+		TotalFeesUSD:       nil,
+	}
+}
+
 // BatchNum identifies a batch
 type BatchNum int64
 
@@ -59,6 +77,7 @@ type BatchData struct {
 	L1CoordinatorTxs []L1Tx
 	L2Txs            []L2Tx
 	CreatedAccounts  []Account
+	UpdatedAccounts  []AccountUpdate
 	ExitTree         []ExitInfo
 	Batch            Batch
 }
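Note: `NewEmptyBatch` pre-initializes the pointer and map fields (`StateRoot`, `ExitRoot`, `CollectedFees`), so the returned batch is usable immediately. Trivial usage:

```go
package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	b := common.NewEmptyBatch()
	// Pointer fields come pre-initialized (big.NewInt(0)), not nil.
	fmt.Println(b.StateRoot, b.ExitRoot, len(b.CollectedFees))
}
```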
33 common/eth.go Normal file
@@ -0,0 +1,33 @@
+package common
+
+// SCVariables joins all the smart contract variables in a single struct
+type SCVariables struct {
+	Rollup   RollupVariables   `validate:"required"`
+	Auction  AuctionVariables  `validate:"required"`
+	WDelayer WDelayerVariables `validate:"required"`
+}
+
+// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
+// original SCVariables
+func (v *SCVariables) AsPtr() *SCVariablesPtr {
+	return &SCVariablesPtr{
+		Rollup:   &v.Rollup,
+		Auction:  &v.Auction,
+		WDelayer: &v.WDelayer,
+	}
+}
+
+// SCVariablesPtr joins all the smart contract variables as pointers in a single
+// struct
+type SCVariablesPtr struct {
+	Rollup   *RollupVariables   `validate:"required"`
+	Auction  *AuctionVariables  `validate:"required"`
+	WDelayer *WDelayerVariables `validate:"required"`
+}
+
+// SCConsts joins all the smart contract constants in a single struct
+type SCConsts struct {
+	Rollup   RollupConstants
+	Auction  AuctionConstants
+	WDelayer WDelayerConstants
+}
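Note: `AsPtr` bridges the value and pointer flavours; code holding a full `SCVariables` can hand the state API updater the `SCVariablesPtr` it expects, as in the `stateAPIUpdater.SetSCVars` call in the API test diff above. Sketch, with the updater type reduced to the one method used:

```go
package sketch

import "github.com/hermeznetwork/hermez-node/common"

// pushSCVars forwards a tracked SCVariables value to any consumer of the
// pointer form; `updater` stands in for the stateAPIUpdater used in the tests.
func pushSCVars(updater interface{ SetSCVars(*common.SCVariablesPtr) },
	vars common.SCVariables) {
	updater.SetSCVars(vars.AsPtr())
}
```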
@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
 	if blockNum >= c.GenesisBlockNum {
 		return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 	}
-	return -1
+	// This result will be negative
+	return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 }
 
 // SlotBlocks returns the first and the last block numbers included in that slot
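Note on the change above: pre-genesis block numbers are no longer clamped to -1; the same division is extrapolated backwards. A worked example with hypothetical constants:

```go
package main

import "fmt"

// slotNum reproduces the formula from SlotNum above with hypothetical
// constants GenesisBlockNum = 1000 and BlocksPerSlot = 40.
func slotNum(blockNum int64) int64 {
	const genesisBlockNum, blocksPerSlot int64 = 1000, 40
	return (blockNum - genesisBlockNum) / blocksPerSlot
}

func main() {
	fmt.Println(slotNum(1085)) // 2: within the third slot after genesis
	fmt.Println(slotNum(880))  // -3: before genesis, extrapolated instead of -1
	// Go division truncates toward zero, so a block less than one full slot
	// before genesis still maps to slot 0.
	fmt.Println(slotNum(990)) // 0
}
```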
@@ -24,8 +24,8 @@ const (
 	// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
 	RollupConstL1CoordinatorTotalBytes = 101
 	// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
-	// [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
-	RollupConstL1UserTotalBytes = 72
+	// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
+	RollupConstL1UserTotalBytes = 78
 	// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
 	RollupConstMaxL1UserTx = 128
 	// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
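Note: the new total follows directly from the widened amount encoding; the per-field sizes in the comment sum to 78 as sketched below.

```go
package sketch

// Per-field sizes from the comment above; the two amount fields grow from
// 2 bytes (Float16) to 5 bytes (Float40), moving the total from 72 to 78.
const (
	fromEthAddr          = 20
	fromBJJCompressed    = 32
	fromIdx              = 6
	depositAmountFloat40 = 5
	amountFloat40        = 5
	tokenID              = 4
	toIdx                = 6

	l1UserTotalBytes = fromEthAddr + fromBJJCompressed + fromIdx +
		depositAmountFloat40 + amountFloat40 + tokenID + toIdx // = 78
)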
common/float16.go (deleted)
@@ -1,131 +0,0 @@
-// Package common Float16 provides methods to work with Hermez custom half float
-// precision, 16 bits, codification internally called Float16 has been adopted
-// to encode large integers. This is done in order to save bits when L2
-// transactions are published.
-//nolint:gomnd
-package common
-
-import (
-	"encoding/binary"
-	"errors"
-	"math/big"
-
-	"github.com/hermeznetwork/tracerr"
-)
-
-var (
-	// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
-	ErrRoundingLoss = errors.New("input value causes rounding loss")
-)
-
-// Float16 represents a float in a 16 bit format
-type Float16 uint16
-
-// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
-func (f16 Float16) Bytes() []byte {
-	var b [2]byte
-	binary.BigEndian.PutUint16(b[:], uint16(f16))
-	return b[:]
-}
-
-// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
-func Float16FromBytes(b []byte) *Float16 {
-	f16 := Float16(binary.BigEndian.Uint16(b[:2]))
-	return &f16
-}
-
-// BigInt converts the Float16 to a *big.Int integer
-func (f16 *Float16) BigInt() *big.Int {
-	fl := int64(*f16)
-
-	m := big.NewInt(fl & 0x3FF)
-	e := big.NewInt(fl >> 11)
-	e5 := (fl >> 10) & 0x01
-
-	exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
-	res := m.Mul(m, exp)
-
-	if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
-		res.Add(res, exp.Div(exp, big.NewInt(2)))
-	}
-	return res
-}
-
-// floorFix2Float converts a fix to a float, always rounding down
-func floorFix2Float(_f *big.Int) Float16 {
-	zero := big.NewInt(0)
-	ten := big.NewInt(10)
-	e := int64(0)
-
-	m := big.NewInt(0)
-	m.Set(_f)
-
-	if m.Cmp(zero) == 0 {
-		return 0
-	}
-
-	s := big.NewInt(0).Rsh(m, 10)
-
-	for s.Cmp(zero) != 0 {
-		m.Div(m, ten)
-		s.Rsh(m, 10)
-		e++
-	}
-
-	return Float16(m.Int64() | e<<11)
-}
-
-// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
-// case of loss during the encoding.
-func NewFloat16(f *big.Int) (Float16, error) {
-	fl1 := floorFix2Float(f)
-	fi1 := fl1.BigInt()
-	fl2 := fl1 | 0x400
-	fi2 := fl2.BigInt()
-
-	m3 := (fl1 & 0x3FF) + 1
-	e3 := fl1 >> 11
-
-	if m3&0x400 == 0 {
-		m3 = 0x66
-		e3++
-	}
-
-	fl3 := m3 + e3<<11
-	fi3 := fl3.BigInt()
-
-	res := fl1
-
-	d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
-	d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
-
-	if d.Cmp(d2) == 1 {
-		res = fl2
-		d = d2
-	}
-
-	d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
-
-	if d.Cmp(d3) == 1 {
-		res = fl3
-	}
-
-	// Do rounding check
-	if res.BigInt().Cmp(f) == 0 {
-		return res, nil
-	}
-	return res, tracerr.Wrap(ErrRoundingLoss)
-}
-
-// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
-// case of loss during the encoding.
-func NewFloat16Floor(f *big.Int) Float16 {
-	fl1 := floorFix2Float(f)
-	fl2 := fl1 | 0x400
-	fi2 := fl2.BigInt()
-
-	if fi2.Cmp(f) < 1 {
-		return fl2
-	}
-	return fl1
-}
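
For reference while reading the deleted code: a Float16 packed a 5-bit base-10 exponent, a half-exponent flag, and a 10-bit mantissa, so v = m * 10^e, plus 10^e/2 when the flag is set and e > 0. A standalone sketch of the decoding rule, checked against two of the test vectors below (illustrative only, independent of the package):

```
package main

import "fmt"

// decodeFloat16 mirrors Float16.BigInt() for values that fit in uint64:
// bits [15:11] exponent e, bit [10] half flag, bits [9:0] mantissa m.
func decodeFloat16(f uint16) uint64 {
	m := uint64(f) & 0x3FF
	e := uint64(f) >> 11
	half := (f >> 10) & 0x01
	exp := uint64(1)
	for i := uint64(0); i < e; i++ {
		exp *= 10
	}
	res := m * exp
	if half != 0 && e != 0 {
		res += exp / 2
	}
	return res
}

func main() {
	fmt.Println(decodeFloat16(0x307B)) // 123000000 (e=6, m=0x7B=123)
	fmt.Println(decodeFloat16(0x0C00)) // 5 (e=1, half flag set, m=0)
}
```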
common/float16_test.go (deleted)
@@ -1,132 +0,0 @@
-package common
-
-import (
-	"math/big"
-	"testing"
-
-	"github.com/hermeznetwork/tracerr"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestConversions(t *testing.T) {
-	testVector := map[Float16]string{
-		0x307B: "123000000",
-		0x1DC6: "454500",
-		0xFFFF: "10235000000000000000000000000000000",
-		0x0000: "0",
-		0x0400: "0",
-		0x0001: "1",
-		0x0401: "1",
-		0x0800: "0",
-		0x0c00: "5",
-		0x0801: "10",
-		0x0c01: "15",
-	}
-
-	for test := range testVector {
-		fix := test.BigInt()
-
-		assert.Equal(t, fix.String(), testVector[test])
-
-		bi := big.NewInt(0)
-		bi.SetString(testVector[test], 10)
-
-		fl, err := NewFloat16(bi)
-		assert.Equal(t, nil, err)
-
-		fx2 := fl.BigInt()
-		assert.Equal(t, fx2.String(), testVector[test])
-	}
-}
-
-func TestFloorFix2Float(t *testing.T) {
-	testVector := map[string]Float16{
-		"87999990000000000": 0x776f,
-		"87950000000000001": 0x776f,
-		"87950000000000000": 0x776f,
-		"87949999999999999": 0x736f,
-	}
-
-	for test := range testVector {
-		bi := big.NewInt(0)
-		bi.SetString(test, 10)
-
-		testFloat := NewFloat16Floor(bi)
-
-		assert.Equal(t, testFloat, testVector[test])
-	}
-}
-
-func TestConversionLosses(t *testing.T) {
-	a := big.NewInt(1000)
-	b, err := NewFloat16(a)
-	assert.Equal(t, nil, err)
-	c := b.BigInt()
-	assert.Equal(t, c, a)
-
-	a = big.NewInt(1024)
-	b, err = NewFloat16(a)
-	assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
-	c = b.BigInt()
-	assert.NotEqual(t, c, a)
-
-	a = big.NewInt(32767)
-	b, err = NewFloat16(a)
-	assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
-	c = b.BigInt()
-	assert.NotEqual(t, c, a)
-
-	a = big.NewInt(32768)
-	b, err = NewFloat16(a)
-	assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
-	c = b.BigInt()
-	assert.NotEqual(t, c, a)
-
-	a = big.NewInt(65536000)
-	b, err = NewFloat16(a)
-	assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
-	c = b.BigInt()
-	assert.NotEqual(t, c, a)
-}
-
-func BenchmarkFloat16(b *testing.B) {
-	newBigInt := func(s string) *big.Int {
-		bigInt, ok := new(big.Int).SetString(s, 10)
-		if !ok {
-			panic("Bad big int")
-		}
-		return bigInt
-	}
-	type pair struct {
-		Float16 Float16
-		BigInt  *big.Int
-	}
-	testVector := []pair{
-		{0x307B, newBigInt("123000000")},
-		{0x1DC6, newBigInt("454500")},
-		{0xFFFF, newBigInt("10235000000000000000000000000000000")},
-		{0x0000, newBigInt("0")},
-		{0x0400, newBigInt("0")},
-		{0x0001, newBigInt("1")},
-		{0x0401, newBigInt("1")},
-		{0x0800, newBigInt("0")},
-		{0x0c00, newBigInt("5")},
-		{0x0801, newBigInt("10")},
-		{0x0c01, newBigInt("15")},
-	}
-	b.Run("floorFix2Float()", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			NewFloat16Floor(testVector[i%len(testVector)].BigInt)
-		}
-	})
-	b.Run("NewFloat16()", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
-		}
-	})
-	b.Run("Float16.BigInt()", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			testVector[i%len(testVector)].Float16.BigInt()
-		}
-	})
-}
common/float40.go (new file, 103 lines)
@@ -0,0 +1,103 @@
+// Package common Float40 provides methods to work with Hermez custom half
+// float precision, 40 bits, codification internally called Float40 has been
+// adopted to encode large integers. This is done in order to save bits when L2
+// transactions are published.
+//nolint:gomnd
+package common
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/big"
+
+	"github.com/hermeznetwork/tracerr"
+)
+
+const (
+	// maxFloat40Value is the maximum value that the Float40 can have
+	// (40 bits: maxFloat40Value=2**40-1)
+	maxFloat40Value = 0xffffffffff
+	// Float40BytesLength defines the length of the Float40 values
+	// represented as byte arrays
+	Float40BytesLength = 5
+)
+
+var (
+	// ErrFloat40Overflow is used when a given Float40 overflows the
+	// maximum capacity of the Float40 (2**40-1)
+	ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
+	// ErrFloat40E31 is used when the e > 31 when trying to convert a
+	// *big.Int to Float40
+	ErrFloat40E31 = errors.New("Float40 error, e > 31")
+	// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
+	// not be represented as Float40 due not enough precission
+	ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
+)
+
+// Float40 represents a float in a 40 bit format
+type Float40 uint64
+
+// Bytes return a byte array of length 5 with the Float40 value encoded in
+// BigEndian
+func (f40 Float40) Bytes() ([]byte, error) {
+	if f40 > maxFloat40Value {
+		return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
+	}
+
+	var f40Bytes [8]byte
+	binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
+	var b [5]byte
+	copy(b[:], f40Bytes[3:])
+	return b[:], nil
+}
+
+// Float40FromBytes returns a Float40 from a byte array of 5 bytes in Bigendian
+// representation.
+func Float40FromBytes(b []byte) Float40 {
+	var f40Bytes [8]byte
+	copy(f40Bytes[3:], b[:])
+	f40 := binary.BigEndian.Uint64(f40Bytes[:])
+	return Float40(f40)
+}
+
+// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
+// [    e   |    m    ]
+// [ 5 bits | 35 bits ]
+func (f40 Float40) BigInt() (*big.Int, error) {
+	// take the 5 used bytes (FF * 5)
+	var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
+	f40Bytes, err := f40.Bytes()
+	if err != nil {
+		return nil, err
+	}
+
+	e := f40Bytes[0] & 0xF8 >> 3      // take first 5 bits
+	m := f40Uint64 & 0x07_FF_FF_FF_FF // take the others 35 bits
+
+	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
+	r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
+	return r, nil
+}
+
+// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
+// of loss during the encoding.
+func NewFloat40(f *big.Int) (Float40, error) {
+	m := f
+	e := big.NewInt(0)
+	zero := big.NewInt(0)
+	ten := big.NewInt(10)
+	thres := big.NewInt(0x08_00_00_00_00)
+	for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
+		m = new(big.Int).Div(m, ten)
+		e = new(big.Int).Add(e, big.NewInt(1))
+	}
+	if e.Int64() > 31 {
+		return 0, ErrFloat40E31
+	}
+	if m.Cmp(thres) >= 0 {
+		return 0, ErrFloat40NotEnoughPrecission
+	}
+	r := new(big.Int).Add(m,
+		new(big.Int).Mul(e, thres))
+	return Float40(r.Uint64()), nil
+}
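
The 40-bit layout is the same idea with far more room: v = m * 10^e with a 5-bit exponent and a 35-bit mantissa (0x800000000 = 2^35 is one exponent step). A standalone sketch of the decoding rule, checked against the test vectors below (illustrative only, independent of the package):

```
package main

import (
	"fmt"
	"math/big"
)

// decodeFloat40 mirrors Float40.BigInt(): bits [39:35] hold the base-10
// exponent e, bits [34:0] hold the mantissa m, and v = m * 10^e.
func decodeFloat40(f uint64) *big.Int {
	e := (f >> 35) & 0x1F     // 5-bit exponent
	m := f & 0x07_FF_FF_FF_FF // 35-bit mantissa
	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
	return new(big.Int).Mul(new(big.Int).SetUint64(m), exp)
}

func main() {
	// 6*0x800000000 + 123 sets e=6, m=123 -> 123 * 10^6
	fmt.Println(decodeFloat40(6*0x800000000 + 123)) // 123000000
	// The maximum Float40: e=31, m=2^35-1
	fmt.Println(decodeFloat40(0xFFFFFFFFFF)) // 343597383670000000000000000000000000000000
}
```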
common/float40_test.go (new file, 95 lines)
@@ -0,0 +1,95 @@
+package common
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConversionsFloat40(t *testing.T) {
+	testVector := map[Float40]string{
+		6*0x800000000 + 123:    "123000000",
+		2*0x800000000 + 4545:   "454500",
+		30*0x800000000 + 10235: "10235000000000000000000000000000000",
+		0x000000000:            "0",
+		0x800000000:            "0",
+		0x0001:                 "1",
+		0x0401:                 "1025",
+		0x800000000 + 1:        "10",
+		0xFFFFFFFFFF:           "343597383670000000000000000000000000000000",
+	}
+
+	for test := range testVector {
+		fix, err := test.BigInt()
+		require.NoError(t, err)
+		assert.Equal(t, fix.String(), testVector[test])
+
+		bi, ok := new(big.Int).SetString(testVector[test], 10)
+		require.True(t, ok)
+
+		fl, err := NewFloat40(bi)
+		assert.NoError(t, err)
+
+		fx2, err := fl.BigInt()
+		require.NoError(t, err)
+		assert.Equal(t, fx2.String(), testVector[test])
+	}
+}
+
+func TestExpectError(t *testing.T) {
+	testVector := map[string]error{
+		"9922334455000000000000000000000000000000":   nil,
+		"9922334455000000000000000000000000000001":   ErrFloat40NotEnoughPrecission,
+		"9922334454999999999999999999999999999999":   ErrFloat40NotEnoughPrecission,
+		"42949672950000000000000000000000000000000":  nil,
+		"99223344556573838487575":                    ErrFloat40NotEnoughPrecission,
+		"992233445500000000000000000000000000000000": ErrFloat40E31,
+		"343597383670000000000000000000000000000000": nil,
+		"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
+		"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
+		"343597383700000000000000000000000000000000": ErrFloat40E31,
+	}
+	for test := range testVector {
+		bi, ok := new(big.Int).SetString(test, 10)
+		require.True(t, ok)
+		_, err := NewFloat40(bi)
+		assert.Equal(t, testVector[test], err)
+	}
+}
+
+func BenchmarkFloat40(b *testing.B) {
+	newBigInt := func(s string) *big.Int {
+		bigInt, ok := new(big.Int).SetString(s, 10)
+		if !ok {
+			panic("Can not convert string to *big.Int")
+		}
+		return bigInt
+	}
+	type pair struct {
+		Float40 Float40
+		BigInt  *big.Int
+	}
+	testVector := []pair{
+		{6*0x800000000 + 123, newBigInt("123000000")},
+		{2*0x800000000 + 4545, newBigInt("454500")},
+		{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
+		{0x000000000, newBigInt("0")},
+		{0x800000000, newBigInt("0")},
+		{0x0001, newBigInt("1")},
+		{0x0401, newBigInt("1025")},
+		{0x800000000 + 1, newBigInt("10")},
+		{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
+	}
+	b.Run("NewFloat40()", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
+		}
+	})
+	b.Run("Float40.BigInt()", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = testVector[i%len(testVector)].Float40.BigInt()
+		}
+	})
+}
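
The error cases above follow from the normalization loop in NewFloat40: trailing decimal zeros are stripped while the mantissa is still >= 2^35, then the value is rejected if the remaining mantissa does not fit in 35 bits (ErrFloat40NotEnoughPrecission) or the exponent exceeds 31 (ErrFloat40E31). A quick standalone check of the first failing vector (illustrative only):

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	ten := big.NewInt(10)
	zero := big.NewInt(0)
	thres := new(big.Int).Lsh(big.NewInt(1), 35) // 2^35, mantissa limit

	// Ends in ...001, so the loop can strip no zeros and m stays huge:
	// this is why it maps to ErrFloat40NotEnoughPrecission.
	m, _ := new(big.Int).SetString("9922334455000000000000000000000000000001", 10)
	e := 0
	for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
		m.Div(m, ten)
		e++
	}
	fmt.Println(e, m.Cmp(thres) >= 0) // 0 true -> not enough precision
}
```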
common/l1tx.go
@@ -11,18 +11,11 @@ import (
 	"github.com/iden3/go-iden3-crypto/babyjub"
 )

-const (
-	// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
-	L1UserTxBytesLen = 72
-	// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
-	L1CoordinatorTxBytesLen = 101
-)
-
 // L1Tx is a struct that represents a L1 tx
 type L1Tx struct {
 	// Stored in DB: mandatory fileds

-	// TxID (12 bytes) for L1Tx is:
+	// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of:
 	// bytes:   |  1   |        8        |    2     |      1      |
 	// values:  | type | ToForgeL1TxsNum | Position | 0 (padding) |
 	// where type:
@@ -179,45 +172,38 @@ func (tx L1Tx) Tx() Tx {
 // [ 8 bits  ] empty (userFee)           // 1 byte
 // [ 40 bits ] empty (nonce)             // 5 bytes
 // [ 32 bits ] tokenID                   // 4 bytes
-// [ 16 bits ] amountFloat16             // 2 bytes
 // [ 48 bits ] toIdx                     // 6 bytes
 // [ 48 bits ] fromIdx                   // 6 bytes
 // [ 16 bits ] chainId                   // 2 bytes
 // [ 32 bits ] empty (signatureConstant) // 4 bytes
-// Total bits compressed data:  241 bits // 31 bytes in *big.Int representation
+// Total bits compressed data:  225 bits // 29 bytes in *big.Int representation
 func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
-	amountFloat16, err := NewFloat16(tx.Amount)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-
-	var b [31]byte
+	var b [29]byte
 	// b[0:7] empty: no ToBJJSign, no fee, no nonce
 	copy(b[7:11], tx.TokenID.Bytes())
-	copy(b[11:13], amountFloat16.Bytes())
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[13:19], toIdxBytes[:])
+	copy(b[11:17], toIdxBytes[:])
 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[19:25], fromIdxBytes[:])
-	binary.BigEndian.PutUint16(b[25:27], chainID)
-	copy(b[27:31], SignatureConstantBytes[:])
+	copy(b[17:23], fromIdxBytes[:])
+	binary.BigEndian.PutUint16(b[23:25], chainID)
+	copy(b[25:29], SignatureConstantBytes[:])

 	bi := new(big.Int).SetBytes(b[:])
 	return bi, nil
 }

 // BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat16 | Fee ]
+// [ fromIdx | toIdx | amountFloat40 | Fee ]
 func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd

-	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+	b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd

 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
@@ -231,13 +217,17 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

 	if tx.EffectiveAmount != nil {
-		amountFloat16, err := NewFloat16(tx.EffectiveAmount)
+		amountFloat40, err := NewFloat40(tx.EffectiveAmount)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
+		amountFloat40Bytes, err := amountFloat40.Bytes()
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+		copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
 	}
-	// fee = 0 (as is L1Tx) b[10:11]
+	// fee = 0 (as is L1Tx)
 	return b[:], nil
 }
@@ -247,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
 	fromIdxBytes := b[0:idxLen]
 	toIdxBytes := b[idxLen : idxLen*2]
-	amountBytes := b[idxLen*2 : idxLen*2+2]
+	amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength]

 	l1tx := L1Tx{}
 	fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -260,8 +250,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	l1tx.ToIdx = toIdx
-	l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
-	return &l1tx, nil
+	l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
+	return &l1tx, err
 }

 // BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -269,7 +259,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
 // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
 // for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
 func (tx *L1Tx) BytesGeneric() ([]byte, error) {
-	var b [L1UserTxBytesLen]byte
+	var b [RollupConstL1UserTotalBytes]byte
 	copy(b[0:20], tx.FromEthAddr.Bytes())
 	if tx.FromBJJ != EmptyBJJComp {
 		pkCompL := tx.FromBJJ
@@ -281,22 +271,33 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	copy(b[52:58], fromIdxBytes[:])
-	depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
+	depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[58:60], depositAmountFloat16.Bytes())
-	amountFloat16, err := NewFloat16(tx.Amount)
+	depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[60:62], amountFloat16.Bytes())
-	copy(b[62:66], tx.TokenID.Bytes())
+	copy(b[58:63], depositAmountFloat40Bytes)
+	amountFloat40, err := NewFloat40(tx.Amount)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	copy(b[63:68], amountFloat40Bytes)
+
+	copy(b[68:72], tx.TokenID.Bytes())
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[66:72], toIdxBytes[:])
+	copy(b[72:78], toIdxBytes[:])
 	return b[:], nil
 }
@@ -313,7 +314,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
 	if tx.UserOrigin {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
 	}
-	var b [L1CoordinatorTxBytesLen]byte
+	var b [RollupConstL1CoordinatorTotalBytes]byte
 	v := compressedSignatureBytes[64]
 	s := compressedSignatureBytes[32:64]
 	r := compressedSignatureBytes[0:32]
@@ -329,7 +330,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
 // L1UserTxFromBytes decodes a L1Tx from []byte
 func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
-	if len(b) != L1UserTxBytesLen {
+	if len(b) != RollupConstL1UserTotalBytes {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
 	}
@@ -347,13 +348,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	tx.FromIdx = fromIdx
-	tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
-	tx.Amount = Float16FromBytes(b[60:62]).BigInt()
-	tx.TokenID, err = TokenIDFromBytes(b[62:66])
+	tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	tx.ToIdx, err = IdxFromBytes(b[66:72])
+	tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.TokenID, err = TokenIDFromBytes(b[68:72])
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.ToIdx, err = IdxFromBytes(b[72:78])
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -361,19 +368,12 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
 	return tx, nil
 }

-func signHash(data []byte) []byte {
-	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
-	return ethCrypto.Keccak256([]byte(msg))
-}
-
 // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
 func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
-	if len(b) != L1CoordinatorTxBytesLen {
+	if len(b) != RollupConstL1CoordinatorTotalBytes {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
 	}
-	bytesMessage := []byte("I authorize this babyjubjub key for hermez rollup account creation")
-
 	tx := &L1Tx{
 		UserOrigin: false,
 	}
@@ -394,18 +394,20 @@ func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommo
 	// L1CoordinatorTX ETH
 	// Ethereum adds 27 to v
 	v = b[0] - byte(27) //nolint:gomnd
-	chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
-	var data []byte
-	data = append(data, bytesMessage...)
-	data = append(data, pkCompB...)
-	data = append(data, chainIDBytes[:]...)
-	data = append(data, hermezAddress.Bytes()...)
 	var signature []byte
 	signature = append(signature, r[:]...)
 	signature = append(signature, s[:]...)
 	signature = append(signature, v)
-	hash := signHash(data)
-	pubKeyBytes, err := ethCrypto.Ecrecover(hash, signature)
+	accCreationAuth := AccountCreationAuth{
+		BJJ: tx.FromBJJ,
+	}
+	h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+
+	pubKeyBytes, err := ethCrypto.Ecrecover(h, signature)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
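
After these changes the serialized L1UserTx produced by BytesGeneric and parsed by L1UserTxFromBytes occupies 78 bytes. A sketch of the offsets as named constants (the constant names are illustrative, not part of the diff):

```
package main

import "fmt"

// Byte offsets of a 78-byte serialized L1UserTx after the Float40 migration,
// as used by BytesGeneric / L1UserTxFromBytes above.
const (
	fromEthAddrStart   = 0  // [0:20]  fromEthAddr
	fromBJJStart       = 20 // [20:52] fromBJJ (compressed)
	fromIdxStart       = 52 // [52:58] fromIdx
	depositAmountStart = 58 // [58:63] depositAmountFloat40
	amountStart        = 63 // [63:68] amountFloat40
	tokenIDStart       = 68 // [68:72] tokenID
	toIdxStart         = 72 // [72:78] toIdx
	totalLen           = 78 // RollupConstL1UserTotalBytes
)

func main() {
	fmt.Println(totalLen - toIdxStart) // 6-byte toIdx closes the 78-byte frame
}
```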
common/l1tx_test.go
@@ -50,64 +50,110 @@ func TestNewL1CoordinatorTx(t *testing.T) {
 }

 func TestL1TxCompressedData(t *testing.T) {
+	// test vectors values generated from javascript implementation (using
+	// PoolL2Tx values)
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-		TokenID: 5,
+		FromIdx: (1 << 48) - 1,
+		ToIdx:   (1 << 48) - 1,
+		Amount:  amount,
+		TokenID: (1 << 32) - 1,
 	}
-	chainID := uint16(0)
-	txCompressedData, err := tx.TxCompressedData(chainID)
+	txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
 	assert.NoError(t, err)
+	expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

-	// test vector value generated from javascript implementation
-	expectedStr := "7307597389635308713748674793997299267459594577423"
-	assert.Equal(t, expectedStr, txCompressedData.String())
-	assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+	tx = L1Tx{
+		FromIdx: 0,
+		ToIdx:   0,
+		Amount:  big.NewInt(0),
+		TokenID: 0,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(0))
+	assert.NoError(t, err)
+	expectedStr = "c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+	amount, ok = new(big.Int).SetString("63000000000000000", 10)
+	require.True(t, ok)
+	tx = L1Tx{
+		FromIdx: 324,
+		ToIdx:   256,
+		Amount:  amount,
+		TokenID: 123,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(1))
+	assert.NoError(t, err)
+	expectedStr = "7b0000000001000000000001440001c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+	tx = L1Tx{
+		FromIdx: 1,
+		ToIdx:   2,
+		TokenID: 3,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(0))
+	assert.NoError(t, err)
+	expectedStr = "030000000000020000000000010000c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
 }

 func TestBytesDataAvailability(t *testing.T) {
+	// test vectors values generated from javascript implementation
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-		TokenID: 5,
+		ToIdx:           (1 << 16) - 1,
+		FromIdx:         (1 << 16) - 1,
+		EffectiveAmount: amount,
 	}
-	txCompressedData, err := tx.BytesDataAvailability(32)
+	txCompressedData, err := tx.BytesDataAvailability(16)
 	assert.NoError(t, err)
-	assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
-
-	tx = L1Tx{
-		FromIdx:         2,
-		ToIdx:           3,
-		EffectiveAmount: big.NewInt(4),
-		TokenID:         5,
-	}
-	txCompressedData, err = tx.BytesDataAvailability(32)
-	assert.NoError(t, err)
-	assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
-}
-
-func TestL1TxFromDataAvailability(t *testing.T) {
-	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-	}
-	txCompressedData, err := tx.BytesDataAvailability(32)
-	assert.NoError(t, err)
-	l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
+	assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
+	l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
 	require.NoError(t, err)
 	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
 	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
+	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)

 	tx = L1Tx{
-		FromIdx:         2,
-		ToIdx:           3,
-		EffectiveAmount: big.NewInt(4),
+		ToIdx:           (1 << 32) - 1,
+		FromIdx:         (1 << 32) - 1,
+		EffectiveAmount: amount,
 	}
 	txCompressedData, err = tx.BytesDataAvailability(32)
 	assert.NoError(t, err)
+	assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
+	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
+	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
+	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
+
+	tx = L1Tx{
+		ToIdx:           0,
+		FromIdx:         0,
+		EffectiveAmount: big.NewInt(0),
+	}
+	txCompressedData, err = tx.BytesDataAvailability(32)
+	assert.NoError(t, err)
+	assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
+	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
+	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
+	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
+
+	tx = L1Tx{
+		ToIdx:           635,
+		FromIdx:         296,
+		EffectiveAmount: big.NewInt(1000000000000000000),
+	}
+	txCompressedData, err = tx.BytesDataAvailability(32)
+	assert.NoError(t, err)
+	assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
 	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
 	require.NoError(t, err)
 	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -172,18 +218,15 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
 		UserOrigin: true,
 	}

-	expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
-	require.NoError(t, err)
-
 	encodedData, err := l1Tx.BytesUser()
 	require.NoError(t, err)
-	assert.Equal(t, expected, encodedData)
+	expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
 }

 func TestL1CoordinatorTxByteParsers(t *testing.T) {
 	hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
 	chainID := big.NewInt(1337)
-	chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)

 	privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
 	require.NoError(t, err)
@@ -201,18 +244,16 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
 	pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
 	err = pkComp.UnmarshalText(pkCompL)
 	require.NoError(t, err)
-	bytesMessage1 := []byte("\x19Ethereum Signed Message:\n120")
-	bytesMessage2 := []byte("I authorize this babyjubjub key for hermez rollup account creation")
-
-	babyjubB := SwapEndianness(pkComp[:])
-	var data []byte
-	data = append(data, bytesMessage1...)
-	data = append(data, bytesMessage2...)
-	data = append(data, babyjubB[:]...)
-	data = append(data, chainIDBytes...)
-	data = append(data, hermezAddress.Bytes()...)
-	hash := crypto.Keccak256Hash(data)
-	signature, err := crypto.Sign(hash.Bytes(), privateKey)
+	accCreationAuth := AccountCreationAuth{
+		EthAddr: fromEthAddr,
+		BJJ:     pkComp,
+	}
+	h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
+	require.NoError(t, err)
+	signature, err := crypto.Sign(h, privateKey)
 	require.NoError(t, err)
 	// Ethereum adds 27 to v
 	v := int(signature[64])
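
The first vector in TestBytesDataAvailability above can be checked by hand: with nLevels=16 each index takes 2 bytes, the amount takes 5 bytes as Float40, and the fee byte is 0 for an L1Tx. A standalone sketch of that layout (illustrative only):

```
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	nLevels := uint32(16)
	idxLen := nLevels / 8                 // 2 bytes per index
	b := make([]byte, (nLevels*2+40+8)/8) // 2+2+5+1 = 10 bytes

	// fromIdx = toIdx = (1<<16)-1, amount = max Float40 (0xFFFFFFFFFF), fee = 0.
	copy(b[0:idxLen], []byte{0xff, 0xff})
	copy(b[idxLen:idxLen*2], []byte{0xff, 0xff})
	copy(b[idxLen*2:idxLen*2+5], []byte{0xff, 0xff, 0xff, 0xff, 0xff})
	fmt.Println(hex.EncodeToString(b)) // ffffffffffffffffff00
}
```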
common/l2tx.go
@@ -89,11 +89,15 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
 	// TokenID
 	b = append(b, tx.TokenID.Bytes()[:]...)
 	// Amount
-	amountFloat16, err := NewFloat16(tx.Amount)
+	amountFloat40, err := NewFloat40(tx.Amount)
 	if err != nil {
 		return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
 	}
-	b = append(b, amountFloat16.Bytes()...)
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return txID, tracerr.Wrap(err)
+	}
+	b = append(b, amountFloat40Bytes...)
 	// Nonce
 	nonceBytes, err := tx.Nonce.Bytes()
 	if err != nil {
@@ -170,11 +174,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
 }

 // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat16 | Fee ]
+// [ fromIdx | toIdx | amountFloat40 | Fee ]
 func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd

-	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+	b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd

 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
@@ -188,13 +192,16 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	}
 	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

-	amountFloat16, err := NewFloat16(tx.Amount)
+	amountFloat40, err := NewFloat40(tx.Amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
-	b[idxLen*2+2] = byte(tx.Fee)
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
+	b[idxLen*2+Float40BytesLength] = byte(tx.Fee)

 	return b[:], nil
 }
@@ -219,7 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}

-	tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
-	tx.Fee = FeeSelector(b[idxLen*2+2])
+	tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
 	return tx, nil
 }
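
The buffer-size formula ((nLevels*2)+40+8)/8 simply totals the bit widths: two nLevels-bit indexes, a 40-bit Float40 amount, and an 8-bit fee selector. A quick standalone check (illustrative only):

```
package main

import "fmt"

// daBytes returns the Data Availability encoding length in bytes for an
// L2Tx, matching ((nLevels*2)+40+8)/8 in BytesDataAvailability above.
func daBytes(nLevels uint32) uint32 {
	return (nLevels*2 + 40 + 8) / 8
}

func main() {
	fmt.Println(daBytes(16)) // 10: 2+2 index bytes, 5 amount bytes, 1 fee byte
	fmt.Println(daBytes(32)) // 14: 4+4 index bytes, 5 amount bytes, 1 fee byte
}
```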
common/l2tx_test.go
@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err := NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
+	assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
+	assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
+	assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
+	assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
+	assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
+	assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 4444,
@@ -90,25 +90,85 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
+	assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
 }

 func TestL2TxByteParsers(t *testing.T) {
-	amount := new(big.Int)
-	amount.SetString("79000000", 10)
+	// test vectors values generated from javascript implementation
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	l2Tx := &L2Tx{
-		ToIdx:   256,
-		Amount:  amount,
-		FromIdx: 257,
-		Fee:     201,
+		ToIdx:   (1 << 16) - 1,
+		FromIdx: (1 << 16) - 1,
+		Amount:  amount,
+		Fee:     (1 << 8) - 1,
 	}
-	// Data from the compatibility test
-	expected := "00000101000001002b16c9"
-	encodedData, err := l2Tx.BytesDataAvailability(32)
+	expected := "ffffffffffffffffffff"
+	encodedData, err := l2Tx.BytesDataAvailability(16)
 	require.NoError(t, err)
 	assert.Equal(t, expected, hex.EncodeToString(encodedData))

-	decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
+	decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   (1 << 32) - 1,
+		FromIdx: (1 << 32) - 1,
+		Amount:  amount,
+		Fee:     (1 << 8) - 1,
+	}
+	expected = "ffffffffffffffffffffffffffff"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   0,
+		FromIdx: 0,
+		Amount:  big.NewInt(0),
+		Fee:     0,
+	}
+	expected = "0000000000000000000000000000"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   0,
+		FromIdx: 1061,
+		Amount:  big.NewInt(420000000000),
+		Fee:     127,
+	}
+	expected = "000004250000000010fa56ea007f"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   256,
+		FromIdx: 257,
+		Amount:  big.NewInt(79000000),
+		Fee:     201,
+	}
+	expected = "00000101000001000004b571c0c9"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
 	require.NoError(t, err)
 	assert.Equal(t, l2Tx, decodedData)
 }
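
One of the vectors above decodes by hand: in 000004250000000010fa56ea007f, the first 4 bytes give fromIdx 1061, the next 4 give toIdx 0, the Float40 0x10fa56ea00 carries e=2 and m=4200000000 (so 420000000000), and the final byte is the fee 127. A standalone sketch of that decode (illustrative only):

```
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Vector "000004250000000010fa56ea007f" with nLevels=32:
	// [4 bytes fromIdx | 4 bytes toIdx | 5 bytes Float40 amount | 1 byte fee]
	raw, _ := hex.DecodeString("000004250000000010fa56ea007f")

	fromIdx := uint64(raw[0])<<24 | uint64(raw[1])<<16 | uint64(raw[2])<<8 | uint64(raw[3])
	var f40 uint64
	for _, c := range raw[8:13] {
		f40 = f40<<8 | uint64(c)
	}
	e := f40 >> 35
	m := f40 & 0x07_FF_FF_FF_FF
	amount := m
	for i := uint64(0); i < e; i++ {
		amount *= 10
	}
	fmt.Println(fromIdx, amount, raw[13]) // 1061 420000000000 127
}
```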
@@ -36,7 +36,7 @@ type PoolL2Tx struct {
|
|||||||
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
|
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
|
||||||
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
|
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
|
||||||
TokenID TokenID `meddler:"token_id"`
|
TokenID TokenID `meddler:"token_id"`
|
||||||
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16
|
Amount *big.Int `meddler:"amount,bigint"`
|
||||||
Fee FeeSelector `meddler:"fee"`
|
Fee FeeSelector `meddler:"fee"`
|
||||||
Nonce Nonce `meddler:"nonce"` // effective 40 bits used
|
Nonce Nonce `meddler:"nonce"` // effective 40 bits used
|
||||||
State PoolL2TxState `meddler:"state"`
|
State PoolL2TxState `meddler:"state"`
|
||||||
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
|
|||||||
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
|
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
|
||||||
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
|
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
|
||||||
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
|
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
|
||||||
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16
|
RqAmount *big.Int `meddler:"rq_amount,bigintnull"`
|
||||||
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
|
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
|
||||||
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
|
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
|
||||||
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
|
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
|
||||||
@@ -73,7 +73,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
|
|||||||
// If original Type doesn't match the correct one, return error
|
// If original Type doesn't match the correct one, return error
|
||||||
if txTypeOld != "" && txTypeOld != tx.Type {
|
if txTypeOld != "" && txTypeOld != tx.Type {
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
|
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
|
||||||
tx.Type, txTypeOld))
|
txTypeOld, tx.Type))
|
||||||
}
|
}
|
||||||
|
|
||||||
txIDOld := tx.TxID
|
txIDOld := tx.TxID
|
||||||
@@ -83,7 +83,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
|
|||||||
// If original TxID doesn't match the correct one, return error
|
// If original TxID doesn't match the correct one, return error
|
||||||
if txIDOld != (TxID{}) && txIDOld != tx.TxID {
|
if txIDOld != (TxID{}) && txIDOld != tx.TxID {
|
||||||
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
|
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
|
||||||
tx.TxID.String(), txIDOld.String()))
|
txIDOld.String(), tx.TxID.String()))
|
||||||
}
|
}
|
||||||
|
|
||||||
return tx, nil
|
return tx, nil
|
||||||
@@ -122,18 +122,13 @@ func (tx *PoolL2Tx) SetID() error {
|
|||||||
// [ 8 bits ] userFee // 1 byte
|
// [ 8 bits ] userFee // 1 byte
|
||||||
// [ 40 bits ] nonce // 5 bytes
|
// [ 40 bits ] nonce // 5 bytes
|
||||||
// [ 32 bits ] tokenID // 4 bytes
|
// [ 32 bits ] tokenID // 4 bytes
|
||||||
// [ 16 bits ] amountFloat16 // 2 bytes
|
|
||||||
// [ 48 bits ] toIdx // 6 bytes
|
// [ 48 bits ] toIdx // 6 bytes
|
||||||
// [ 48 bits ] fromIdx // 6 bytes
|
// [ 48 bits ] fromIdx // 6 bytes
|
||||||
// [ 16 bits ] chainId // 2 bytes
|
// [ 16 bits ] chainId // 2 bytes
|
||||||
// [ 32 bits ] signatureConstant // 4 bytes
|
// [ 32 bits ] signatureConstant // 4 bytes
|
||||||
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
|
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
|
||||||
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
|
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
|
||||||
amountFloat16, err := NewFloat16(tx.Amount)
|
var b [29]byte
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
var b [31]byte
|
|
||||||
|
|
||||||
toBJJSign := byte(0)
|
toBJJSign := byte(0)
|
||||||
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
|
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
|
||||||
@@ -149,19 +144,18 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
|
|||||||
}
|
}
|
||||||
copy(b[2:7], nonceBytes[:])
|
copy(b[2:7], nonceBytes[:])
|
||||||
copy(b[7:11], tx.TokenID.Bytes())
|
copy(b[7:11], tx.TokenID.Bytes())
|
||||||
copy(b[11:13], amountFloat16.Bytes())
|
|
||||||
toIdxBytes, err := tx.ToIdx.Bytes()
|
toIdxBytes, err := tx.ToIdx.Bytes()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
copy(b[13:19], toIdxBytes[:])
|
copy(b[11:17], toIdxBytes[:])
|
||||||
fromIdxBytes, err := tx.FromIdx.Bytes()
|
fromIdxBytes, err := tx.FromIdx.Bytes()
|
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-copy(b[19:25], fromIdxBytes[:])
-binary.BigEndian.PutUint16(b[25:27], chainID)
-copy(b[27:31], SignatureConstantBytes[:])
+copy(b[17:23], fromIdxBytes[:])
+binary.BigEndian.PutUint16(b[23:25], chainID)
+copy(b[25:29], SignatureConstantBytes[:])
 
 bi := new(big.Int).SetBytes(b[:])
 return bi, nil
@@ -170,9 +164,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
 // TxCompressedDataEmpty calculates the TxCompressedData of an empty
 // transaction
 func TxCompressedDataEmpty(chainID uint16) *big.Int {
-var b [31]byte
-binary.BigEndian.PutUint16(b[25:27], chainID)
-copy(b[27:31], SignatureConstantBytes[:])
+var b [29]byte
+binary.BigEndian.PutUint16(b[23:25], chainID)
+copy(b[25:29], SignatureConstantBytes[:])
 bi := new(big.Int).SetBytes(b[:])
 return bi
 }
@@ -182,19 +176,24 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
 // [ 8 bits ] userFee // 1 byte
 // [ 40 bits ] nonce // 5 bytes
 // [ 32 bits ] tokenID // 4 bytes
-// [ 16 bits ] amountFloat16 // 2 bytes
+// [ 40 bits ] amountFloat40 // 5 bytes
 // [ 48 bits ] toIdx // 6 bytes
 // [ 48 bits ] fromIdx // 6 bytes
-// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
+// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
 func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 if tx.Amount == nil {
 tx.Amount = big.NewInt(0)
 }
-amountFloat16, err := NewFloat16(tx.Amount)
+amountFloat40, err := NewFloat40(tx.Amount)
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-var b [25]byte
+amountFloat40Bytes, err := amountFloat40.Bytes()
+if err != nil {
+return nil, tracerr.Wrap(err)
+}
+
+var b [28]byte
 toBJJSign := byte(0)
 if tx.ToBJJ != EmptyBJJComp {
 sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -210,17 +209,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 }
 copy(b[2:7], nonceBytes[:])
 copy(b[7:11], tx.TokenID.Bytes())
-copy(b[11:13], amountFloat16.Bytes())
+copy(b[11:16], amountFloat40Bytes)
 toIdxBytes, err := tx.ToIdx.Bytes()
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-copy(b[13:19], toIdxBytes[:])
+copy(b[16:22], toIdxBytes[:])
 fromIdxBytes, err := tx.FromIdx.Bytes()
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-copy(b[19:25], fromIdxBytes[:])
+copy(b[22:28], fromIdxBytes[:])
 
 bi := new(big.Int).SetBytes(b[:])
 return bi, nil
@@ -236,19 +235,24 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 // [ 8 bits ] rqUserFee // 1 byte
 // [ 40 bits ] rqNonce // 5 bytes
 // [ 32 bits ] rqTokenID // 4 bytes
-// [ 16 bits ] rqAmountFloat16 // 2 bytes
+// [ 40 bits ] rqAmountFloat40 // 5 bytes
 // [ 48 bits ] rqToIdx // 6 bytes
 // [ 48 bits ] rqFromIdx // 6 bytes
-// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
+// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
 func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
 if tx.RqAmount == nil {
 tx.RqAmount = big.NewInt(0)
 }
-amountFloat16, err := NewFloat16(tx.RqAmount)
+amountFloat40, err := NewFloat40(tx.RqAmount)
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-var b [25]byte
+amountFloat40Bytes, err := amountFloat40.Bytes()
+if err != nil {
+return nil, tracerr.Wrap(err)
+}
+
+var b [28]byte
 rqToBJJSign := byte(0)
 if tx.RqToBJJ != EmptyBJJComp {
 sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -264,17 +268,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
 }
 copy(b[2:7], nonceBytes[:])
 copy(b[7:11], tx.RqTokenID.Bytes())
-copy(b[11:13], amountFloat16.Bytes())
+copy(b[11:16], amountFloat40Bytes)
 toIdxBytes, err := tx.RqToIdx.Bytes()
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-copy(b[13:19], toIdxBytes[:])
+copy(b[16:22], toIdxBytes[:])
 fromIdxBytes, err := tx.RqFromIdx.Bytes()
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
-copy(b[19:25], fromIdxBytes[:])
+copy(b[22:28], fromIdxBytes[:])
 
 bi := new(big.Int).SetBytes(b[:])
 return bi, nil
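The hunks above migrate the compressed tx encoding from Float16 (2 bytes) to Float40 (5 bytes), growing the `TxCompressedDataV2` buffer from 25 to 28 bytes. Below is a minimal sketch of decoding a value with the new byte offsets, assuming `b[0]` carries the toBJJSign flag and `b[1]` the fee byte (as the `0107ff…` test vectors further below suggest); `decodeV2` is an illustrative helper, not code from this repo:

```
package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// decodeV2 splits a TxCompressedDataV2 value following the 28-byte layout
// used by the new code above:
// [ toBJJSign b[0] | fee b[1] | nonce b[2:7] | tokenID b[7:11] |
//   amountFloat40 b[11:16] | toIdx b[16:22] | fromIdx b[22:28] ]
func decodeV2(v *big.Int) (sign bool, fee uint8, nonce, amountF40, toIdx, fromIdx uint64, tokenID uint32) {
	var b [28]byte
	src := v.Bytes()
	copy(b[28-len(src):], src) // left-pad with zeros to 28 bytes
	sign = b[0] != 0
	fee = b[1]
	nonce = new(big.Int).SetBytes(b[2:7]).Uint64()
	tokenID = binary.BigEndian.Uint32(b[7:11])
	amountF40 = new(big.Int).SetBytes(b[11:16]).Uint64()
	toIdx = new(big.Int).SetBytes(b[16:22]).Uint64()
	fromIdx = new(big.Int).SetBytes(b[22:28]).Uint64()
	return
}

func main() {
	// Test vector from the test diff below: FromIdx=324, ToIdx=256,
	// TokenID=123, Nonce=76, Fee=214, amount encoded as float40 0x3977825f00.
	v, _ := new(big.Int).SetString("d6000000004c0000007b3977825f00000000000100000000000144", 16)
	fmt.Println(decodeV2(v))
}
```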
@@ -287,7 +291,22 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
+
+// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
+var e1B [25]byte
+amountFloat40, err := NewFloat40(tx.Amount)
+if err != nil {
+return nil, tracerr.Wrap(err)
+}
+amountFloat40Bytes, err := amountFloat40.Bytes()
+if err != nil {
+return nil, tracerr.Wrap(err)
+}
+copy(e1B[0:5], amountFloat40Bytes)
 toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
+copy(e1B[5:25], toEthAddr.Bytes())
+e1 := new(big.Int).SetBytes(e1B[:])
+
 rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
 
 _, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -299,7 +318,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
 
 _, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
 
-return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
+return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
 }
 
 // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
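`HashToSign` now feeds Poseidon a packed field `e1 = [5 bytes AmountFloat40 | 20 bytes ToEthAddr]` instead of the bare address. For addresses whose big-endian bytes keep the full 20-byte width, the byte packing is equivalent to the arithmetic sketched below; `packE1` is an illustrative name, and note that the actual `copy(e1B[5:25], toEthAddr.Bytes())` left-aligns the zero-stripped bytes, so addresses with leading zero bytes would diverge from this form:

```
package main

import (
	"fmt"
	"math/big"
)

// packE1 mirrors the byte packing in the hunk above for addresses with no
// leading zero byte: e1 = amountFloat40 * 2^160 + toEthAddr.
func packE1(amountF40 uint64, toEthAddr *big.Int) *big.Int {
	e1 := new(big.Int).Lsh(new(big.Int).SetUint64(amountF40), 160)
	return e1.Or(e1, toEthAddr)
}

func main() {
	// Illustrative address value, not a vector from the repo.
	addr, _ := new(big.Int).SetString("c58d29fa6e86e4fae04ddced660d45bcf3cb2370", 16)
	fmt.Printf("%x\n", packE1(0x3977825f00, addr))
}
```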
@@ -21,80 +21,104 @@ func TestNewPoolL2Tx(t *testing.T) {
 }
 poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
 assert.NoError(t, err)
-assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
+assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
 }
 
-func TestTxCompressedData(t *testing.T) {
-chainID := uint16(0)
-var sk babyjub.PrivateKey
-_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
+func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
+// test vectors values generated from javascript implementation
+var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
+_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
 assert.NoError(t, err)
 
+var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
+_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
+assert.NoError(t, err)
+
+amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+require.True(t, ok)
 tx := PoolL2Tx{
-FromIdx: 2,
-ToIdx: 3,
-Amount: big.NewInt(4),
-TokenID: 5,
-Nonce: 6,
-ToBJJ: sk.Public().Compress(),
+FromIdx: (1 << 48) - 1,
+ToIdx: (1 << 48) - 1,
+Amount: amount,
+TokenID: (1 << 32) - 1,
+Nonce: (1 << 40) - 1,
+Fee: (1 << 3) - 1,
+ToBJJ: skPositive.Public().Compress(),
 }
-txCompressedData, err := tx.TxCompressedData(chainID)
-assert.NoError(t, err)
-// test vector value generated from javascript implementation
-expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
-assert.Equal(t, expectedStr, txCompressedData.String())
-assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
-// using a different chainID
-txCompressedData, err = tx.TxCompressedData(uint16(100))
-assert.NoError(t, err)
-expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
-assert.Equal(t, expectedStr, txCompressedData.String())
-assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
-txCompressedData, err = tx.TxCompressedData(uint16(65535))
-assert.NoError(t, err)
-expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
-assert.Equal(t, expectedStr, txCompressedData.String())
-assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
+require.NoError(t, err)
+expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+txCompressedDataV2, err := tx.TxCompressedDataV2()
+require.NoError(t, err)
+expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
 
 tx = PoolL2Tx{
-RqFromIdx: 7,
-RqToIdx: 8,
-RqAmount: big.NewInt(9),
-RqTokenID: 10,
-RqNonce: 11,
-RqFee: 12,
-RqToBJJ: sk.Public().Compress(),
-}
-rqTxCompressedData, err := tx.RqTxCompressedDataV2()
-assert.NoError(t, err)
-// test vector value generated from javascript implementation
-expectedStr = "6571340879233176732837827812956721483162819083004853354503"
-assert.Equal(t, expectedStr, rqTxCompressedData.String())
-assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
+FromIdx: 0,
+ToIdx: 0,
+Amount: big.NewInt(0),
+TokenID: 0,
+Nonce: 0,
+Fee: 0,
+ToBJJ: skNegative.Public().Compress(),
 }
+txCompressedData, err = tx.TxCompressedData(uint16(0))
+require.NoError(t, err)
+expectedStr = "c60be60f"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
 
-func TestTxCompressedDataV2(t *testing.T) {
-var sk babyjub.PrivateKey
-_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
-assert.NoError(t, err)
-tx := PoolL2Tx{
-FromIdx: 7,
-ToIdx: 8,
-Amount: big.NewInt(9),
-TokenID: 10,
-Nonce: 11,
-Fee: 12,
-ToBJJ: sk.Public().Compress(),
-}
-txCompressedData, err := tx.TxCompressedDataV2()
-assert.NoError(t, err)
-// test vector value generated from javascript implementation
-expectedStr := "6571340879233176732837827812956721483162819083004853354503"
-assert.Equal(t, expectedStr, txCompressedData.String())
-expected, ok := new(big.Int).SetString(expectedStr, 10)
-assert.True(t, ok)
+txCompressedDataV2, err = tx.TxCompressedDataV2()
+require.NoError(t, err)
+assert.Equal(t, "0", txCompressedDataV2.String())
 
-assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
-assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
+amount, ok = new(big.Int).SetString("63000000000000000", 10)
+require.True(t, ok)
+tx = PoolL2Tx{
+FromIdx: 324,
+ToIdx: 256,
+Amount: amount,
+TokenID: 123,
+Nonce: 76,
+Fee: 214,
+ToBJJ: skNegative.Public().Compress(),
+}
+txCompressedData, err = tx.TxCompressedData(uint16(1))
+require.NoError(t, err)
+expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+txCompressedDataV2, err = tx.TxCompressedDataV2()
+require.NoError(t, err)
+expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
+
+tx = PoolL2Tx{
+FromIdx: 1,
+ToIdx: 2,
+TokenID: 3,
+Nonce: 4,
+Fee: 5,
+ToBJJ: skNegative.Public().Compress(),
+}
+txCompressedData, err = tx.TxCompressedData(uint16(0))
+require.NoError(t, err)
+expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+tx = PoolL2Tx{
+FromIdx: 2,
+ToIdx: 3,
+TokenID: 4,
+Nonce: 5,
+Fee: 6,
+ToBJJ: skPositive.Public().Compress(),
+}
+txCompressedData, err = tx.TxCompressedData(uint16(0))
+require.NoError(t, err)
+expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
+assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
 }
 
 func TestRqTxCompressedDataV2(t *testing.T) {
@@ -113,19 +137,16 @@ func TestRqTxCompressedDataV2(t *testing.T) {
 txCompressedData, err := tx.RqTxCompressedDataV2()
 assert.NoError(t, err)
 // test vector value generated from javascript implementation
-expectedStr := "6571340879233176732837827812956721483162819083004853354503"
+expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
 assert.Equal(t, expectedStr, txCompressedData.String())
 expected, ok := new(big.Int).SetString(expectedStr, 10)
 assert.True(t, ok)
 assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
-assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
+assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
 }
 
 func TestHashToSign(t *testing.T) {
 chainID := uint16(0)
-var sk babyjub.PrivateKey
-_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
-assert.NoError(t, err)
 tx := PoolL2Tx{
 FromIdx: 2,
 ToIdx: 3,
@@ -136,7 +157,7 @@ func TestHashToSign(t *testing.T) {
 }
 toSign, err := tx.HashToSign(chainID)
 assert.NoError(t, err)
-assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
+assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
 }
 
 func TestVerifyTxSignature(t *testing.T) {
@@ -156,7 +177,7 @@ func TestVerifyTxSignature(t *testing.T) {
 }
 toSign, err := tx.HashToSign(chainID)
 assert.NoError(t, err)
-assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
+assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
 
 sig := sk.SignPoseidon(toSign)
 tx.Signature = sig.Compress()
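The max-amount test vector above (`343597383670000000000000000000000000000000`) is exactly the largest Float40 value, assuming the encoding is a 35-bit mantissa m and a 5-bit decimal exponent e representing m·10^e — a quick check:

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	m := new(big.Int).SetUint64(1<<35 - 1)                     // max 35-bit mantissa: 34359738367
	p := new(big.Int).Exp(big.NewInt(10), big.NewInt(31), nil) // max 5-bit exponent: 10^31
	fmt.Println(new(big.Int).Mul(m, p))
	// 343597383670000000000000000000000000000000, the amount used in the test
}
```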
@@ -62,3 +62,17 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
 }
 return siblings[:pos]
 }
+
+// TokensToUSD is a helper function to calculate the USD value of a certain
+// amount of tokens considering the normalized token price (which is the price
+// commonly reported by exhanges)
+func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
+amountF := new(big.Float).SetInt(amount)
+// Divide by 10^decimals to normalize the amount
+baseF := new(big.Float).SetInt(new(big.Int).Exp(
+big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
+amountF.Mul(amountF, big.NewFloat(valueUSD))
+amountF.Quo(amountF, baseF)
+amountUSD, _ := amountF.Float64()
+return amountUSD
+}
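A worked example of the new `TokensToUSD` helper (USD = amount × price / 10^decimals); the token amount and price here are illustrative:

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Same arithmetic as TokensToUSD above: 1500000 base units of a
	// 6-decimals token (1.5 tokens) priced at 2.50 USD -> 3.75 USD.
	amountF := new(big.Float).SetInt(big.NewInt(1500000))
	baseF := new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(6), nil))
	amountF.Mul(amountF, big.NewFloat(2.5))
	amountF.Quo(amountF, baseF)
	usd, _ := amountF.Float64()
	fmt.Println(usd) // 3.75
}
```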
common/zk.go (15 lines changed)
@@ -102,6 +102,8 @@ type ZKInputs struct {
 ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
 // ToEthAddr
 ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
+// AmountF encoded as float40
+AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]
 
 // OnChain determines if is L1 (1/true) or L2 (0/false)
 OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -112,8 +114,8 @@ type ZKInputs struct {
 // NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
 // account (fromIdx==0)
 NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
-// DepositAmountF encoded as float16
-DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
+// DepositAmountF encoded as float40
+DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx]
 // FromEthAddr
 FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
 // FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -326,6 +328,7 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
 zki.AuxToIdx = newSlice(maxTx)
 zki.ToBJJAy = newSlice(maxTx)
 zki.ToEthAddr = newSlice(maxTx)
+zki.AmountF = newSlice(maxTx)
 zki.OnChain = newSlice(maxTx)
 zki.NewAccount = newSlice(maxTx)
 
@@ -476,8 +479,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
 b = append(b, newExitRoot...)
 
-// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
-l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
+// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData
+l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
 l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
 l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
 for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -494,9 +497,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 }
 b = append(b, l1TxsDataAvailability...)
 
-// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
+// [MAX_TX*(2*NLevels + 48) bits] L2TxsData
 var l2TxsData []byte
-l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
+l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
 l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
 expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
 for i := 0; i < len(z.Metadata.L2TxsData); i++ {
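The constant changes in `ToHashGlobalData` follow directly from the float16 → float40 migration: an L1 tx serializes two float-encoded fields (deposit amount and amount), so its bit length grows by 2 × (40 − 16) = 48 bits (480 → 528), while an L2 tx serializes one float amount plus an 8-bit fee, so 16 + 8 = 24 bits becomes 40 + 8 = 48. A trivial check:

```
package main

import "fmt"

func main() {
	const feeBits, float16Bits, float40Bits = 8, 16, 40
	fmt.Println(480 + 2*(float40Bits-float16Bits))        // 528 (L1: amount + deposit amount)
	fmt.Println(float16Bits+feeBits, float40Bits+feeBits) // 24 48 (L2: amount + fee)
}
```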
config/config.go (234 lines changed)
@@ -3,6 +3,7 @@ package config
 import (
 "fmt"
 "io/ioutil"
+"math/big"
 "time"
 
 "github.com/BurntSushi/toml"
@@ -34,10 +35,30 @@ type ServerProof struct {
 URL string `validate:"required"`
 }
 
+// ForgeBatchGasCost is the costs associated to a ForgeBatch transaction, split
+// into different parts to be used in a formula.
+type ForgeBatchGasCost struct {
+Fixed uint64 `validate:"required"`
+L1UserTx uint64 `validate:"required"`
+L1CoordTx uint64 `validate:"required"`
+L2Tx uint64 `validate:"required"`
+}
+
+// CoordinatorAPI specifies the configuration parameters of the API in mode
+// coordinator
+type CoordinatorAPI struct {
+// Coordinator enables the coordinator API endpoints
+Coordinator bool
+}
+
 // Coordinator is the coordinator specific configuration.
 type Coordinator struct {
 // ForgerAddress is the address under which this coordinator is forging
 ForgerAddress ethCommon.Address `validate:"required"`
+// MinimumForgeAddressBalance is the minimum balance the forger address
+// needs to start the coordinator in wei. Of set to 0, the coordinator
+// will not check the balance before starting.
+MinimumForgeAddressBalance *big.Int
 // FeeAccount is the Hermez account that the coordinator uses to receive fees
 FeeAccount struct {
 // Address is the ethereum address of the account to receive fees
@@ -51,33 +72,85 @@ type Coordinator struct {
 // L1BatchTimeoutPerc is the portion of the range before the L1Batch
 // timeout that will trigger a schedule to forge an L1Batch
 L1BatchTimeoutPerc float64 `validate:"required"`
+// StartSlotBlocksDelay is the number of blocks of delay to wait before
+// starting the pipeline when we reach a slot in which we can forge.
+StartSlotBlocksDelay int64
+// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
+// the forger address is checked to be allowed to forge (apart from
+// checking the next block), used to decide when to stop scheduling new
+// batches (by stopping the pipeline).
+// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
+// is 5, eventhough at block 11 we canForge, the pipeline will be
+// stopped if we can't forge at block 15.
+// This value should be the expected number of blocks it takes between
+// scheduling a batch and having it mined.
+ScheduleBatchBlocksAheadCheck int64
+// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
+// which the coordinator is also checked to be allowed to forge, apart
+// from the next block; used to decide when to stop sending batches to
+// the smart contract.
+// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
+// 5, eventhough at block 11 we canForge, the batch will be discarded
+// if we can't forge at block 15.
+SendBatchBlocksMarginCheck int64
 // ProofServerPollInterval is the waiting interval between polling the
 // ProofServer while waiting for a particular status
 ProofServerPollInterval Duration `validate:"required"`
 // ForgeRetryInterval is the waiting interval between calls forge a
 // batch after an error
 ForgeRetryInterval Duration `validate:"required"`
+// ForgeDelay is the delay after which a batch is forged if the slot is
+// already committed. If set to 0s, the coordinator will continuously
+// forge at the maximum rate.
+ForgeDelay Duration `validate:"-"`
+// ForgeNoTxsDelay is the delay after which a batch is forged even if
+// there are no txs to forge if the slot is already committed. If set
+// to 0s, the coordinator will continuously forge even if the batches
+// are empty.
+ForgeNoTxsDelay Duration `validate:"-"`
 // SyncRetryInterval is the waiting interval between calls to the main
 // handler of a synced block after an error
 SyncRetryInterval Duration `validate:"required"`
+// PurgeByExtDelInterval is the waiting interval between calls
+// to the PurgeByExternalDelete function of the l2db which deletes
+// pending txs externally marked by the column `external_delete`
+PurgeByExtDelInterval Duration `validate:"required"`
 // L2DB is the DB that holds the pool of L2Txs
 L2DB struct {
 // SafetyPeriod is the number of batches after which
 // non-pending L2Txs are deleted from the pool
 SafetyPeriod common.BatchNum `validate:"required"`
-// MaxTxs is the number of L2Txs that once reached triggers
-// deletion of old L2Txs
+// MaxTxs is the maximum number of pending L2Txs that can be
+// stored in the pool. Once this number of pending L2Txs is
+// reached, inserts to the pool will be denied until some of
+// the pending txs are forged.
 MaxTxs uint32 `validate:"required"`
+// MinFeeUSD is the minimum fee in USD that a tx must pay in
+// order to be accepted into the pool. Txs with lower than
+// minimum fee will be rejected at the API level.
+MinFeeUSD float64
 // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
 // L2Txs is reached, L2Txs older than TTL will be deleted.
 TTL Duration `validate:"required"`
-// PurgeBatchDelay is the delay between batches to purge outdated transactions
+// PurgeBatchDelay is the delay between batches to purge
+// outdated transactions. Oudated L2Txs are those that have
+// been forged or marked as invalid for longer than the
+// SafetyPeriod and pending L2Txs that have been in the pool
+// for longer than TTL once there are MaxTxs.
 PurgeBatchDelay int64 `validate:"required"`
-// InvalidateBatchDelay is the delay between batches to mark invalid transactions
+// InvalidateBatchDelay is the delay between batches to mark
+// invalid transactions due to nonce lower than the account
+// nonce.
 InvalidateBatchDelay int64 `validate:"required"`
-// PurgeBlockDelay is the delay between blocks to purge outdated transactions
+// PurgeBlockDelay is the delay between blocks to purge
+// outdated transactions. Oudated L2Txs are those that have
+// been forged or marked as invalid for longer than the
+// SafetyPeriod and pending L2Txs that have been in the pool
+// for longer than TTL once there are MaxTxs.
 PurgeBlockDelay int64 `validate:"required"`
-// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
+// InvalidateBlockDelay is the delay between blocks to mark
+// invalid transactions due to nonce lower than the account
+// nonce.
 InvalidateBlockDelay int64 `validate:"required"`
 } `validate:"required"`
 TxSelector struct {
@@ -90,7 +163,6 @@ type Coordinator struct {
 } `validate:"required"`
 ServerProofs []ServerProof `validate:"required"`
 Circuit struct {
-// VerifierIdx uint8 `validate:"required"`
 // MaxTx is the maximum number of txs supported by the circuit
 MaxTx int64 `validate:"required"`
 // NLevels is the maximum number of merkle tree levels
@@ -98,12 +170,13 @@ type Coordinator struct {
 NLevels int64 `validate:"required"`
 } `validate:"required"`
 EthClient struct {
-// CallGasLimit is the default gas limit set for ethereum
-// calls, except for methods where a particular gas limit is
-// harcoded because it's known to be a big value
-CallGasLimit uint64 `validate:"required"`
-// GasPriceDiv is the gas price division
-GasPriceDiv uint64 `validate:"required"`
+// MaxGasPrice is the maximum gas price allowed for ethereum
+// transactions
+MaxGasPrice *big.Int `validate:"required"`
+// GasPriceIncPerc is the percentage increase of gas price set
+// in an ethereum transaction from the suggested gas price by
+// the ehtereum node
+GasPriceIncPerc int64
 // CheckLoopInterval is the waiting interval between receipt
 // checks of ethereum transactions in the TxManager
 CheckLoopInterval Duration `validate:"required"`
@@ -113,6 +186,13 @@ type Coordinator struct {
 // AttemptsDelay is delay between attempts do do an eth client
 // RPC call
 AttemptsDelay Duration `validate:"required"`
+// TxResendTimeout is the timeout after which a non-mined
+// ethereum transaction will be resent (reusing the nonce) with
+// a newly calculated gas price
+TxResendTimeout Duration `validate:"required"`
+// NoReuseNonce disables reusing nonces of pending transactions for
+// new replacement transactions
+NoReuseNonce bool
 // Keystore is the ethereum keystore where private keys are kept
 Keystore struct {
 // Path to the keystore
@@ -120,11 +200,11 @@ type Coordinator struct {
 // Password used to decrypt the keys in the keystore
 Password string `validate:"required"`
 } `validate:"required"`
+// ForgeBatchGasCost contains the cost of each action in the
+// ForgeBatch transaction.
+ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
 } `validate:"required"`
-API struct {
-// Coordinator enables the coordinator API endpoints
-Coordinator bool
-} `validate:"required"`
+API CoordinatorAPI `validate:"required"`
 Debug struct {
 // BatchPath if set, specifies the path where batchInfo is stored
 // in JSON in every step/update of the pipeline
@@ -132,9 +212,52 @@ type Coordinator struct {
 // LightScrypt if set, uses light parameters for the ethereum
 // keystore encryption algorithm.
 LightScrypt bool
+// RollupVerifierIndex is the index of the verifier to use in
+// the Rollup smart contract. The verifier chosen by index
+// must match with the Circuit parameters.
+RollupVerifierIndex *int
 }
 }
+
+// PostgreSQL is the postgreSQL configuration parameters. It's possible to use
+// diferentiated SQL connections for read/write. If the read configuration is
+// not provided, the write one it's going to be used for both reads and writes
+type PostgreSQL struct {
+// Port of the PostgreSQL write server
+PortWrite int `validate:"required"`
+// Host of the PostgreSQL write server
+HostWrite string `validate:"required"`
+// User of the PostgreSQL write server
+UserWrite string `validate:"required"`
+// Password of the PostgreSQL write server
+PasswordWrite string `validate:"required"`
+// Name of the PostgreSQL write server database
+NameWrite string `validate:"required"`
+// Port of the PostgreSQL read server
+PortRead int
+// Host of the PostgreSQL read server
+HostRead string
+// User of the PostgreSQL read server
+UserRead string
+// Password of the PostgreSQL read server
+PasswordRead string
+// Name of the PostgreSQL read server database
+NameRead string
+}
+
+// NodeDebug specifies debug configuration parameters
+type NodeDebug struct {
+// APIAddress is the address where the debugAPI will listen if
+// set
+APIAddress string
+// MeddlerLogs enables meddler debug mode, where unused columns and struct
+// fields will be logged
+MeddlerLogs bool
+// GinDebugMode sets Gin-Gonic (the web framework) to run in
+// debug mode
+GinDebugMode bool
+}
 
 // Node is the hermez node configuration.
 type Node struct {
 PriceUpdater struct {
@@ -151,18 +274,7 @@ type Node struct {
 // Keep is the number of checkpoints to keep
 Keep int `validate:"required"`
 } `validate:"required"`
-PostgreSQL struct {
-// Port of the PostgreSQL server
-Port int `validate:"required"`
-// Host of the PostgreSQL server
-Host string `validate:"required"`
-// User of the PostgreSQL server
-User string `validate:"required"`
-// Password of the PostgreSQL server
-Password string `validate:"required"`
-// Name of the PostgreSQL server database
-Name string `validate:"required"`
-} `validate:"required"`
+PostgreSQL PostgreSQL `validate:"required"`
 Web3 struct {
 // URL is the URL of the web3 ethereum-node RPC server
 URL string `validate:"required"`
@@ -193,6 +305,7 @@ type Node struct {
 // TokenHEZ address
 TokenHEZName string `validate:"required"`
 } `validate:"required"`
+// API specifies the configuration parameters of the API
 API struct {
 // Address where the API will listen if set
 Address string
@@ -210,17 +323,45 @@ type Node struct {
 // can wait to stablish a SQL connection
 SQLConnectionTimeout Duration
 } `validate:"required"`
-Debug struct {
-// APIAddress is the address where the debugAPI will listen if
-// set
-APIAddress string
-// MeddlerLogs enables meddler debug mode, where unused columns and struct
-// fields will be logged
-MeddlerLogs bool
-}
+Debug NodeDebug `validate:"required"`
 Coordinator Coordinator `validate:"-"`
 }
+
+// APIServer is the api server configuration parameters
+type APIServer struct {
+// NodeAPI specifies the configuration parameters of the API
+API struct {
+// Address where the API will listen if set
+Address string `validate:"required"`
+// Explorer enables the Explorer API endpoints
+Explorer bool
+// Maximum concurrent connections allowed between API and SQL
+MaxSQLConnections int `validate:"required"`
+// SQLConnectionTimeout is the maximum amount of time that an API request
+// can wait to stablish a SQL connection
+SQLConnectionTimeout Duration
+} `validate:"required"`
+PostgreSQL PostgreSQL `validate:"required"`
+Coordinator struct {
+API struct {
+// Coordinator enables the coordinator API endpoints
+Coordinator bool
+} `validate:"required"`
+L2DB struct {
+// MaxTxs is the maximum number of pending L2Txs that can be
+// stored in the pool. Once this number of pending L2Txs is
+// reached, inserts to the pool will be denied until some of
+// the pending txs are forged.
+MaxTxs uint32 `validate:"required"`
+// MinFeeUSD is the minimum fee in USD that a tx must pay in
+// order to be accepted into the pool. Txs with lower than
+// minimum fee will be rejected at the API level.
+MinFeeUSD float64
+} `validate:"required"`
+}
+Debug NodeDebug `validate:"required"`
+}
 
 // Load loads a generic config.
 func Load(path string, cfg interface{}) error {
 bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -234,8 +375,8 @@ func Load(path string, cfg interface{}) error {
 return nil
 }
 
-// LoadCoordinator loads the Coordinator configuration from path.
-func LoadCoordinator(path string) (*Node, error) {
+// LoadNode loads the Node configuration from path.
+func LoadNode(path string, coordinator bool) (*Node, error) {
 var cfg Node
 if err := Load(path, &cfg); err != nil {
 return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -244,21 +385,28 @@ func LoadCoordinator(path string) (*Node, error) {
 if err := validate.Struct(cfg); err != nil {
 return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
 }
+if coordinator {
 if err := validate.Struct(cfg.Coordinator); err != nil {
 return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
 }
+}
 return &cfg, nil
 }
 
-// LoadNode loads the Node configuration from path.
-func LoadNode(path string) (*Node, error) {
-var cfg Node
+// LoadAPIServer loads the APIServer configuration from path.
+func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
+var cfg APIServer
 if err := Load(path, &cfg); err != nil {
-return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
+return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
 }
 validate := validator.New()
 if err := validate.Struct(cfg); err != nil {
 return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
 }
+if coordinator {
+if err := validate.Struct(cfg.Coordinator); err != nil {
+return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
+}
+}
 return &cfg, nil
 }
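The new `ForgeBatchGasCost` is documented as "split into different parts to be used in a formula", and `GasPriceIncPerc` as a percentage increase over the node-suggested gas price, bounded by `MaxGasPrice`. One plausible reading of that formula, sketched under those assumptions (this is not the node's actual implementation):

```
package main

import (
	"fmt"
	"math/big"
)

// Mirrors config.ForgeBatchGasCost from the diff above.
type ForgeBatchGasCost struct {
	Fixed, L1UserTx, L1CoordTx, L2Tx uint64
}

// Assumed linear formula: fixed cost plus a per-tx cost for each tx kind.
func forgeBatchGas(c ForgeBatchGasCost, nL1User, nL1Coord, nL2 uint64) uint64 {
	return c.Fixed + nL1User*c.L1UserTx + nL1Coord*c.L1CoordTx + nL2*c.L2Tx
}

// Assumed bump: suggested * (100 + GasPriceIncPerc) / 100, capped by MaxGasPrice.
func bumpedGasPrice(suggested, maxGasPrice *big.Int, incPerc int64) *big.Int {
	p := new(big.Int).Mul(suggested, big.NewInt(100+incPerc))
	p.Div(p, big.NewInt(100))
	if p.Cmp(maxGasPrice) > 0 {
		return new(big.Int).Set(maxGasPrice)
	}
	return p
}

func main() {
	c := ForgeBatchGasCost{Fixed: 300000, L1UserTx: 8000, L1CoordTx: 7000, L2Tx: 5000}
	fmt.Println(forgeBatchGas(c, 10, 2, 100)) // 300000 + 80000 + 14000 + 500000 = 894000
	fmt.Println(bumpedGasPrice(big.NewInt(1000000000), big.NewInt(5000000000), 10)) // 1.1 gwei
}
```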
@@ -47,6 +47,8 @@ type Debug struct {
 MineBlockNum int64
 // SendBlockNum is the blockNum when the batch was sent to ethereum
 SendBlockNum int64
+// ResendNum is the number of times the tx has been resent
+ResendNum int
 // LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
 // was scheduled
 LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
 // StartToSendDelay is the delay between starting a batch and sending
 // it to ethereum, in seconds
 StartToSendDelay float64
+// StartToMineDelay is the delay between starting a batch and having
+// it mined in seconds
+StartToMineDelay float64
+// SendToMineDelay is the delay between sending a batch tx and having
+// it mined in seconds
+SendToMineDelay float64
 }
 
 // BatchInfo contans the Batch information
 type BatchInfo struct {
+PipelineNum int
 BatchNum common.BatchNum
 ServerProof prover.Client
 ZKInputs *common.ZKInputs
@@ -83,7 +92,14 @@ type BatchInfo struct {
 ForgeBatchArgs *eth.RollupForgeBatchArgs
 // FeesInfo
 EthTx *types.Transaction
+EthTxErr error
+// SendTimestamp the time of batch sent to ethereum
+SendTimestamp time.Time
 Receipt *types.Receipt
+// Fail is true if:
+// - The receipt status is failed
+// - A previous parent batch is failed
+Fail bool
 Debug Debug
 }
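The three `Debug` delay fields form a simple triangle: `StartToMineDelay = StartToSendDelay + SendToMineDelay` when all three are measured from the same timestamps. A sketch with assumed timestamp names:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical pipeline timestamps for one batch.
	startedAt := time.Now()
	sentAt := startedAt.Add(3 * time.Second)   // batch tx sent to ethereum
	minedAt := startedAt.Add(15 * time.Second) // receipt observed

	startToSend := sentAt.Sub(startedAt).Seconds()  // Debug.StartToSendDelay
	startToMine := minedAt.Sub(startedAt).Seconds() // Debug.StartToMineDelay
	sendToMine := minedAt.Sub(sentAt).Seconds()     // Debug.SendToMineDelay
	fmt.Println(startToSend, startToMine, sendToMine) // 3 15 12
}
```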
@@ -3,14 +3,15 @@ package coordinator
 import (
 "context"
 "fmt"
+"math/big"
 "os"
-"strings"
 "sync"
 "time"
 
 ethCommon "github.com/ethereum/go-ethereum/common"
 "github.com/hermeznetwork/hermez-node/batchbuilder"
 "github.com/hermeznetwork/hermez-node/common"
+"github.com/hermeznetwork/hermez-node/config"
 "github.com/hermeznetwork/hermez-node/db/historydb"
 "github.com/hermeznetwork/hermez-node/db/l2db"
 "github.com/hermeznetwork/hermez-node/eth"
@@ -24,6 +25,8 @@ import (
 
 var (
 errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
+errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
+errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
 )
 
 const (
@@ -42,18 +45,68 @@ type Config struct {
 // L1BatchTimeoutPerc is the portion of the range before the L1Batch
 // timeout that will trigger a schedule to forge an L1Batch
 L1BatchTimeoutPerc float64
+// StartSlotBlocksDelay is the number of blocks of delay to wait before
+// starting the pipeline when we reach a slot in which we can forge.
+StartSlotBlocksDelay int64
+// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
+// the forger address is checked to be allowed to forge (apart from
+// checking the next block), used to decide when to stop scheduling new
+// batches (by stopping the pipeline).
+// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
+// is 5, eventhough at block 11 we canForge, the pipeline will be
+// stopped if we can't forge at block 15.
+// This value should be the expected number of blocks it takes between
+// scheduling a batch and having it mined.
+ScheduleBatchBlocksAheadCheck int64
+// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
+// which the coordinator is also checked to be allowed to forge, apart
+// from the next block; used to decide when to stop sending batches to
+// the smart contract.
+// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
+// 5, eventhough at block 11 we canForge, the batch will be discarded
+// if we can't forge at block 15.
+// This value should be the expected number of blocks it takes between
+// sending a batch and having it mined.
+SendBatchBlocksMarginCheck int64
 // EthClientAttempts is the number of attempts to do an eth client RPC
 // call before giving up
 EthClientAttempts int
 // ForgeRetryInterval is the waiting interval between calls forge a
 // batch after an error
 ForgeRetryInterval time.Duration
+// ForgeDelay is the delay after which a batch is forged if the slot is
+// already committed. If set to 0s, the coordinator will continuously
+// forge at the maximum rate.
+ForgeDelay time.Duration
+// ForgeNoTxsDelay is the delay after which a batch is forged even if
+// there are no txs to forge if the slot is already committed. If set
+// to 0s, the coordinator will continuously forge even if the batches
+// are empty.
+ForgeNoTxsDelay time.Duration
 // SyncRetryInterval is the waiting interval between calls to the main
 // handler of a synced block after an error
 SyncRetryInterval time.Duration
+// PurgeByExtDelInterval is the waiting interval between calls
+// to the PurgeByExternalDelete function of the l2db which deletes
+// pending txs externally marked by the column `external_delete`
+PurgeByExtDelInterval time.Duration
 // EthClientAttemptsDelay is delay between attempts do do an eth client
 // RPC call
 EthClientAttemptsDelay time.Duration
+// EthTxResendTimeout is the timeout after which a non-mined ethereum
+// transaction will be resent (reusing the nonce) with a newly
+// calculated gas price
+EthTxResendTimeout time.Duration
+// EthNoReuseNonce disables reusing nonces of pending transactions for
+// new replacement transactions
+EthNoReuseNonce bool
+// MaxGasPrice is the maximum gas price allowed for ethereum
+// transactions
+MaxGasPrice *big.Int
+// GasPriceIncPerc is the percentage increase of gas price set in an
+// ethereum transaction from the suggested gas price by the ehtereum
+// node
+GasPriceIncPerc int64
 // TxManagerCheckInterval is the waiting interval between receipt
 // checks of ethereum transactions in the TxManager
 TxManagerCheckInterval time.Duration
@@ -61,7 +114,12 @@ type Config struct {
 // in JSON in every step/update of the pipeline
 DebugBatchPath string
 Purger PurgerCfg
+// VerifierIdx is the index of the verifier contract registered in the
+// smart contract
 VerifierIdx uint8
+// ForgeBatchGasCost contains the cost of each action in the
+// ForgeBatch transaction.
+ForgeBatchGasCost config.ForgeBatchGasCost
 TxProcessorConfig txprocessor.Config
 }
 
@@ -74,13 +132,20 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
 }
 }
 
+type fromBatch struct {
+BatchNum common.BatchNum
+ForgerAddr ethCommon.Address
+StateRoot *big.Int
+}
+
 // Coordinator implements the Coordinator type
 type Coordinator struct {
 // State
-pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
+pipelineNum int // Pipeline sequential number. The first pipeline is 1
+pipelineFromBatch fromBatch // batch from which we started the pipeline
 provers []prover.Client
-consts synchronizer.SCConsts
-vars synchronizer.SCVariables
+consts common.SCConsts
+vars common.SCVariables
 stats synchronizer.Stats
 started bool
 
@@ -96,7 +161,17 @@ type Coordinator struct {
 wg sync.WaitGroup
 cancel context.CancelFunc
 
+// mutexL2DBUpdateDelete protects updates to the L2DB so that
+// these two processes always happen exclusively:
+// - Pipeline taking pending txs, running through the TxProcessor and
+// marking selected txs as forging
+// - Coordinator deleting pending txs that have been marked with
+// `external_delete`.
+// Without this mutex, the coordinator could delete a pending txs that
+// has just been selected by the TxProcessor in the pipeline.
+mutexL2DBUpdateDelete sync.Mutex
 pipeline *Pipeline
+lastNonFailedBatchNum common.BatchNum
 
 purger *Purger
 txManager *TxManager
@@ -110,8 +185,8 @@ func NewCoordinator(cfg Config,
 batchBuilder *batchbuilder.BatchBuilder,
 serverProofs []prover.Client,
 ethClient eth.ClientInterface,
-scConsts *synchronizer.SCConsts,
-initSCVars *synchronizer.SCVariables,
+scConsts *common.SCConsts,
+initSCVars *common.SCVariables,
 ) (*Coordinator, error) {
 // nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
 if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -139,7 +214,12 @@ func NewCoordinator(cfg Config,
 
 ctx, cancel := context.WithCancel(context.Background())
 c := Coordinator{
-pipelineBatchNum: -1,
+pipelineNum: 0,
+pipelineFromBatch: fromBatch{
+BatchNum: 0,
+ForgerAddr: ethCommon.Address{},
+StateRoot: big.NewInt(0),
+},
 provers: serverProofs,
 consts: *scConsts,
 vars: *initSCVars,
@@ -183,8 +263,10 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
 }
 
 func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
-return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
-c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
+c.pipelineNum++
+return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
+c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
+c.provers, &c.consts)
 }
 
 // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -193,18 +275,21 @@ type MsgSyncBlock struct {
 Batches []common.BatchData
 // Vars contains each Smart Contract variables if they are updated, or
 // nil if they haven't changed.
-Vars synchronizer.SCVariablesPtr
+Vars common.SCVariablesPtr
 }
 
 // MsgSyncReorg indicates a reorg
 type MsgSyncReorg struct {
 Stats synchronizer.Stats
-Vars synchronizer.SCVariablesPtr
+Vars common.SCVariablesPtr
 }
 
 // MsgStopPipeline indicates a signal to reset the pipeline
 type MsgStopPipeline struct {
 Reason string
+// FailedBatchNum indicates the first batchNum that failed in the
+// pipeline. If FailedBatchNum is 0, it should be ignored.
+FailedBatchNum common.BatchNum
 }
 
 // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -215,27 +300,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
 }
 }
 
-func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
-if vars.Rollup != nil {
-c.vars.Rollup = *vars.Rollup
+func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
+if update.Rollup != nil {
+vars.Rollup = *update.Rollup
 }
-if vars.Auction != nil {
-c.vars.Auction = *vars.Auction
+if update.Auction != nil {
+vars.Auction = *update.Auction
 }
-if vars.WDelayer != nil {
-c.vars.WDelayer = *vars.WDelayer
+if update.WDelayer != nil {
+vars.WDelayer = *update.WDelayer
 }
 }
 
+func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
+updateSCVars(&c.vars, vars)
+}
+
 func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
 currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
+if blockNum < auctionConstants.GenesisBlockNum {
+log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
+"genesis", auctionConstants.GenesisBlockNum)
+return false
+}
 var slot *common.Slot
 if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
 slot = currentSlot
 } else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
|
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
|
||||||
slot = nextSlot
|
slot = nextSlot
|
||||||
} else {
|
} else {
|
||||||
log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
|
log.Warnw("canForge: requested blockNum is outside current and next slot",
|
||||||
"blockNum", blockNum, "currentSlot", currentSlot,
|
"blockNum", blockNum, "currentSlot", currentSlot,
|
||||||
"nextSlot", nextSlot,
|
"nextSlot", nextSlot,
|
||||||
)
|
)
|
||||||
@@ -244,16 +338,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
|
|||||||
anyoneForge := false
|
anyoneForge := false
|
||||||
if !slot.ForgerCommitment &&
|
if !slot.ForgerCommitment &&
|
||||||
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
|
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
|
||||||
log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
|
log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
|
||||||
"block", blockNum)
|
"block", blockNum)
|
||||||
anyoneForge = true
|
anyoneForge = true
|
||||||
}
|
}
|
||||||
if slot.Forger == addr || anyoneForge {
|
if slot.Forger == addr || anyoneForge {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Coordinator) canForgeAt(blockNum int64) bool {
|
||||||
|
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||||
|
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
||||||
|
c.cfg.ForgerAddress, blockNum)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Coordinator) canForge() bool {
|
func (c *Coordinator) canForge() bool {
|
||||||
blockNum := c.stats.Eth.LastBlock.Num + 1
|
blockNum := c.stats.Eth.LastBlock.Num + 1
|
||||||
return canForge(&c.consts.Auction, &c.vars.Auction,
|
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||||
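The permission rule implemented by the two hunks above reduces to: the slot's winning forger may always forge, and once the slot's deadline block passes with no forge committed, anyone may. A minimal self-contained sketch of that rule, with simplified types standing in for `common.Slot` and the auction parameters (the field names, addresses, and numeric values here are illustrative, not the node's API):

```
package main

import "fmt"

// slot is a simplified stand-in for common.Slot.
type slot struct {
	startBlock       int64
	endBlock         int64
	forger           string
	forgerCommitment bool
}

// canForgeSketch mirrors the check above: the slot winner can always forge;
// after slotDeadline relative blocks with no committed forge, anyone can.
func canForgeSketch(s slot, genesisBlock, blocksPerSlot, slotDeadline int64,
	addr string, blockNum int64) bool {
	if blockNum < genesisBlock {
		return false
	}
	relativeBlock := (blockNum - genesisBlock) % blocksPerSlot
	anyoneForge := !s.forgerCommitment && relativeBlock >= slotDeadline
	return s.forger == addr || anyoneForge
}

func main() {
	s := slot{startBlock: 100, endBlock: 139, forger: "0xwinner"}
	fmt.Println(canForgeSketch(s, 100, 40, 20, "0xwinner", 105)) // true: slot winner
	fmt.Println(canForgeSketch(s, 100, 40, 20, "0xother", 105))  // false: before deadline
	fmt.Println(canForgeSketch(s, 100, 40, 20, "0xother", 125))  // true: deadline passed, no commitment
}
```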
@@ -262,21 +363,51 @@ func (c *Coordinator) canForge() bool {
 }
 
 func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
-	canForge := c.canForge()
+	nextBlock := c.stats.Eth.LastBlock.Num + 1
+	canForge := c.canForgeAt(nextBlock)
+	if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
+		canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
+	}
 	if c.pipeline == nil {
-		if canForge {
+		relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
+		if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
+			log.Debugf("Coordinator: delaying pipeline start due to "+
+				"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
+				relativeBlock, c.cfg.StartSlotBlocksDelay)
+		} else if canForge {
 			log.Infow("Coordinator: forging state begin", "block",
-				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
-			batchNum := common.BatchNum(stats.Sync.LastBatch)
+				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
+			fromBatch := fromBatch{
+				BatchNum:   stats.Sync.LastBatch.BatchNum,
+				ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
+				StateRoot:  stats.Sync.LastBatch.StateRoot,
+			}
+			if c.lastNonFailedBatchNum > fromBatch.BatchNum {
+				fromBatch.BatchNum = c.lastNonFailedBatchNum
+				fromBatch.ForgerAddr = c.cfg.ForgerAddress
+				fromBatch.StateRoot = big.NewInt(0)
+			}
+			// Before starting the pipeline make sure we reset any
+			// l2tx from the pool that was forged in a batch that
+			// didn't end up being mined. We are already doing
+			// this in handleStopPipeline, but we do it again as a
+			// failsafe in case the last synced batchnum is
+			// different than in the previous call to l2DB.Reorg,
+			// or in case the node was restarted when there was a
+			// started batch that included l2txs but was not mined.
+			if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
+				return tracerr.Wrap(err)
+			}
 			var err error
 			if c.pipeline, err = c.newPipeline(ctx); err != nil {
 				return tracerr.Wrap(err)
 			}
-			if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
+			c.pipelineFromBatch = fromBatch
+			// Start the pipeline
+			if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
 				c.pipeline = nil
 				return tracerr.Wrap(err)
 			}
-			c.pipelineBatchNum = batchNum
 		}
 	} else {
 		if !canForge {
@@ -286,25 +417,12 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 		}
 	}
 	if c.pipeline == nil {
-		// Mark invalid in Pool due to forged L2Txs
-		// for _, batch := range batches {
-		// if err := c.l2DB.InvalidateOldNonces(
-		// 	idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
-		// 	return err
-		// }
-		// }
-		if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
-			if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
+		if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
+			stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
 			return tracerr.Wrap(err)
 		}
-		}
-		_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
-			stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
-		if err != nil {
-			return tracerr.Wrap(err)
-		}
-		_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
-		if err != nil {
+		if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
+			int64(stats.Sync.LastBatch.BatchNum)); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -331,33 +449,43 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
 	if c.pipeline != nil {
 		c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
 	}
-	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
-		// There's been a reorg and the batch from which the pipeline
-		// was started was in a block that was discarded. The batch
-		// may not be in the main chain, so we stop the pipeline as a
-		// precaution (it will be started again once the node is in
-		// sync).
-		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
-			"sync.LastBatch", c.stats.Sync.LastBatch,
-			"c.pipelineBatchNum", c.pipelineBatchNum)
-		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
+	if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
+		(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil ||
+			c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) {
+		// There's been a reorg and the batch state root from which the
+		// pipeline was started has changed (probably because it was in
+		// a block that was discarded), and it was sent by a different
+		// coordinator than us. That batch may never be in the main
+		// chain, so we stop the pipeline (it will be started again
+		// once the node is in sync).
+		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
+			"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
+			"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
+			"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
+		c.txManager.DiscardPipeline(ctx, c.pipelineNum)
+		if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
 	return nil
 }
 
-func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
-	if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
-		return tracerr.Wrap(err)
+// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
+// the next pipeline will start from the last state of the synchronizer,
+// otherwise, it will start from failedBatchNum-1.
+func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
+	batchNum := c.stats.Sync.LastBatch.BatchNum
+	if failedBatchNum != 0 {
+		batchNum = failedBatchNum - 1
 	}
 	if c.pipeline != nil {
 		c.pipeline.Stop(c.ctx)
 		c.pipeline = nil
 	}
-	if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
-		// TODO: Check that we are in a slot in which we can't forge
+	if err := c.l2DB.Reorg(batchNum); err != nil {
+		return tracerr.Wrap(err)
 	}
+	c.lastNonFailedBatchNum = batchNum
 	return nil
 }
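The restart rule that handleStopPipeline encodes is small enough to state as a pure function: on a failure at batch N the pool is rolled back to N-1, otherwise to the last batch the synchronizer has seen. A hedged sketch of just that decision, with simplified types:

```
package main

import "fmt"

type batchNum int64

// resetBatch mirrors handleStopPipeline's choice of the batch to roll the
// L2 pool back to: failedBatchNum-1 when a batch failed, otherwise the last
// batch seen by the synchronizer (failedBatchNum == 0 means "no failure").
func resetBatch(lastSyncedBatch, failedBatchNum batchNum) batchNum {
	if failedBatchNum != 0 {
		return failedBatchNum - 1
	}
	return lastSyncedBatch
}

func main() {
	fmt.Println(resetBatch(42, 0))  // 42: no failure, follow the synchronizer
	fmt.Println(resetBatch(42, 40)) // 39: batch 40 failed, restart just before it
}
```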
@@ -373,7 +501,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
 		}
 	case MsgStopPipeline:
 		log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
-		if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
+		if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
 			return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
 		}
 	default:
@@ -396,7 +524,7 @@ func (c *Coordinator) Start() {
 
 	c.wg.Add(1)
 	go func() {
-		waitDuration := longWaitDuration
+		timer := time.NewTimer(longWaitDuration)
 		for {
 			select {
 			case <-c.ctx.Done():
@@ -408,23 +536,45 @@ func (c *Coordinator) Start() {
 					continue
 				} else if err != nil {
 					log.Errorw("Coordinator.handleMsg", "err", err)
-					waitDuration = c.cfg.SyncRetryInterval
+					if !timer.Stop() {
+						<-timer.C
+					}
+					timer.Reset(c.cfg.SyncRetryInterval)
 					continue
 				}
-				waitDuration = longWaitDuration
-			case <-time.After(waitDuration):
+			case <-timer.C:
+				timer.Reset(longWaitDuration)
 				if !c.stats.Synced() {
-					waitDuration = longWaitDuration
 					continue
 				}
 				if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
 					continue
 				} else if err != nil {
 					log.Errorw("Coordinator.syncStats", "err", err)
-					waitDuration = c.cfg.SyncRetryInterval
+					if !timer.Stop() {
+						<-timer.C
+					}
+					timer.Reset(c.cfg.SyncRetryInterval)
 					continue
 				}
-				waitDuration = longWaitDuration
+			}
+		}
+	}()
+
+	c.wg.Add(1)
+	go func() {
+		for {
+			select {
+			case <-c.ctx.Done():
+				log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
+				c.wg.Done()
+				return
+			case <-time.After(c.cfg.PurgeByExtDelInterval):
+				c.mutexL2DBUpdateDelete.Lock()
+				if err := c.l2DB.PurgeByExternalDelete(); err != nil {
+					log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
+				}
+				c.mutexL2DBUpdateDelete.Unlock()
 			}
 		}
 	}()
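The switch from `time.After(waitDuration)` to a long-lived `time.Timer` in the hunk above follows the standard Go idiom: before `Reset`, the timer must be stopped, and its channel drained when `Stop` reports the timer already fired, otherwise a stale tick can wake the loop immediately. A standalone sketch of the pattern (the durations are arbitrary):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	const longWait = 200 * time.Millisecond
	const retry = 50 * time.Millisecond

	timer := time.NewTimer(longWait)
	for i := 0; i < 3; i++ {
		select {
		case <-timer.C:
			// The timer fired and the receive above drained its channel,
			// so it is safe to Reset directly.
			fmt.Println("tick", i)
			timer.Reset(longWait)
		case <-time.After(10 * time.Millisecond):
			// Rescheduling while the timer may still be armed: Stop it
			// and, if Stop reports it already fired, drain the stale tick
			// before Reset.
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(retry)
			fmt.Println("reschedule", i)
		}
	}
}
```

Compared with allocating a fresh `time.After` channel on every loop iteration, the reused timer avoids a per-iteration allocation and a timer that keeps running after the select has moved on.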
|||||||
@@ -97,15 +97,16 @@ func newTestModules(t *testing.T) modules {
|
|||||||
syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
|
syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
deleteme = append(deleteme, syncDBPath)
|
deleteme = append(deleteme, syncDBPath)
|
||||||
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
|
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
|
||||||
|
Type: statedb.TypeSynchronizer, NLevels: 48})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
pass := os.Getenv("POSTGRES_PASS")
|
pass := os.Getenv("POSTGRES_PASS")
|
||||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
test.WipeDB(db)
|
test.WipeDB(db)
|
||||||
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
|
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
|
||||||
historyDB := historydb.NewHistoryDB(db, nil)
|
historyDB := historydb.NewHistoryDB(db, db, nil)
|
||||||
|
|
||||||
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
|
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -186,12 +187,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
|
|||||||
&prover.MockClient{Delay: 400 * time.Millisecond},
|
&prover.MockClient{Delay: 400 * time.Millisecond},
|
||||||
}
|
}
|
||||||
|
|
||||||
scConsts := &synchronizer.SCConsts{
|
scConsts := &common.SCConsts{
|
||||||
Rollup: *ethClientSetup.RollupConstants,
|
Rollup: *ethClientSetup.RollupConstants,
|
||||||
Auction: *ethClientSetup.AuctionConstants,
|
Auction: *ethClientSetup.AuctionConstants,
|
||||||
WDelayer: *ethClientSetup.WDelayerConstants,
|
WDelayer: *ethClientSetup.WDelayerConstants,
|
||||||
}
|
}
|
||||||
initSCVars := &synchronizer.SCVariables{
|
initSCVars := &common.SCVariables{
|
||||||
Rollup: *ethClientSetup.RollupVariables,
|
Rollup: *ethClientSetup.RollupVariables,
|
||||||
Auction: *ethClientSetup.AuctionVariables,
|
Auction: *ethClientSetup.AuctionVariables,
|
||||||
WDelayer: *ethClientSetup.WDelayerVariables,
|
WDelayer: *ethClientSetup.WDelayerVariables,
|
||||||
@@ -260,8 +261,8 @@ func TestCoordinatorFlow(t *testing.T) {
|
|||||||
var stats synchronizer.Stats
|
var stats synchronizer.Stats
|
||||||
stats.Eth.LastBlock = *ethClient.CtlLastBlock()
|
stats.Eth.LastBlock = *ethClient.CtlLastBlock()
|
||||||
stats.Sync.LastBlock = stats.Eth.LastBlock
|
stats.Sync.LastBlock = stats.Eth.LastBlock
|
||||||
stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
|
stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
|
||||||
stats.Sync.LastBatch = stats.Eth.LastBatch
|
stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
|
||||||
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
|
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var slot common.Slot
|
var slot common.Slot
|
||||||
@@ -278,7 +279,7 @@ func TestCoordinatorFlow(t *testing.T) {
|
|||||||
// Copy stateDB to synchronizer if there was a new batch
|
// Copy stateDB to synchronizer if there was a new batch
|
||||||
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
|
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
|
||||||
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
|
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
|
||||||
if stats.Sync.LastBatch != 0 {
|
if stats.Sync.LastBatch.BatchNum != 0 {
|
||||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||||
log.Infow("Making pebble checkpoint for sync",
|
log.Infow("Making pebble checkpoint for sync",
|
||||||
"source", source, "dest", dest)
|
"source", source, "dest", dest)
|
||||||
@@ -516,7 +517,7 @@ func TestCoordinatorStress(t *testing.T) {
|
|||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
blockData, _, err := syn.Sync2(ctx, nil)
|
blockData, _, err := syn.Sync(ctx, nil)
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
wg.Done()
|
wg.Done()
|
||||||
return
|
return
|
||||||
@@ -527,7 +528,7 @@ func TestCoordinatorStress(t *testing.T) {
|
|||||||
coord.SendMsg(ctx, MsgSyncBlock{
|
coord.SendMsg(ctx, MsgSyncBlock{
|
||||||
Stats: *stats,
|
Stats: *stats,
|
||||||
Batches: blockData.Rollup.Batches,
|
Batches: blockData.Rollup.Batches,
|
||||||
Vars: synchronizer.SCVariablesPtr{
|
Vars: common.SCVariablesPtr{
|
||||||
Rollup: blockData.Rollup.Vars,
|
Rollup: blockData.Rollup.Vars,
|
||||||
Auction: blockData.Auction.Vars,
|
Auction: blockData.Auction.Vars,
|
||||||
WDelayer: blockData.WDelayer.Vars,
|
WDelayer: blockData.WDelayer.Vars,
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package coordinator
 
 import (
 	"context"
+	"database/sql"
 	"fmt"
 	"math/big"
 	"sync"
@@ -21,31 +22,42 @@ import (
 
 type statsVars struct {
 	Stats synchronizer.Stats
-	Vars  synchronizer.SCVariablesPtr
+	Vars  common.SCVariablesPtr
+}
+
+type state struct {
+	batchNum                     common.BatchNum
+	lastScheduledL1BatchBlockNum int64
+	lastForgeL1TxsNum            int64
+	lastSlotForged               int64
 }
 
 // Pipeline manages the forging of batches with parallel server proofs
 type Pipeline struct {
+	num    int
 	cfg    Config
-	consts synchronizer.SCConsts
+	consts common.SCConsts
 
 	// state
-	batchNum                     common.BatchNum
-	lastScheduledL1BatchBlockNum int64
-	lastForgeL1TxsNum            int64
+	state         state
 	started       bool
+	rw            sync.RWMutex
+	errAtBatchNum common.BatchNum
+	lastForgeTime time.Time
 
 	proversPool  *ProversPool
 	provers      []prover.Client
+	coord        *Coordinator
 	txManager    *TxManager
 	historyDB    *historydb.HistoryDB
 	l2DB         *l2db.L2DB
 	txSelector   *txselector.TxSelector
 	batchBuilder *batchbuilder.BatchBuilder
+	mutexL2DBUpdateDelete *sync.Mutex
 	purger       *Purger
 
 	stats       synchronizer.Stats
-	vars        synchronizer.SCVariables
+	vars        common.SCVariables
 	statsVarsCh chan statsVars
 
 	ctx    context.Context
@@ -53,17 +65,32 @@ type Pipeline struct {
 	cancel context.CancelFunc
 }
 
+func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
+	p.rw.Lock()
+	defer p.rw.Unlock()
+	p.errAtBatchNum = batchNum
+}
+
+func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
+	p.rw.RLock()
+	defer p.rw.RUnlock()
+	return p.errAtBatchNum
+}
+
 // NewPipeline creates a new Pipeline
 func NewPipeline(ctx context.Context,
 	cfg Config,
+	num int, // Pipeline sequential number
 	historyDB *historydb.HistoryDB,
 	l2DB *l2db.L2DB,
 	txSelector *txselector.TxSelector,
 	batchBuilder *batchbuilder.BatchBuilder,
+	mutexL2DBUpdateDelete *sync.Mutex,
 	purger *Purger,
+	coord *Coordinator,
 	txManager *TxManager,
 	provers []prover.Client,
-	scConsts *synchronizer.SCConsts,
+	scConsts *common.SCConsts,
 ) (*Pipeline, error) {
 	proversPool := NewProversPool(len(provers))
 	proversPoolSize := 0
@@ -79,6 +106,7 @@ func NewPipeline(ctx context.Context,
 		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
 	}
 	return &Pipeline{
+		num:          num,
 		cfg:          cfg,
 		historyDB:    historyDB,
 		l2DB:         l2DB,
@@ -86,7 +114,9 @@ func NewPipeline(ctx context.Context,
 		batchBuilder: batchBuilder,
 		provers:      provers,
 		proversPool:  proversPool,
+		mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
 		purger:       purger,
+		coord:        coord,
 		txManager:    txManager,
 		consts:       *scConsts,
 		statsVarsCh:  make(chan statsVars, queueLen),
@@ -94,7 +124,7 @@ func NewPipeline(ctx context.Context,
 }
 
 // SetSyncStatsVars is a thread safe method to set the synchronizer Stats
-func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
+func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
 	select {
 	case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
 	case <-ctx.Done():
@@ -103,54 +133,77 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
 
 // reset pipeline state
 func (p *Pipeline) reset(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
-	p.batchNum = batchNum
-	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
+	stats *synchronizer.Stats, vars *common.SCVariables) error {
+	p.state = state{
+		batchNum:                     batchNum,
+		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
+		lastScheduledL1BatchBlockNum: 0,
+		lastSlotForged:               -1,
+	}
 	p.stats = *stats
 	p.vars = *vars
-	p.lastScheduledL1BatchBlockNum = 0
 
-	err := p.txSelector.Reset(p.batchNum)
+	// Reset the StateDB in TxSelector and BatchBuilder from the
+	// synchronizer only if the checkpoint we reset from either:
+	// a. Doesn't exist in the TxSelector/BatchBuilder
+	// b. The batch has already been synced by the synchronizer and has a
+	//    different MTRoot than the BatchBuilder
+	// Otherwise, reset from the local checkpoint.
+
+	// First attempt to reset from local checkpoint if such checkpoint exists
+	existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	err = p.batchBuilder.Reset(p.batchNum, true)
+	fromSynchronizerTxSelector := !existsTxSelector
+	if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
+		return tracerr.Wrap(err)
+	}
+	existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
+	fromSynchronizerBatchBuilder := !existsBatchBuilder
+	if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
+		return tracerr.Wrap(err)
+	}
+
+	// After reset, check that if the batch exists in the historyDB, the
+	// stateRoot matches with the local one, if not, force a reset from
+	// synchronizer
+	batch, err := p.historyDB.GetBatch(p.state.batchNum)
+	if tracerr.Unwrap(err) == sql.ErrNoRows {
+		// nothing to do
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
+		localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
+		if batch.StateRoot.Cmp(localStateRoot) != 0 {
+			log.Debugf("localStateRoot (%v) != historyDB stateRoot (%v). "+
+				"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
+			// StateRoot from synchronizer doesn't match StateRoot
+			// from batchBuilder, force a reset from synchronizer
+			if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
+				return tracerr.Wrap(err)
+			}
+			if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
+				return tracerr.Wrap(err)
+			}
+		}
+	}
 	return nil
 }
 
-func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		p.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		p.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		p.vars.WDelayer = *vars.WDelayer
-	}
+func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
+	updateSCVars(&p.vars, vars)
 }
 
-// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
-// and then waits for an available proof server and sends the zkInputs to it so
-// that the proof computation begins.
-func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
-	batchInfo, err := p.forgeBatch(batchNum)
-	if ctx.Err() != nil {
-		return nil, ctx.Err()
-	} else if err != nil {
-		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
-			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
-				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
-				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
-		} else {
-			log.Errorw("forgeBatch", "err", err)
-		}
-		return nil, err
-	}
-	// 6. Wait for an available server proof (blocking call)
+// handleForgeBatch waits for an available proof server, calls p.forgeBatch to
+// forge the batch and get the zkInputs, and then sends the zkInputs to the
+// selected proof server so that the proof computation begins.
+func (p *Pipeline) handleForgeBatch(ctx context.Context,
+	batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
+	// 1. Wait for an available serverProof (blocking call)
 	serverProof, err := p.proversPool.Get(ctx)
 	if ctx.Err() != nil {
 		return nil, ctx.Err()
@@ -158,13 +211,43 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
 		log.Errorw("proversPool.Get", "err", err)
 		return nil, err
 	}
+	defer func() {
+		// If we encounter any error (notice that this function returns
+		// errors to notify that a batch is not forged not only because
+		// of unexpected errors but also due to benign causes), add the
+		// serverProof back to the pool
+		if err != nil {
+			p.proversPool.Add(ctx, serverProof)
+		}
+	}()
+
+	// 2. Forge the batch internally (make a selection of txs and prepare
+	// all the smart contract arguments)
+	p.mutexL2DBUpdateDelete.Lock()
+	batchInfo, err = p.forgeBatch(batchNum)
+	p.mutexL2DBUpdateDelete.Unlock()
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	} else if err != nil {
+		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
+			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
+				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
+				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
+		} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
+			tracerr.Unwrap(err) == errForgeBeforeDelay {
+			// no log
+		} else {
+			log.Errorw("forgeBatch", "err", err)
+		}
+		return nil, err
+	}
+
+	// 3. Send the ZKInputs to the proof server
 	batchInfo.ServerProof = serverProof
 	if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
 		return nil, ctx.Err()
 	} else if err != nil {
 		log.Errorw("sendServerProof", "err", err)
-		batchInfo.ServerProof = nil
-		p.proversPool.Add(ctx, serverProof)
 		return nil, err
 	}
 	return batchInfo, nil
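The reordering in the hunk above (acquire a prover first, forge second) leans on a deferred release: any error return, benign or fatal, hands the prover back to the pool, so it can never leak. A toy sketch of the same pattern, with a buffered channel standing in for the ProversPool (all names here are illustrative):

```
package main

import (
	"errors"
	"fmt"
)

var errNothingToDo = errors.New("nothing to forge yet")

// handle acquires a worker, runs forge, and relies on the deferred closure
// to return the worker whenever forge fails for any reason.
func handle(pool chan int, forge func() error) (err error) {
	worker := <-pool // 1. blocking acquire
	defer func() {
		// On any error, benign or fatal, return the worker to the pool
		// so it is never leaked.
		if err != nil {
			pool <- worker
		}
	}()
	if err := forge(); err != nil { // 2. do the work
		return err
	}
	// 3. success: the worker stays checked out until the proof completes.
	fmt.Println("worker", worker, "busy proving")
	return nil
}

func main() {
	pool := make(chan int, 1)
	pool <- 7
	_ = handle(pool, func() error { return errNothingToDo }) // worker returned
	_ = handle(pool, func() error { return nil })            // worker kept busy
}
```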
@@ -172,7 +255,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
 
 // Start the forging pipeline
 func (p *Pipeline) Start(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
+	stats *synchronizer.Stats, vars *common.SCVariables) error {
 	if p.started {
 		log.Fatal("Pipeline already started")
 	}
@@ -188,7 +271,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 
 	p.wg.Add(1)
 	go func() {
-		waitDuration := zeroDuration
+		timer := time.NewTimer(zeroDuration)
 		for {
 			select {
 			case <-p.ctx.Done():
@@ -198,18 +281,42 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 			case statsVars := <-p.statsVarsCh:
 				p.stats = statsVars.Stats
 				p.syncSCVars(statsVars.Vars)
-			case <-time.After(waitDuration):
-				batchNum = p.batchNum + 1
-				if batchInfo, err := p.handleForgeBatch(p.ctx, batchNum); err != nil {
-					waitDuration = p.cfg.SyncRetryInterval
+			case <-timer.C:
+				timer.Reset(p.cfg.ForgeRetryInterval)
+				// Once errAtBatchNum != 0, we stop forging
+				// batches because there's been an error and we
+				// wait for the pipeline to be stopped.
+				if p.getErrAtBatchNum() != 0 {
 					continue
-				} else {
-					p.batchNum = batchNum
+				}
+				batchNum = p.state.batchNum + 1
+				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
+				if p.ctx.Err() != nil {
+					continue
+				} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
+					tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
+					tracerr.Unwrap(err) == errForgeBeforeDelay {
+					continue
+				} else if err != nil {
+					p.setErrAtBatchNum(batchNum)
+					p.coord.SendMsg(p.ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf(
+							"Pipeline.handleForgeBatch: %v", err),
+						FailedBatchNum: batchNum,
+					})
+					continue
+				}
+				p.lastForgeTime = time.Now()
+
+				p.state.batchNum = batchNum
 				select {
 				case batchChSentServerProof <- batchInfo:
 				case <-p.ctx.Done():
 				}
+				if !timer.Stop() {
+					<-timer.C
 				}
+				timer.Reset(zeroDuration)
 			}
 		}
 	}()
@@ -223,16 +330,27 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				p.wg.Done()
 				return
 			case batchInfo := <-batchChSentServerProof:
+				// Once errAtBatchNum != 0, we stop forging
+				// batches because there's been an error and we
+				// wait for the pipeline to be stopped.
+				if p.getErrAtBatchNum() != 0 {
+					continue
+				}
 				err := p.waitServerProof(p.ctx, batchInfo)
-				// We are done with this serverProof, add it back to the pool
-				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
-				batchInfo.ServerProof = nil
 				if p.ctx.Err() != nil {
 					continue
 				} else if err != nil {
 					log.Errorw("waitServerProof", "err", err)
+					p.setErrAtBatchNum(batchInfo.BatchNum)
+					p.coord.SendMsg(p.ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf(
+							"Pipeline.waitServerProof: %v", err),
+						FailedBatchNum: batchInfo.BatchNum,
+					})
 					continue
 				}
+				// We are done with this serverProof, add it back to the pool
+				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
 				p.txManager.AddBatch(p.ctx, batchInfo)
 			}
 		}
@@ -282,9 +400,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
-	batchInfo.Debug.StartTimestamp = time.Now()
+	// Structure to accumulate data and metadata of the batch
+	now := time.Now()
+	batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
+	batchInfo.Debug.StartTimestamp = now
 	batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
 
 	selectionCfg := &txselector.SelectionConfig{
@@ -298,22 +417,26 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	var auths [][]byte
 	var coordIdxs []common.Idx
 
+	// Check if the slot is not yet fulfilled
+	slotCommitted := false
+	if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
+		p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
+		slotCommitted = true
+	}
+
+	// If we haven't reached the ForgeDelay, skip forging the batch
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
+		return nil, errForgeBeforeDelay
+	}
+
 	// 1. Decide if we forge L2Tx or L1+L2Tx
 	if p.shouldL1L2Batch(batchInfo) {
 		batchInfo.L1Batch = true
-		defer func() {
-			// If there's no error, update the parameters related
-			// to the last L1Batch forged
-			if err == nil {
-				p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
-				p.lastForgeL1TxsNum++
-			}
-		}()
-		if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
+		if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
 			return nil, tracerr.Wrap(errLastL1BatchNotSynced)
 		}
 		// 2a: L1+L2 txs
-		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
+		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
@@ -332,6 +455,43 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 		l1UserTxsExtra = nil
 	}
 
+	// If there are no txs to forge, no l1UserTxs in the open queue to
+	// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
+	// batch.
+	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
+		noTxs := false
+		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
+			if batchInfo.L1Batch {
+				// Query the L1UserTxs in the queue following
+				// the one we are trying to forge.
+				nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
+					p.state.lastForgeL1TxsNum + 1)
+				if err != nil {
+					return nil, tracerr.Wrap(err)
+				}
+				// If there are future L1UserTxs, we forge a
+				// batch to advance the queues and forge the
+				// L1UserTxs in the future. Otherwise, skip.
+				if len(nextL1UserTxs) == 0 {
+					noTxs = true
+				}
+			} else {
+				noTxs = true
+			}
+		}
+		if noTxs {
+			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
+				return nil, tracerr.Wrap(err)
+			}
+			return nil, errForgeNoTxsBeforeDelay
+		}
+	}
+
+	if batchInfo.L1Batch {
+		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
+		p.state.lastForgeL1TxsNum++
+	}
+
 	// 3. Save metadata from TxSelector output for BatchNum
 	batchInfo.L1UserTxsExtra = l1UserTxsExtra
 	batchInfo.L1CoordTxs = l1CoordTxs
@@ -376,6 +536,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	p.cfg.debugBatchStore(batchInfo)
 	log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
 
+	p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
+
 	return batchInfo, nil
 }
 
@@ -397,12 +559,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
 func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
 	// Take the lastL1BatchBlockNum as the biggest between the last
 	// scheduled one, and the synchronized one.
-	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
+	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
 	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
 		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
 	}
 	// Set Debug information
-	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
+	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
 	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
 	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
 	batchInfo.Debug.L1BatchBlockScheduleDeadline =
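The two throttles introduced in forgeBatch above can be condensed into one predicate: once the current slot already has a committed batch, wait ForgeDelay between batches, and wait the (typically longer) ForgeNoTxsDelay when the batch would carry no transactions. A hedged sketch of that combined decision (parameter names are illustrative):

```
package main

import (
	"fmt"
	"time"
)

// shouldSkipForge condenses the gating above: the first batch of a slot is
// always forged to prove commitment; after that, batches are spaced by
// forgeDelay, and empty batches by the longer forgeNoTxsDelay.
func shouldSkipForge(slotCommitted bool, sinceLastForge time.Duration,
	hasTxs bool, forgeDelay, forgeNoTxsDelay time.Duration) bool {
	if !slotCommitted {
		return false // first batch of the slot: forge immediately
	}
	if sinceLastForge < forgeDelay {
		return true
	}
	if !hasTxs && sinceLastForge < forgeNoTxsDelay {
		return true
	}
	return false
}

func main() {
	fmt.Println(shouldSkipForge(true, 5*time.Second, true, 10*time.Second, time.Minute))   // true: too soon
	fmt.Println(shouldSkipForge(true, 15*time.Second, true, 10*time.Second, time.Minute))  // false: forge
	fmt.Println(shouldSkipForge(true, 15*time.Second, false, 10*time.Second, time.Minute)) // true: empty batch
}
```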
@@ -25,6 +25,14 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func newBigInt(s string) *big.Int {
+	v, ok := new(big.Int).SetString(s, 10)
+	if !ok {
+		panic(fmt.Errorf("Can't set big.Int from %s", s))
+	}
+	return v
+}
+
 func TestPipelineShouldL1L2Batch(t *testing.T) {
 	ethClientSetup := test.NewClientSetupExample()
 	ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -77,7 +85,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
 	//
 	// Scheduled L1Batch
 	//
-	pipeline.lastScheduledL1BatchBlockNum = startBlock
+	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
 	stats.Sync.LastL1BatchBlock = startBlock - 10
 
 	// We are one block before the timeout range * 0.5
@@ -128,6 +136,11 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
 	blocks, err := tc.GenerateBlocksFromInstructions(set)
 	require.NoError(t, err)
 	require.NotNil(t, blocks)
+	// Set StateRoots for batches manually (til doesn't set it)
+	blocks[0].Rollup.Batches[0].Batch.StateRoot =
+		newBigInt("0")
+	blocks[0].Rollup.Batches[1].Batch.StateRoot =
+		newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
 
 	ethAddTokens(blocks, ethClient)
 	err = ethClient.CtlAddBlocks(blocks)
@@ -135,7 +148,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
 
 	ctx := context.Background()
 	for {
-		syncBlock, discards, err := sync.Sync2(ctx, nil)
+		syncBlock, discards, err := sync.Sync(ctx, nil)
 		require.NoError(t, err)
 		require.Nil(t, discards)
 		if syncBlock == nil {
@@ -172,7 +185,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
 	// users with positive balances
 	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
 	syncStats := sync.Stats()
-	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
+	batchNum := syncStats.Sync.LastBatch.BatchNum
 	syncSCVars := sync.SCVars()
 
 	pipeline, err := coord.newPipeline(ctx)
@@ -193,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
 		require.NoError(t, err)
 	}
 
-	err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
-		Rollup:   *syncSCVars.Rollup,
-		Auction:  *syncSCVars.Auction,
-		WDelayer: *syncSCVars.WDelayer,
-	})
+	err = pipeline.reset(batchNum, syncStats, syncSCVars)
 	require.NoError(t, err)
 	// Sanity check
 	sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()
@@ -13,13 +13,23 @@ import (
 
 // PurgerCfg is the purger configuration
 type PurgerCfg struct {
-	// PurgeBatchDelay is the delay between batches to purge outdated transactions
+	// PurgeBatchDelay is the delay between batches to purge outdated
+	// transactions. Outdated L2Txs are those that have been forged or
+	// marked as invalid for longer than the SafetyPeriod and pending L2Txs
+	// that have been in the pool for longer than TTL once there are
+	// MaxTxs.
 	PurgeBatchDelay int64
-	// InvalidateBatchDelay is the delay between batches to mark invalid transactions
+	// InvalidateBatchDelay is the delay between batches to mark invalid
+	// transactions due to nonce lower than the account nonce.
 	InvalidateBatchDelay int64
-	// PurgeBlockDelay is the delay between blocks to purge outdated transactions
+	// PurgeBlockDelay is the delay between blocks to purge outdated
+	// transactions. Outdated L2Txs are those that have been forged or
+	// marked as invalid for longer than the SafetyPeriod and pending L2Txs
+	// that have been in the pool for longer than TTL once there are
+	// MaxTxs.
 	PurgeBlockDelay int64
-	// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
+	// InvalidateBlockDelay is the delay between blocks to mark invalid
+	// transactions due to nonce lower than the account nonce.
 	InvalidateBlockDelay int64
 }
 
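Reading the four PurgerCfg fields above together: a purge (or invalidation) pass is due once enough blocks or enough batches have elapsed since the last pass. A minimal sketch of that gating, under the assumption that the block and batch deadlines combine with OR (names mirror PurgerCfg but the function itself is illustrative):

```
package main

import "fmt"

// canPurge sketches the Purger's gating: a pass runs once enough blocks or
// enough batches have gone by since the previous one. The OR combination is
// an assumption made for illustration.
func canPurge(blockNum, lastPurgeBlock, purgeBlockDelay,
	batchNum, lastPurgeBatch, purgeBatchDelay int64) bool {
	return blockNum >= lastPurgeBlock+purgeBlockDelay ||
		batchNum >= lastPurgeBatch+purgeBatchDelay
}

func main() {
	fmt.Println(canPurge(105, 100, 10, 7, 5, 4)) // false: neither deadline reached
	fmt.Println(canPurge(111, 100, 10, 7, 5, 4)) // true: block deadline reached
	fmt.Println(canPurge(105, 100, 10, 9, 5, 4)) // true: batch deadline reached
}
```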
|||||||
@@ -21,19 +21,21 @@ func newL2DB(t *testing.T) *l2db.L2DB {
|
|||||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
test.WipeDB(db)
|
test.WipeDB(db)
|
||||||
return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
|
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newStateDB(t *testing.T) *statedb.LocalStateDB {
|
func newStateDB(t *testing.T) *statedb.LocalStateDB {
|
||||||
syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
|
syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
deleteme = append(deleteme, syncDBPath)
|
deleteme = append(deleteme, syncDBPath)
|
||||||
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
|
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
|
||||||
|
Type: statedb.TypeSynchronizer, NLevels: 48})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
|
stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
deleteme = append(deleteme, stateDBPath)
|
deleteme = append(deleteme, stateDBPath)
|
||||||
stateDB, err := statedb.NewLocalStateDB(stateDBPath, 128, syncStateDB, statedb.TypeTxSelector, 0)
|
stateDB, err := statedb.NewLocalStateDB(statedb.Config{Path: stateDBPath, Keep: 128,
|
||||||
|
Type: statedb.TypeTxSelector, NLevels: 0}, syncStateDB)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return stateDB
|
return stateDB
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,11 +4,13 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/hermeznetwork/hermez-node/common"
|
"github.com/hermeznetwork/hermez-node/common"
|
||||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||||
@@ -29,23 +31,31 @@ type TxManager struct {
|
|||||||
batchCh chan *BatchInfo
|
batchCh chan *BatchInfo
|
||||||
chainID *big.Int
|
chainID *big.Int
|
||||||
account accounts.Account
|
account accounts.Account
|
||||||
consts synchronizer.SCConsts
|
consts common.SCConsts
|
||||||
|
|
||||||
stats synchronizer.Stats
|
stats synchronizer.Stats
|
||||||
vars synchronizer.SCVariables
|
vars common.SCVariables
|
||||||
statsVarsCh chan statsVars
|
statsVarsCh chan statsVars
|
||||||
|
|
||||||
queue []*BatchInfo
|
discardPipelineCh chan int // int refers to the pipelineNum
|
||||||
|
|
||||||
|
minPipelineNum int
|
||||||
|
queue Queue
|
||||||
// lastSuccessBatch stores the last BatchNum that who's forge call was confirmed
|
// lastSuccessBatch stores the last BatchNum that who's forge call was confirmed
|
||||||
lastSuccessBatch common.BatchNum
|
lastSuccessBatch common.BatchNum
|
||||||
lastPendingBatch common.BatchNum
|
// lastPendingBatch common.BatchNum
|
||||||
lastSuccessNonce uint64
|
// accNonce is the account nonce in the last mined block (due to mined txs)
|
||||||
lastPendingNonce uint64
|
accNonce uint64
|
||||||
|
// accNextNonce is the nonce that we should use to send the next tx.
|
||||||
|
// In some cases this will be a reused nonce of an already pending tx.
|
||||||
|
accNextNonce uint64
|
||||||
|
|
||||||
|
lastSentL1BatchBlockNum int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTxManager creates a new TxManager
|
// NewTxManager creates a new TxManager
|
||||||
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
|
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
|
||||||
coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (*TxManager, error) {
|
coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (*TxManager, error) {
|
||||||
chainID, err := ethClient.EthChainID()
|
chainID, err := ethClient.EthChainID()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
@@ -54,19 +64,11 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
|
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
|
log.Infow("TxManager started", "nonce", accNonce)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if lastSuccessNonce != lastPendingNonce {
|
|
||||||
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
|
|
||||||
lastSuccessNonce, lastPendingNonce))
|
|
||||||
}
|
|
||||||
log.Infow("TxManager started", "nonce", lastSuccessNonce)
|
|
||||||
return &TxManager{
|
return &TxManager{
|
||||||
cfg: *cfg,
|
cfg: *cfg,
|
||||||
ethClient: ethClient,
|
ethClient: ethClient,
|
||||||
@@ -74,6 +76,7 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
|
|||||||
coord: coord,
|
coord: coord,
|
||||||
batchCh: make(chan *BatchInfo, queueLen),
|
batchCh: make(chan *BatchInfo, queueLen),
|
||||||
statsVarsCh: make(chan statsVars, queueLen),
|
statsVarsCh: make(chan statsVars, queueLen),
|
||||||
|
discardPipelineCh: make(chan int, queueLen),
|
||||||
account: accounts.Account{
|
account: accounts.Account{
|
||||||
Address: *address,
|
Address: *address,
|
||||||
},
|
},
|
||||||
@@ -82,8 +85,10 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
|
|||||||
|
|
||||||
vars: *initSCVars,
|
vars: *initSCVars,
|
||||||
|
|
||||||
lastSuccessNonce: lastSuccessNonce,
|
minPipelineNum: 0,
|
||||||
lastPendingNonce: lastPendingNonce,
|
queue: NewQueue(),
|
||||||
|
accNonce: accNonce,
|
||||||
|
accNextNonce: accNonce,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
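Note on the hunks above: the old strict startup check (mined nonce must equal pending nonce) is dropped in favor of a single mined-nonce snapshot (`accNonce`) plus a working cursor (`accNextNonce`) that may deliberately reuse a pending nonce on resend. A minimal sketch of that bookkeeping, using a hypothetical standalone type rather than the actual `TxManager`:

```go
// Minimal sketch of the nonce bookkeeping introduced above (hypothetical
// standalone type, not the repo's TxManager). accNonce tracks the nonce at
// the last mined block; accNextNonce is the cursor used for the next send.
package main

import "fmt"

type nonceTracker struct {
	accNonce     uint64 // nonce at last mined block (mined txs only)
	accNextNonce uint64 // nonce to use for the next tx (may reuse a pending one)
}

// onSend advances the cursor only after a fresh (non-resend) send.
func (n *nonceTracker) onSend(usedNonce uint64, resend bool) {
	if !resend {
		n.accNextNonce = usedNonce + 1
	}
}

// onReceipt bumps the mined nonce when a tx is confirmed.
func (n *nonceTracker) onReceipt(txNonce uint64) {
	if txNonce+1 > n.accNonce {
		n.accNonce = txNonce + 1
	}
}

func main() {
	n := nonceTracker{accNonce: 7, accNextNonce: 7}
	n.onSend(7, false) // send a batch with nonce 7
	n.onSend(7, true)  // resend keeps the cursor: same nonce replaces the tx
	n.onReceipt(7)     // mined: accNonce becomes 8
	fmt.Println(n.accNonce, n.accNextNonce) // 8 8
}
```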
@@ -97,35 +102,40 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
 }
 
 // SetSyncStatsVars is a thread safe method to set the synchronizer Stats
-func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
+func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
 	select {
 	case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
 	case <-ctx.Done():
 	}
 }
 
-func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		t.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		t.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		t.vars.WDelayer = *vars.WDelayer
-	}
+// DiscardPipeline is a thread safe method to notify about a discarded pipeline
+// due to a reorg
+func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
+	select {
+	case t.discardPipelineCh <- pipelineNum:
+	case <-ctx.Done():
+	}
+}
+
+func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
+	updateSCVars(&t.vars, vars)
 }
 
 // NewAuth generates a new auth object for an ethereum transaction
-func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
+func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) {
 	gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
+	if t.cfg.GasPriceIncPerc != 0 {
 		inc := new(big.Int).Set(gasPrice)
-	const gasPriceDiv = 100
 		inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc))
-	inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
+		// nolint reason: to calculate percentages we use 100
+		inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
 		gasPrice.Add(gasPrice, inc)
+	}
 
 	// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
 
 	auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -133,42 +143,95 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	auth.Value = big.NewInt(0) // in wei
-	// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
-	auth.GasLimit = 1000000
+	gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
+		uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
+		uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
+		uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
+	auth.GasLimit = gasLimit
 	auth.GasPrice = gasPrice
 	auth.Nonce = nil
 
 	return auth, nil
 }
 
-func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
-	// TODO: Check if we can forge in the next blockNum, abort if we can't
-	batchInfo.Debug.Status = StatusSent
-	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
-	batchInfo.Debug.SendTimestamp = time.Now()
-	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
-		batchInfo.Debug.StartTimestamp).Seconds()
+func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
+	nextBlock := t.stats.Eth.LastBlock.Num + 1
+	if !t.canForgeAt(nextBlock) {
+		return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
+	}
+	if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
+		return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
+	}
+	margin := t.cfg.SendBatchBlocksMarginCheck
+	if margin != 0 {
+		if !t.canForgeAt(nextBlock + margin) {
+			return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
+				margin, nextBlock))
+		}
+		if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
+			return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
+				margin, nextBlock))
+		}
+	}
+	return nil
+}
+
+func addPerc(v *big.Int, p int64) *big.Int {
+	r := new(big.Int).Set(v)
+	r.Mul(r, big.NewInt(p))
+	// nolint reason: to calculate percentages we divide by 100
+	r.Div(r, big.NewInt(100)) //nolint:gomnd
+	return r.Add(v, r)
+}
+
+func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
 	var ethTx *types.Transaction
 	var err error
-	auth, err := t.NewAuth(ctx)
+	auth, err := t.NewAuth(ctx, batchInfo)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
-	t.lastPendingNonce++
+	auth.Nonce = big.NewInt(int64(t.accNextNonce))
+	if resend {
+		auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
+	}
 	for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
+		if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
+			return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
+				auth.GasPrice, t.cfg.MaxGasPrice))
+		}
+		// RollupForgeBatch() calls ethclient.SendTransaction()
 		ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
-		if err != nil {
-			// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
-			// 	log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
-			// 		"block", t.stats.Eth.LastBlock.Num+1)
-			// 	return tracerr.Wrap(err)
-			// }
+		// We check the errors via strings because we match the
+		// definition of the error from geth, with the string returned
+		// via RPC obtained by the client.
+		if err == nil {
+			break
+		} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
+				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
+			auth.Nonce.Add(auth.Nonce, big.NewInt(1))
+			attempt--
+		} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
+				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
+			auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
+			attempt--
+		} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
+				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
+			auth.GasPrice = addPerc(auth.GasPrice, 10)
+			attempt--
+		} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) {
+			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
+				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
+			auth.GasPrice = addPerc(auth.GasPrice, 10)
+			attempt--
+		} else {
 			log.Errorw("TxManager ethClient.RollupForgeBatch",
 				"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
 				"batchNum", batchInfo.BatchNum)
-		} else {
-			break
 		}
 		select {
 		case <-ctx.Done():
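Annotation on the retry loop above: the send errors are classified by substring because only the RPC error text (not the original error value) survives the client boundary, so `errors.Is` cannot be used. A minimal standalone sketch of the pattern, with illustrative error values and actions in place of the repo's real ones:

```go
// Minimal sketch of the string-based error classification used above. The
// error values mirror go-ethereum's core package errors, but a client only
// sees their text over RPC, hence strings.Contains rather than errors.Is.
package main

import (
	"errors"
	"fmt"
	"strings"
)

var (
	errNonceTooLow = errors.New("nonce too low")
	errUnderpriced = errors.New("transaction underpriced")
)

type action int

const (
	actionOK action = iota
	actionBumpNonce
	actionBumpGasPrice
	actionGiveUp
)

func classify(err error) action {
	switch {
	case err == nil:
		return actionOK
	case strings.Contains(err.Error(), errNonceTooLow.Error()):
		return actionBumpNonce
	case strings.Contains(err.Error(), errUnderpriced.Error()):
		return actionBumpGasPrice
	default:
		return actionGiveUp
	}
}

func main() {
	// An RPC error wraps the original text, so substring matching still works.
	rpcErr := fmt.Errorf("rpc call failed: %w", errNonceTooLow)
	fmt.Println(classify(rpcErr) == actionBumpNonce) // true
}
```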
@@ -179,10 +242,29 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
 	}
+	if !resend {
+		t.accNextNonce = auth.Nonce.Uint64() + 1
+	}
 	batchInfo.EthTx = ethTx
-	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
+	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
+	now := time.Now()
+	batchInfo.SendTimestamp = now
+
+	if resend {
+		batchInfo.Debug.ResendNum++
+	}
+	batchInfo.Debug.Status = StatusSent
+	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
+	batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
+	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
+		batchInfo.Debug.StartTimestamp).Seconds()
 	t.cfg.debugBatchStore(batchInfo)
-	t.lastPendingBatch = batchInfo.BatchNum
+	if !resend {
+		if batchInfo.L1Batch {
+			t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
+		}
+	}
 	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
 		return tracerr.Wrap(err)
 	}
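Note: the `GasLimit` set in `NewAuth` above replaces the old hardcoded 1000000 with a linear estimate over the batch contents. A worked example under assumed config values (the cost figures below are illustrative placeholders, not the project's defaults):

```go
// Worked example of the linear gas-limit estimate from NewAuth above.
// The cost figures are illustrative placeholders, not real configuration.
package main

import "fmt"

type forgeBatchGasCost struct {
	Fixed     uint64
	L1UserTx  uint64
	L1CoordTx uint64
	L2Tx      uint64
}

func main() {
	cost := forgeBatchGasCost{Fixed: 300000, L1UserTx: 12000, L1CoordTx: 8000, L2Tx: 600}
	l1User, l1Coord, l2 := 4, 2, 100
	gasLimit := cost.Fixed +
		uint64(l1User)*cost.L1UserTx +
		uint64(l1Coord)*cost.L1CoordTx +
		uint64(l2)*cost.L2Tx
	fmt.Println(gasLimit) // 300000 + 48000 + 16000 + 60000 = 424000
}
```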
@@ -225,13 +307,20 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
 func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
 	receipt := batchInfo.Receipt
 	if receipt != nil {
+		if batchInfo.EthTx.Nonce()+1 > t.accNonce {
+			t.accNonce = batchInfo.EthTx.Nonce() + 1
+		}
 		if receipt.Status == types.ReceiptStatusFailed {
 			batchInfo.Debug.Status = StatusFailed
-			t.cfg.debugBatchStore(batchInfo)
 			_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
-			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
+			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
 				"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
 				"err", err)
+			batchInfo.EthTxErr = err
+			if batchInfo.BatchNum <= t.lastSuccessBatch {
+				t.lastSuccessBatch = batchInfo.BatchNum - 1
+			}
+			t.cfg.debugBatchStore(batchInfo)
 			return nil, tracerr.Wrap(fmt.Errorf(
 				"ethereum transaction receipt status is failed: %w", err))
 		} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -239,6 +328,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
 			batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
 			batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
 				batchInfo.Debug.StartBlockNum
+			if batchInfo.Debug.StartToMineDelay == 0 {
+				if block, err := t.ethClient.EthBlockByNumber(ctx,
+					receipt.BlockNumber.Int64()); err != nil {
+					log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
+				} else {
+					batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
+						batchInfo.Debug.SendTimestamp).Seconds()
+					batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
+						batchInfo.Debug.StartTimestamp).Seconds()
+				}
+			}
 			t.cfg.debugBatchStore(batchInfo)
 			if batchInfo.BatchNum > t.lastSuccessBatch {
 				t.lastSuccessBatch = batchInfo.BatchNum
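Annotation: `handleReceipt` above now rolls `lastSuccessBatch` back when a receipt reports failure, so the pipeline restarts from the batch before the failed one. A tiny sketch of just that rule, with hypothetical simplified types:

```go
// Sketch of the confirmation bookkeeping in handleReceipt above: a failed
// receipt rolls lastSuccessBatch back so the pipeline restarts from the
// batch before the failure. Hypothetical simplified types, not the repo's.
package main

import "fmt"

type batchNum int64

func onReceipt(lastSuccess, batch batchNum, failed bool) batchNum {
	if failed {
		if batch <= lastSuccess {
			return batch - 1 // forget confirmations at or after the failed batch
		}
		return lastSuccess
	}
	if batch > lastSuccess {
		return batch
	}
	return lastSuccess
}

func main() {
	last := batchNum(10)
	last = onReceipt(last, 11, false) // confirmed: 11
	last = onReceipt(last, 9, true)   // batch 9 failed: roll back to 8
	fmt.Println(last)                 // 8
}
```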
@@ -250,11 +350,72 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
 	return nil, nil
 }
 
+// TODO:
+// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
+
+// Queue of BatchInfos
+type Queue struct {
+	list []*BatchInfo
+	// nonceByBatchNum map[common.BatchNum]uint64
+	next int
+}
+
+// NewQueue returns a new queue
+func NewQueue() Queue {
+	return Queue{
+		list: make([]*BatchInfo, 0),
+		// nonceByBatchNum: make(map[common.BatchNum]uint64),
+		next: 0,
+	}
+}
+
+// Len is the length of the queue
+func (q *Queue) Len() int {
+	return len(q.list)
+}
+
+// At returns the BatchInfo at position (or nil if position is out of bounds)
+func (q *Queue) At(position int) *BatchInfo {
+	if position >= len(q.list) {
+		return nil
+	}
+	return q.list[position]
+}
+
+// Next returns the next BatchInfo (or nil if queue is empty)
+func (q *Queue) Next() (int, *BatchInfo) {
+	if len(q.list) == 0 {
+		return 0, nil
+	}
+	defer func() { q.next = (q.next + 1) % len(q.list) }()
+	return q.next, q.list[q.next]
+}
+
+// Remove removes the BatchInfo at position
+func (q *Queue) Remove(position int) {
+	// batchInfo := q.list[position]
+	// delete(q.nonceByBatchNum, batchInfo.BatchNum)
+	q.list = append(q.list[:position], q.list[position+1:]...)
+	if len(q.list) == 0 {
+		q.next = 0
+	} else {
+		q.next = position % len(q.list)
+	}
+}
+
+// Push adds a new BatchInfo
+func (q *Queue) Push(batchInfo *BatchInfo) {
+	q.list = append(q.list, batchInfo)
+	// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
+}
+
+// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
+// 	nonce, ok := q.nonceByBatchNum[batchNum]
+// 	return nonce, ok
+// }
+
 // Run the TxManager
 func (t *TxManager) Run(ctx context.Context) {
-	next := 0
-	waitDuration := longWaitDuration
 
 	var statsVars statsVars
 	select {
 	case statsVars = <-t.statsVarsCh:
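The new `Queue` above replaces the hand-maintained `next`/`current` slice indexing with a round-robin cursor that stays valid across removals. A usage sketch with a stub item type in place of `BatchInfo`:

```go
// Usage sketch for the round-robin Queue above, with a stub item type in
// place of BatchInfo. Next cycles through pending items; Remove keeps the
// cursor valid by wrapping it to the new length.
package main

import "fmt"

type item struct{ id int }

type queue struct {
	list []*item
	next int
}

func (q *queue) Push(it *item) { q.list = append(q.list, it) }

func (q *queue) Next() (int, *item) {
	if len(q.list) == 0 {
		return 0, nil
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

func (q *queue) Remove(pos int) {
	q.list = append(q.list[:pos], q.list[pos+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = pos % len(q.list)
	}
}

func main() {
	q := &queue{}
	for i := 1; i <= 3; i++ {
		q.Push(&item{id: i})
	}
	pos, it := q.Next() // visits item 1
	fmt.Println(pos, it.id)
	q.Remove(pos) // drop it; cursor wraps to position 0 (now item 2)
	_, it = q.Next()
	fmt.Println(it.id) // 2
}
```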
@@ -263,8 +424,9 @@ func (t *TxManager) Run(ctx context.Context) {
 	t.stats = statsVars.Stats
 	t.syncSCVars(statsVars.Vars)
 	log.Infow("TxManager: received initial statsVars",
-		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
+		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
 
+	timer := time.NewTimer(longWaitDuration)
 	for {
 		select {
 		case <-ctx.Done():
@@ -273,8 +435,27 @@ func (t *TxManager) Run(ctx context.Context) {
 		case statsVars := <-t.statsVarsCh:
 			t.stats = statsVars.Stats
 			t.syncSCVars(statsVars.Vars)
+		case pipelineNum := <-t.discardPipelineCh:
+			t.minPipelineNum = pipelineNum + 1
+			if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
+				continue
+			} else if err != nil {
+				log.Errorw("TxManager: removeBadBatchInfos", "err", err)
+				continue
+			}
 		case batchInfo := <-t.batchCh:
-			if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
+			if batchInfo.PipelineNum < t.minPipelineNum {
+				log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
+					"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
+			}
+			if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
+				log.Warnw("TxManager: shouldSend", "err", err,
+					"batch", batchInfo.BatchNum)
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
+				continue
+			}
+			if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
 				continue
 			} else if err != nil {
 				// If we reach here it's because our ethNode has
@@ -282,19 +463,24 @@ func (t *TxManager) Run(ctx context.Context) {
 				// ethereum. This could be due to the ethNode
 				// failure, or an invalid transaction (that
 				// can't be mined)
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
+				log.Warnw("TxManager: forgeBatch send failed", "err", err,
+					"batch", batchInfo.BatchNum)
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch send: %v", err)})
 				continue
 			}
-			t.queue = append(t.queue, batchInfo)
-			waitDuration = t.cfg.TxManagerCheckInterval
-		case <-time.After(waitDuration):
-			if len(t.queue) == 0 {
-				waitDuration = longWaitDuration
+			t.queue.Push(batchInfo)
+			if !timer.Stop() {
+				<-timer.C
+			}
+			timer.Reset(t.cfg.TxManagerCheckInterval)
+		case <-timer.C:
+			queuePosition, batchInfo := t.queue.Next()
+			if batchInfo == nil {
+				timer.Reset(longWaitDuration)
 				continue
 			}
-			current := next
-			next = (current + 1) % len(t.queue)
-			batchInfo := t.queue[current]
+			timer.Reset(t.cfg.TxManagerCheckInterval)
 			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
 				continue
 			} else if err != nil { //nolint:staticcheck
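Annotation on the timer change above: `time.After` allocated a fresh timer per loop iteration; the diff switches to one reusable `time.Timer`. The `Stop`/drain idiom is required because a timer that already fired leaves a stale tick in its channel, which would otherwise be delivered after `Reset`. A minimal sketch of the idiom:

```go
// Sketch of the timer reuse idiom adopted in Run above. Before Reset, a
// timer that already fired must be stopped and its channel drained, or the
// stale tick would be delivered after the Reset.
package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(time.Hour) // long wait, as with longWaitDuration

	// ... some event arrives, so we want a short check interval instead:
	if !timer.Stop() {
		// Stop returned false: the timer already fired; drain the tick so
		// the next receive sees the rescheduled one, not the stale one.
		<-timer.C
	}
	timer.Reset(10 * time.Millisecond)

	<-timer.C // fires after the new, short interval
	fmt.Println("tick")
}
```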
@@ -304,7 +490,8 @@ func (t *TxManager) Run(ctx context.Context) {
 				// if it was not mined, mined and successful or
 				// mined and failed. This could be due to the
 				// ethNode failure.
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
 			}
 
 			confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -312,32 +499,106 @@ func (t *TxManager) Run(ctx context.Context) {
 				continue
 			} else if err != nil { //nolint:staticcheck
 				// Transaction was rejected
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
-				}
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
+				if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
+					continue
+				} else if err != nil {
+					log.Errorw("TxManager: removeBadBatchInfos", "err", err)
+					continue
+				}
+				t.coord.SendMsg(ctx, MsgStopPipeline{
+					Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
+				continue
 			}
-			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
-				log.Debugw("TxManager tx for RollupForgeBatch confirmed",
-					"batch", batchInfo.BatchNum)
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
-				}
-			}
+			now := time.Now()
+			if !t.cfg.EthNoReuseNonce && confirm == nil &&
+				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
+				log.Infow("TxManager: forgeBatch tx has not been mined before the timeout, resending",
+					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
+				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
+					continue
+				} else if err != nil {
+					// If we reach here it's because our ethNode has
+					// been unable to send the transaction to
+					// ethereum. This could be due to the ethNode
+					// failure, or an invalid transaction (that
+					// can't be mined)
+					log.Warnw("TxManager: forgeBatch resend failed", "err", err,
+						"batch", batchInfo.BatchNum)
+					t.coord.SendMsg(ctx, MsgStopPipeline{
+						Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
+					continue
+				}
+			}
+
+			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
+				log.Debugw("TxManager: forgeBatch tx confirmed",
+					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
+				t.queue.Remove(queuePosition)
+			}
 		}
 	}
 }
 
-// nolint reason: this function will be used in the future
-//nolint:unused
-func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
+func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
+	next := 0
+	for {
+		batchInfo := t.queue.At(next)
+		if batchInfo == nil {
+			break
+		}
+		if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
+			return nil
+		} else if err != nil {
+			// Our ethNode is giving an error different
+			// than "not found" when getting the receipt
+			// for the transaction, so we can't figure out
+			// if it was not mined, mined and successful or
+			// mined and failed. This could be due to the
+			// ethNode failure.
+			next++
+			continue
+		}
+		confirm, err := t.handleReceipt(ctx, batchInfo)
+		if ctx.Err() != nil {
+			return nil
+		} else if err != nil {
+			// Transaction was rejected
+			if t.minPipelineNum <= batchInfo.PipelineNum {
+				t.minPipelineNum = batchInfo.PipelineNum + 1
+			}
+			t.queue.Remove(next)
+			continue
+		}
+		// If tx is pending but is from a cancelled pipeline, remove it
+		// from the queue
+		if confirm == nil {
+			if batchInfo.PipelineNum < t.minPipelineNum {
+				t.queue.Remove(next)
+				continue
+			}
+		}
+		next++
+	}
+	accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
+	if err != nil {
+		return err
+	}
+	if !t.cfg.EthNoReuseNonce {
+		t.accNextNonce = accNonce
+	}
+	return nil
+}
+
+func (t *TxManager) canForgeAt(blockNum int64) bool {
 	return canForge(&t.consts.Auction, &t.vars.Auction,
-		&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
+		&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
 		t.cfg.ForgerAddress, blockNum)
 }
+
+func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
+	lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
+	if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
+		lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
+	}
+	return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
+}
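Annotation on `mustL1L2Batch` above: it forces an L1 batch one block before the `ForgeL1L2BatchTimeout` deadline would expire. A worked example with an illustrative timeout value (the real one comes from the Rollup variables):

```go
// Worked example of the mustL1L2Batch deadline check above. The timeout
// value here is illustrative; the real one comes from the Rollup variables.
package main

import "fmt"

func mustL1L2Batch(blockNum, lastL1BatchBlockNum, forgeL1L2BatchTimeout int64) bool {
	return blockNum-lastL1BatchBlockNum >= forgeL1L2BatchTimeout-1
}

func main() {
	const timeout = 10 // illustrative ForgeL1L2BatchTimeout
	lastL1 := int64(100)
	fmt.Println(mustL1L2Batch(108, lastL1, timeout)) // false: 8 < 9
	fmt.Println(mustL1L2Batch(109, lastL1, timeout)) // true: 9 >= 9, the next batch must be an L1 batch
}
```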
coordinator/txmanager_test.go (Normal file, 15 lines)
@@ -0,0 +1,15 @@
+package coordinator
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAddPerc(t *testing.T) {
+	assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
+	assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
+	assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
+	assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
+}
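One behavior the test above does not pin down: `addPerc` uses `big.Int` integer division by 100, so fractional increments truncate toward zero. A quick illustration:

```go
// Illustration of addPerc's integer truncation: the percentage increment is
// computed with big.Int division, so fractional gas-price bumps are dropped.
package main

import (
	"fmt"
	"math/big"
)

func addPerc(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Set(v)
	r.Mul(r, big.NewInt(p))
	r.Div(r, big.NewInt(100)) // integer division: truncates toward zero
	return r.Add(v, r)
}

func main() {
	fmt.Println(addPerc(big.NewInt(10), 25)) // 12, not 12.5: 10*25/100 = 2
	fmt.Println(addPerc(big.NewInt(3), 10))  // 3: the 0.3 increment truncates to 0
}
```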
@@ -1,8 +1,11 @@
 package historydb
 
 import (
+	"database/sql"
 	"errors"
 	"fmt"
+	"math/big"
+	"time"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/common"
@@ -32,9 +35,18 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	defer hdb.apiConnCon.Release()
+	return hdb.getBatchAPI(hdb.dbRead, batchNum)
+}
+
+// GetBatchInternalAPI returns the batch with the given batchNum
+func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
+	return hdb.getBatchAPI(hdb.dbRead, batchNum)
+}
+
+func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
 	batch := &BatchAPI{}
 	return batch, tracerr.Wrap(meddler.QueryRow(
-		hdb.db, batch,
+		d, batch,
 		`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
 		batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
 		batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
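Annotation: throughout this file the queries move from `hdb.db` to `hdb.dbRead` (a read-oriented handle), and exported API methods split into a public wrapper that acquires the API connection semaphore plus an internal variant for in-process callers. A minimal sketch of that wrapper pattern, with hypothetical names standing in for the repo's types:

```go
// Minimal sketch of the wrapper pattern used above: the exported method
// throttles external API callers through a connection semaphore, while the
// Internal variant skips it for in-process use. All names are hypothetical.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

type store struct {
	apiConn *semaphore.Weighted // limits concurrent API-facing queries
}

func (s *store) getBatch(batchNum int) (string, error) {
	return fmt.Sprintf("batch %d", batchNum), nil // stand-in for the SQL query
}

// GetBatchAPI is the external entry point: it acquires a slot first.
func (s *store) GetBatchAPI(ctx context.Context, batchNum int) (string, error) {
	if err := s.apiConn.Acquire(ctx, 1); err != nil {
		return "", err
	}
	defer s.apiConn.Release(1)
	return s.getBatch(batchNum)
}

// GetBatchInternalAPI is for trusted in-process callers: no throttling.
func (s *store) GetBatchInternalAPI(batchNum int) (string, error) {
	return s.getBatch(batchNum)
}

func main() {
	s := &store{apiConn: semaphore.NewWeighted(4)}
	out, _ := s.GetBatchAPI(context.Background(), 42)
	fmt.Println(out)
}
```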
@@ -133,10 +145,10 @@ func (hdb *HistoryDB) GetBatchesAPI(
 		queryStr += " DESC "
 	}
 	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
-	query = hdb.db.Rebind(queryStr)
+	query = hdb.dbRead.Rebind(queryStr)
 	// log.Debug(query)
 	batchPtrs := []*BatchAPI{}
-	if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &batchPtrs, query, args...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
@@ -156,7 +168,7 @@ func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
 	}
 	defer hdb.apiConnCon.Release()
 	err = meddler.QueryRow(
-		hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
+		hdb.dbRead, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
 		FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
 		INNER JOIN (
 			SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
@@ -180,6 +192,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
 		return nil, 0, tracerr.Wrap(err)
 	}
 	defer hdb.apiConnCon.Release()
+	return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
+}
+
+func (hdb *HistoryDB) getBestBidsAPI(
+	d meddler.DB,
+	minSlotNum, maxSlotNum *int64,
+	bidderAddr *ethCommon.Address,
+	limit *uint, order string,
+) ([]BidAPI, uint64, error) {
 	var query string
 	var args []interface{}
 	// JOIN the best bid of each slot with the latest update of each coordinator
@@ -212,9 +232,9 @@ func (hdb *HistoryDB) GetBestBidsAPI(
 	if limit != nil {
 		queryStr += fmt.Sprintf("LIMIT %d;", *limit)
 	}
-	query = hdb.db.Rebind(queryStr)
+	query = hdb.dbRead.Rebind(queryStr)
 	bidPtrs := []*BidAPI{}
-	if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
+	if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	// log.Debug(query)
@@ -296,9 +316,9 @@ func (hdb *HistoryDB) GetBidsAPI(
 	if err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
-	query = hdb.db.Rebind(query)
+	query = hdb.dbRead.Rebind(query)
 	bids := []*BidAPI{}
-	if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &bids, query, argsQ...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	if len(bids) == 0 {
@@ -384,9 +404,9 @@ func (hdb *HistoryDB) GetTokensAPI(
 	if err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
-	query = hdb.db.Rebind(query)
+	query = hdb.dbRead.Rebind(query)
 	tokens := []*TokenWithUSD{}
-	if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &tokens, query, argsQ...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	if len(tokens) == 0 {
@@ -408,7 +428,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
 	defer hdb.apiConnCon.Release()
 	tx := &TxAPI{}
 	err = meddler.QueryRow(
-		hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
+		hdb.dbRead, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
 		hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
 		hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
 		tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
@@ -541,10 +561,10 @@ func (hdb *HistoryDB) GetTxsAPI(
 		queryStr += " DESC "
 	}
 	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
-	query = hdb.db.Rebind(queryStr)
+	query = hdb.dbRead.Rebind(queryStr)
 	// log.Debug(query)
 	txsPtrs := []*TxAPI{}
-	if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &txsPtrs, query, args...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
@@ -564,7 +584,7 @@ func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, err
 	defer hdb.apiConnCon.Release()
 	exit := &ExitAPI{}
 	err = meddler.QueryRow(
-		hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
+		hdb.dbRead, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
 		hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
 		account.bjj, account.eth_addr,
 		exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
@@ -685,10 +705,10 @@ func (hdb *HistoryDB) GetExitsAPI(
 		queryStr += " DESC "
 	}
 	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
-	query = hdb.db.Rebind(queryStr)
+	query = hdb.dbRead.Rebind(queryStr)
 	// log.Debug(query)
 	exits := []*ExitAPI{}
-	if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &exits, query, args...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	if len(exits) == 0 {
@@ -697,25 +717,6 @@ func (hdb *HistoryDB) GetExitsAPI(
 	return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
 }
 
-// GetBucketUpdatesAPI retrieves latest values for each bucket
-func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
-	cancel, err := hdb.apiConnCon.Acquire()
-	defer cancel()
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-	defer hdb.apiConnCon.Release()
-	var bucketUpdates []*BucketUpdateAPI
-	err = meddler.QueryAll(
-		hdb.db, &bucketUpdates,
-		`SELECT num_bucket, withdrawals FROM bucket_update
-		WHERE item_id in(SELECT max(item_id) FROM bucket_update
-		group by num_bucket)
-		ORDER BY num_bucket ASC;`,
-	)
-	return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
-}
-
 // GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
 func (hdb *HistoryDB) GetCoordinatorsAPI(
 	bidderAddr, forgerAddr *ethCommon.Address,
@@ -772,10 +773,10 @@ func (hdb *HistoryDB) GetCoordinatorsAPI(
 		queryStr += " DESC "
 	}
 	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
-	query = hdb.db.Rebind(queryStr)
+	query = hdb.dbRead.Rebind(queryStr)
 
 	coordinators := []*CoordinatorAPI{}
-	if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &coordinators, query, args...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	if len(coordinators) == 0 {
@@ -795,34 +796,11 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
 	defer hdb.apiConnCon.Release()
 	auctionVars := &common.AuctionVariables{}
 	err = meddler.QueryRow(
-		hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
+		hdb.dbRead, auctionVars, `SELECT * FROM auction_vars;`,
 	)
 	return auctionVars, tracerr.Wrap(err)
 }
 
-// GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
-// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
-func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
-	cancel, err := hdb.apiConnCon.Acquire()
-	defer cancel()
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-	defer hdb.apiConnCon.Release()
-	auctionVars := []*MinBidInfo{}
-	query := `
-		SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
-		WHERE default_slot_set_bid_slot_num < $1
-		ORDER BY default_slot_set_bid_slot_num DESC
-		LIMIT $2;
-	`
-	err = meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-	return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
-}
-
 // GetAccountAPI returns an account by its index
 func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
 	cancel, err := hdb.apiConnCon.Acquire()
@@ -832,11 +810,19 @@ func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
 	}
 	defer hdb.apiConnCon.Release()
 	account := &AccountAPI{}
-	err = meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
+	err = meddler.QueryRow(hdb.dbRead, account, `SELECT account.item_id, hez_idx(account.idx,
 	token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
 	token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
-	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
-	FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
+	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd,
+	token.usd_update, account_update.nonce, account_update.balance
+	FROM account inner JOIN (
+		SELECT idx, nonce, balance
+		FROM account_update
+		WHERE idx = $1
+		ORDER BY item_id DESC LIMIT 1
+	) AS account_update ON account_update.idx = account.idx
+	INNER JOIN token ON account.token_id = token.token_id
+	WHERE account.idx = $1;`, idx)
+
 	if err != nil {
 		return nil, tracerr.Wrap(err)
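Annotation: the account endpoint now joins the most recent `account_update` row to expose the current nonce and balance; for a single account this is the classic "latest row per key" pattern via `ORDER BY item_id DESC LIMIT 1`. A sketch of that selection modeled in plain Go over made-up rows:

```go
// Sketch of the "latest account_update row wins" selection that the query
// above performs with ORDER BY item_id DESC LIMIT 1, modeled in plain Go
// over made-up rows.
package main

import "fmt"

type accountUpdate struct {
	itemID  int
	idx     int
	nonce   uint64
	balance int64
}

// latest returns the row with the highest itemID for the given idx, mirroring:
// SELECT ... FROM account_update WHERE idx = $1 ORDER BY item_id DESC LIMIT 1
func latest(rows []accountUpdate, idx int) *accountUpdate {
	var best *accountUpdate
	for i := range rows {
		r := &rows[i]
		if r.idx == idx && (best == nil || r.itemID > best.itemID) {
			best = r
		}
	}
	return best
}

func main() {
	rows := []accountUpdate{
		{itemID: 1, idx: 256, nonce: 0, balance: 100},
		{itemID: 5, idx: 256, nonce: 2, balance: 70},
		{itemID: 3, idx: 257, nonce: 1, balance: 40},
	}
	r := latest(rows, 256)
	fmt.Println(r.nonce, r.balance) // 2 70: the most recent update for idx 256
}
```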
@@ -864,8 +850,13 @@ func (hdb *HistoryDB) GetAccountsAPI(
 	queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
 	account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
 	token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
-	COUNT(*) OVER() AS total_items
-	FROM account INNER JOIN token ON account.token_id = token.token_id `
+	account_update.nonce, account_update.balance, COUNT(*) OVER() AS total_items
+	FROM account inner JOIN (
+		SELECT DISTINCT idx,
+		first_value(nonce) over(partition by idx ORDER BY item_id DESC) as nonce,
+		first_value(balance) over(partition by idx ORDER BY item_id DESC) as balance
+		FROM account_update
+	) AS account_update ON account_update.idx = account.idx INNER JOIN token ON account.token_id = token.token_id `
 	// Apply filters
 	nextIsAnd := false
 	// ethAddr filter
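For the list endpoint, a per-account `LIMIT 1` is not possible in one pass, so the hunk above switches to window functions: `first_value(...) OVER (PARTITION BY idx ORDER BY item_id DESC)` picks the newest nonce and balance for every account in a single scan, and `DISTINCT` collapses the duplicate rows the window function leaves behind. The same subquery shown standalone (against the `account_update` table named in the diff):

```go
// The per-partition variant used for the list endpoint above, isolated as a
// standalone query string so the window-function shape is easy to read.
package main

import "fmt"

const latestPerAccount = `
SELECT DISTINCT idx,
	first_value(nonce)   OVER (PARTITION BY idx ORDER BY item_id DESC) AS nonce,
	first_value(balance) OVER (PARTITION BY idx ORDER BY item_id DESC) AS balance
FROM account_update;`

func main() {
	// In the real code this subquery is joined against account; printing it
	// here just keeps the sketch self-contained.
	fmt.Println(latestPerAccount)
}
```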
@@ -914,10 +905,10 @@ func (hdb *HistoryDB) GetAccountsAPI(
 	if err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
-	query = hdb.db.Rebind(query)
+	query = hdb.dbRead.Rebind(query)
 
 	accounts := []*AccountAPI{}
-	if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
+	if err := meddler.QueryAll(hdb.dbRead, &accounts, query, argsQ...); err != nil {
 		return nil, 0, tracerr.Wrap(err)
 	}
 	if len(accounts) == 0 {
@@ -928,99 +919,267 @@ func (hdb *HistoryDB) GetAccountsAPI(
 		accounts[0].TotalItems - uint64(len(accounts)), nil
 }
 
-// GetMetricsAPI returns metrics
-func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
-	cancel, err := hdb.apiConnCon.Acquire()
-	defer cancel()
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-	defer hdb.apiConnCon.Release()
-	metricsTotals := &MetricsTotals{}
-	metrics := &Metrics{}
-	err = meddler.QueryRow(
-		hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
-		COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
-		NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
-		FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
-		WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-
-	seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
-	// Avoid dividing by 0
-	if seconds == 0 {
+// GetCommonAccountAPI returns the account associated to an account idx
+func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
+	cancel, err := hdb.apiConnCon.Acquire()
+	defer cancel()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	defer hdb.apiConnCon.Release()
+	account := &common.Account{}
+	err = meddler.QueryRow(
+		hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx,
+	)
+	return account, tracerr.Wrap(err)
+}
+
+// GetCoordinatorAPI returns a coordinator by its bidderAddr
+func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
+	cancel, err := hdb.apiConnCon.Acquire()
+	defer cancel()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	defer hdb.apiConnCon.Release()
+	return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
+}
+
+func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
+	coordinator := &CoordinatorAPI{}
+	err := meddler.QueryRow(
+		d, coordinator,
+		"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
+		bidderAddr,
+	)
+	return coordinator, tracerr.Wrap(err)
+}
+
+// GetNodeInfoAPI returns the NodeInfo
+func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
+	cancel, err := hdb.apiConnCon.Acquire()
+	defer cancel()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	defer hdb.apiConnCon.Release()
+	return hdb.GetNodeInfo()
+}
+
+// GetBucketUpdatesInternalAPI returns the latest bucket updates
+func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
+	var bucketUpdates []*BucketUpdateAPI
+	err := meddler.QueryAll(
+		hdb.dbRead, &bucketUpdates,
+		`SELECT num_bucket, withdrawals FROM bucket_update
+		WHERE item_id in(SELECT max(item_id) FROM bucket_update
+		group by num_bucket)
+		ORDER BY num_bucket ASC;`,
+	)
+	return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
+}
+
+// GetNextForgersInternalAPI returns next forgers
+func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
+	auctionConsts *common.AuctionConstants,
+	lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
+	secondsPerBlock := int64(15) //nolint:gomnd
+	// currentSlot and lastClosedSlot included
+	limit := uint(lastClosedSlot - currentSlot + 1)
+	bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
+	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
+		return nil, tracerr.Wrap(err)
+	}
+	nextForgers := []NextForgerAPI{}
+	// Get min bid info
+	var minBidInfo []MinBidInfo
+	if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
+		// All min bids can be calculated with the last update of AuctionVariables
+		minBidInfo = []MinBidInfo{{
+			DefaultSlotSetBid:        auctionVars.DefaultSlotSetBid,
+			DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
+		}}
+	} else {
+		// Get all the relevant updates from the DB
+		minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+	}
+	// Create nextForger for each slot
+	for i := currentSlot; i <= lastClosedSlot; i++ {
+		fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
+			auctionConsts.GenesisBlockNum
+		toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
+			auctionConsts.GenesisBlockNum - 1
+		nextForger := NextForgerAPI{
+			Period: Period{
+				SlotNum:   i,
+				FromBlock: fromBlock,
+				ToBlock:   toBlock,
+				FromTimestamp: lastBlock.Timestamp.Add(time.Second *
+					time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
+				ToTimestamp: lastBlock.Timestamp.Add(time.Second *
+					time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
+			},
+		}
+		foundForger := false
+		// If there is a bid for a slot, get forger (coordinator)
+		for j := range bids {
+			slotNum := bids[j].SlotNum
+			if slotNum == i {
+				// There's a bid for the slot
+				// Check if the bid is greater than the minimum required
+				for i := 0; i < len(minBidInfo); i++ {
+					// Find the most recent update
+					if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
+						// Get min bid
+						minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
+						minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
+						// Check if the bid has beaten the minimum
+						bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
+						if !ok {
+							return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
+						}
+						if minBid.Cmp(bid) == 1 {
+							// Min bid is greater than bid, the slot will be forged by boot coordinator
+							break
+						}
+						foundForger = true
+						break
+					}
+				}
+				if !foundForger { // There is no bid or it's smaller than the minimum
+					break
+				}
+				coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
+				if err != nil {
+					return nil, tracerr.Wrap(err)
+				}
+				nextForger.Coordinator = *coordinator
+				break
+			}
+		}
+		// If there is no bid, the coordinator that will forge is boot coordinator
+		if !foundForger {
+			nextForger.Coordinator = CoordinatorAPI{
+				Forger: auctionVars.BootCoordinator,
+				URL:    auctionVars.BootCoordinatorURL,
+			}
+		}
+		nextForgers = append(nextForgers, nextForger)
+	}
+	return nextForgers, nil
+}
 
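Annotation on the min-bid check in `GetNextForgersInternalAPI` above: each slot maps onto one of the default slot-set bids by `slotNum % len(DefaultSlotSetBid)`, and a bid below that minimum leaves the slot to the boot coordinator. A tiny illustration with made-up values:

```go
// Sketch of the min-bid selection in GetNextForgersInternalAPI above.
// Values are illustrative, not real auction configuration.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	defaultSlotSetBid := []*big.Int{
		big.NewInt(100), big.NewInt(200), big.NewInt(300),
		big.NewInt(400), big.NewInt(500), big.NewInt(600),
	}
	slotNum := int64(20)
	minBid := defaultSlotSetBid[slotNum%int64(len(defaultSlotSetBid))] // 20 % 6 = 2 -> 300
	bid := big.NewInt(250)
	if minBid.Cmp(bid) == 1 {
		fmt.Println("bid below minimum: boot coordinator forges the slot")
	} else {
		fmt.Println("bid wins the slot")
	}
}
```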
|
// GetMetricsInternalAPI returns the MetricsAPI
|
||||||
|
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
|
||||||
|
var metrics MetricsAPI
|
||||||
|
// Get the first and last batch of the last 24h and their timestamps
|
||||||
|
// if u.state.Network.LastBatch == nil {
|
||||||
|
// return &metrics, nil
|
||||||
|
// }
|
||||||
|
type period struct {
|
||||||
|
FromBatchNum common.BatchNum `meddler:"from_batch_num"`
|
||||||
|
FromTimestamp time.Time `meddler:"from_timestamp"`
|
||||||
|
ToBatchNum common.BatchNum `meddler:"-"`
|
||||||
|
ToTimestamp time.Time `meddler:"to_timestamp"`
|
||||||
|
}
|
||||||
|
p := &period{
|
||||||
|
ToBatchNum: lastBatchNum,
|
||||||
|
}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, p, `SELECT
|
||||||
|
COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
|
||||||
|
COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
|
||||||
|
COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
|
||||||
|
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
||||||
|
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
|
||||||
|
); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Get the amount of txs of that period
|
||||||
|
row := hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var nTxs int
|
||||||
|
if err := row.Scan(&nTxs); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
// Set txs/s
|
||||||
|
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
|
||||||
|
if seconds == 0 { // Avoid dividing by 0
|
||||||
seconds++
|
seconds++
|
||||||
}
|
}
|
||||||
|
metrics.TransactionsPerSecond = float64(nTxs) / seconds
|
||||||
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
|
// Set txs/batch
|
||||||
|
nBatches := p.ToBatchNum - p.FromBatchNum + 1
|
||||||
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
|
if nBatches == 0 { // Avoid dividing by 0
|
||||||
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
|
nBatches++
|
||||||
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
|
|
||||||
} else {
|
|
||||||
metrics.TransactionsPerBatch = float64(0)
|
|
||||||
}
|
}
|
||||||
|
if (p.ToBatchNum - p.FromBatchNum) > 0 {
|
||||||
err = meddler.QueryRow(
|
fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
|
||||||
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
metrics.TransactionsPerBatch = float64(nTxs) /
|
||||||
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
float64(nBatches)
|
||||||
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
|
} else {
|
||||||
if err != nil {
|
metrics.TransactionsPerBatch = 0
|
||||||
|
}
|
||||||
|
// Get total fee of that period
|
||||||
|
row = hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var totalFee float64
|
||||||
|
if err := row.Scan(&totalFee); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
if metricsTotals.TotalBatches > 0 {
|
// Set batch frequency
|
||||||
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
|
metrics.BatchFrequency = seconds / float64(nBatches)
|
||||||
} else {
|
if nTxs > 0 {
|
||||||
metrics.BatchFrequency = 0
|
metrics.AvgTransactionFee = totalFee / float64(nTxs)
|
||||||
}
|
|
||||||
if metricsTotals.TotalTransactions > 0 {
|
|
||||||
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
|
||||||
} else {
|
} else {
|
||||||
metrics.AvgTransactionFee = 0
|
metrics.AvgTransactionFee = 0
|
||||||
}
|
}
|
||||||
err = meddler.QueryRow(
|
// Get and set amount of registered accounts
|
||||||
hdb.db, metrics,
|
type registeredAccounts struct {
|
||||||
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
|
TotalIdx int64 `meddler:"total_idx"`
|
||||||
if err != nil {
|
TotalBJJ int64 `meddler:"total_bjj"`
|
||||||
|
}
|
||||||
|
ra := ®isteredAccounts{}
|
||||||
|
if err := meddler.QueryRow(
|
||||||
|
hdb.dbRead, ra,
|
||||||
|
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
|
||||||
|
); err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
metrics.TotalAccounts = ra.TotalIdx
|
||||||
return metrics, nil
|
metrics.TotalBJJs = ra.TotalBJJ
|
||||||
|
// Get and set estimated time to forge L1 tx
|
||||||
|
row = hdb.dbRead.QueryRow(
|
||||||
|
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
|
||||||
|
INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
|
||||||
|
INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
|
||||||
|
INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
|
||||||
|
WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
|
||||||
|
p.FromBatchNum, p.ToBatchNum,
|
||||||
|
)
|
||||||
|
var timeToForgeL1 float64
|
||||||
|
if err := row.Scan(&timeToForgeL1); err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
metrics.EstimatedTimeToForgeL1 = timeToForgeL1
|
||||||
|
return &metrics, nil
|
||||||
}
|
}
|
||||||
|
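Both guards in the new metrics code protect the divisions that follow: an empty 24h window leaves `seconds` at zero and an empty batch range leaves `nBatches` at zero, and each is bumped to 1 before dividing. A self-contained sketch of the same rate math (the sample numbers are hypothetical):

```go
package main

import "fmt"

func main() {
	nTxs := 240          // txs counted in the 24h window
	seconds := 0.0       // window length in seconds; zero when from == to
	nBatches := int64(0) // ToBatchNum - FromBatchNum + 1

	if seconds == 0 { // Avoid dividing by 0
		seconds++
	}
	if nBatches == 0 { // Avoid dividing by 0
		nBatches++
	}
	fmt.Printf("txs/s: %.2f, txs/batch: %.2f\n",
		float64(nTxs)/seconds, float64(nTxs)/float64(nBatches))
}
```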

// GetAvgTxFeeAPI returns average transaction fee of the last 1h
// GetStateAPI returns the StateAPI
func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
	cancel, err := hdb.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return 0, tracerr.Wrap(err)
		return nil, tracerr.Wrap(err)
	}
	defer hdb.apiConnCon.Release()
	metricsTotals := &MetricsTotals{}
	return hdb.getStateAPI(hdb.dbRead)
	err = meddler.QueryRow(
		hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
		COALESCE (MIN(tx.batch_num), 0) as batch_num
		FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
		WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
	if err != nil {
		return 0, tracerr.Wrap(err)
	}
	err = meddler.QueryRow(
		hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
		COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
		WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
	if err != nil {
		return 0, tracerr.Wrap(err)
	}

	var avgTransactionFee float64
	if metricsTotals.TotalTransactions > 0 {
		avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
	} else {
		avgTransactionFee = 0
	}

	return avgTransactionFee, nil
}
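Both versions keep the same concurrency discipline around `apiConnCon`: `Acquire` may fail on timeout, its cancel function is deferred unconditionally, and `Release` frees the slot. A minimal sketch of a limiter with that shape — hypothetical, not the node's actual `db.APIConnectionController`:

```go
package main

import (
	"context"
	"time"
)

type connLimiter struct {
	slots   chan struct{}
	timeout time.Duration
}

func newConnLimiter(n int, timeout time.Duration) *connLimiter {
	return &connLimiter{slots: make(chan struct{}, n), timeout: timeout}
}

// Acquire blocks until a slot is free or the timeout expires; the returned
// cancel must always be deferred, mirroring the call sites in the diff.
func (l *connLimiter) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), l.timeout)
	select {
	case l.slots <- struct{}{}:
		return cancel, nil
	case <-ctx.Done():
		return cancel, ctx.Err()
	}
}

// Release frees a slot taken by a successful Acquire.
func (l *connLimiter) Release() { <-l.slots }

func main() {
	l := newConnLimiter(1, time.Second)
	cancel, err := l.Acquire()
	defer cancel()
	if err == nil {
		defer l.Release()
	}
}
```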
@@ -27,30 +27,35 @@ const (

// HistoryDB persist the historic of the rollup
type HistoryDB struct {
	db         *sqlx.DB
	dbRead     *sqlx.DB
	dbWrite    *sqlx.DB
	apiConnCon *db.APIConnectionController
}

// NewHistoryDB initialize the DB
func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
func NewHistoryDB(dbRead, dbWrite *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
	return &HistoryDB{db: db, apiConnCon: apiConnCon}
	return &HistoryDB{
		dbRead:     dbRead,
		dbWrite:    dbWrite,
		apiConnCon: apiConnCon,
	}
}

// DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes.
func (hdb *HistoryDB) DB() *sqlx.DB {
	return hdb.db
	return hdb.dbWrite
}
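With the constructor split into `dbRead` and `dbWrite`, a caller can point reads at a replica and writes at the primary, or pass the same pool twice as the tests further below do. A sketch of the wiring — the DSNs and package name are hypothetical:

```go
package example

import (
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // postgres driver
)

// openHistoryDBPools opens one pool per role; both arguments of
// NewHistoryDB(dbRead, dbWrite, apiConnCon) may also share a single pool.
func openHistoryDBPools() (dbRead, dbWrite *sqlx.DB, err error) {
	dbRead, err = sqlx.Connect("postgres", "postgres://hermez:secret@replica:5432/hermez")
	if err != nil {
		return nil, nil, err
	}
	dbWrite, err = sqlx.Connect("postgres", "postgres://hermez:secret@primary:5432/hermez")
	if err != nil {
		return nil, nil, err
	}
	return dbRead, dbWrite, nil
}
```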
// AddBlock insert a block into the DB
func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.db, block) }
func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.dbWrite, block) }
func (hdb *HistoryDB) addBlock(d meddler.DB, block *common.Block) error {
	return tracerr.Wrap(meddler.Insert(d, "block", block))
}

// AddBlocks inserts blocks into the DB
func (hdb *HistoryDB) AddBlocks(blocks []common.Block) error {
	return tracerr.Wrap(hdb.addBlocks(hdb.db, blocks))
	return tracerr.Wrap(hdb.addBlocks(hdb.dbWrite, blocks))
}

func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
@@ -61,7 +66,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
		timestamp,
		hash
	) VALUES %s;`,
		blocks[:],
		blocks,
	))
}
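The `blocks[:]` → `blocks` change here (and the matching ones below for bids, coordinators, exit trees, tokens, accounts, txs, bucket updates, token exchanges and escape-hatch withdrawals) is purely cosmetic: re-slicing a whole slice yields a slice with the same length, capacity and backing array, so both spellings pass the same value to `db.BulkInsert`. A quick demonstration:

```go
package main

import "fmt"

func main() {
	xs := []int{1, 2, 3}
	ys := xs[:] // same backing array as xs; s[:] on a slice is a no-op
	ys[0] = 42
	fmt.Println(xs[0] == 42, len(ys) == len(xs), cap(ys) == cap(xs)) // true true true
}
```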
@@ -69,7 +74,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
	block := &common.Block{}
	err := meddler.QueryRow(
		hdb.db, block,
		hdb.dbRead, block,
		"SELECT * FROM block WHERE eth_block_num = $1;", blockNum,
	)
	return block, tracerr.Wrap(err)
@@ -79,7 +84,7 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
	var blocks []*common.Block
	err := meddler.QueryAll(
		hdb.db, &blocks,
		hdb.dbRead, &blocks,
		"SELECT * FROM block ORDER BY eth_block_num;",
	)
	return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
@@ -89,7 +94,7 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
	var blocks []*common.Block
	err := meddler.QueryAll(
		hdb.db, &blocks,
		hdb.dbRead, &blocks,
		"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;",
		from, to,
	)
@@ -100,13 +105,13 @@ func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) {
	block := &common.Block{}
	err := meddler.QueryRow(
		hdb.db, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
		hdb.dbRead, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
	)
	return block, tracerr.Wrap(err)
}

// AddBatch insert a Batch into the DB
func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.db, batch) }
func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.dbWrite, batch) }
func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
	// Calculate total collected fees in USD
	// Get IDs of collected tokens for fees
@@ -129,9 +134,9 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
	if err != nil {
		return tracerr.Wrap(err)
	}
	query = hdb.db.Rebind(query)
	query = hdb.dbWrite.Rebind(query)
	if err := meddler.QueryAll(
		hdb.db, &tokenPrices, query, args...,
		hdb.dbWrite, &tokenPrices, query, args...,
	); err != nil {
		return tracerr.Wrap(err)
	}
@@ -153,7 +158,7 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {

// AddBatches insert Bids into the DB
func (hdb *HistoryDB) AddBatches(batches []common.Batch) error {
	return tracerr.Wrap(hdb.addBatches(hdb.db, batches))
	return tracerr.Wrap(hdb.addBatches(hdb.dbWrite, batches))
}
func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
	for i := 0; i < len(batches); i++ {
@@ -164,11 +169,24 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
	return nil
}

// GetBatch returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
	var batch common.Batch
	err := meddler.QueryRow(
		hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
		batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
		batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
		batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
		batchNum,
	)
	return &batch, err
}
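The new `GetBatch` returns the meddler error unwrapped, so a missing batch surfaces as `sql.ErrNoRows` (the updated `TestBatches` below checks exactly that through `tracerr.Unwrap`). A hypothetical helper showing how a caller inside the historydb package might branch on it; the import path is assumed:

```go
package historydb

import (
	"database/sql"

	"github.com/hermeznetwork/hermez-node/common"
)

// batchExists reports whether a batch has been forged, treating
// sql.ErrNoRows as "not found" rather than as a failure.
func batchExists(hdb *HistoryDB, num common.BatchNum) (bool, error) {
	_, err := hdb.GetBatch(num)
	if err == sql.ErrNoRows {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```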
// GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
	var batches []*common.Batch
	err := meddler.QueryAll(
		hdb.db, &batches,
		hdb.dbRead, &batches,
		`SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected,
		batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root,
		batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch
@@ -181,7 +199,7 @@ func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
	var batches []*common.Batch
	err := meddler.QueryAll(
		hdb.db, &batches,
		hdb.dbRead, &batches,
		`SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator,
		state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd
		FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`,
@@ -193,7 +211,7 @@ func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, erro
// GetFirstBatchBlockNumBySlot returns the ethereum block number of the first
// batch within a slot
func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) {
	row := hdb.db.QueryRow(
	row := hdb.dbRead.QueryRow(
		`SELECT eth_block_num FROM batch
		WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum,
	)
@@ -203,14 +221,26 @@ func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error)

// GetLastBatchNum returns the BatchNum of the latest forged batch
func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
	row := hdb.db.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
	row := hdb.dbRead.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
	var batchNum common.BatchNum
	return batchNum, tracerr.Wrap(row.Scan(&batchNum))
}

// GetLastBatch returns the last forged batch
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
	var batch common.Batch
	err := meddler.QueryRow(
		hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
		batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
		batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
		batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
	)
	return &batch, err
}
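Both batch getters lean on meddler's tag-driven mapping: each `meddler:"column"` tag binds a struct field to a result column, and `meddler:"-"` (as on `period.ToBatchNum` earlier) excludes a field entirely. An isolated illustration with a hypothetical struct and query:

```go
package example

import (
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/russross/meddler"
)

type batchSummary struct {
	BatchNum  int64     `meddler:"batch_num"`
	Timestamp time.Time `meddler:"to_timestamp"`
	Note      string    `meddler:"-"` // never read from or written to the DB
}

// lastBatchSummary scans one row into the tagged fields of batchSummary.
func lastBatchSummary(dbRead *sqlx.DB) (*batchSummary, error) {
	s := &batchSummary{}
	err := meddler.QueryRow(dbRead, s,
		`SELECT batch.batch_num, block.timestamp AS to_timestamp
		FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
		ORDER BY batch.batch_num DESC LIMIT 1;`)
	return s, err
}
```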
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
	row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
	row := hdb.dbRead.QueryRow(`SELECT eth_block_num FROM batch
		WHERE forge_l1_txs_num IS NOT NULL
		ORDER BY batch_num DESC LIMIT 1;`)
	var blockNum int64
@@ -220,7 +250,7 @@ func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged
// batches. If there's no batch in the DB (nil, nil) is returned.
func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
	row := hdb.db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
	row := hdb.dbRead.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
	lastL1TxsNum := new(int64)
	return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum))
}
@@ -231,15 +261,15 @@ func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
func (hdb *HistoryDB) Reorg(lastValidBlock int64) error {
	var err error
	if lastValidBlock < 0 {
		_, err = hdb.db.Exec("DELETE FROM block;")
		_, err = hdb.dbWrite.Exec("DELETE FROM block;")
	} else {
		_, err = hdb.db.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
		_, err = hdb.dbWrite.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
	}
	return tracerr.Wrap(err)
}

// AddBids insert Bids into the DB
func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.db, bids) }
func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.dbWrite, bids) }
func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
	if len(bids) == 0 {
		return nil
@@ -248,7 +278,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
	return tracerr.Wrap(db.BulkInsert(
		d,
		"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
		bids[:],
		bids,
	))
}
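`db.BulkInsert` expands the single `%s` in these INSERT statements into one parenthesized tuple per slice element. A minimal, hypothetical sketch of that expansion technique (the node's real helper also maps struct fields via meddler):

```go
package main

import (
	"fmt"
	"strings"
)

// valuesPlaceholders builds "($1, $2, ...), ($n, ...)" for rows x cols args.
func valuesPlaceholders(rows, cols int) string {
	tuples := make([]string, rows)
	arg := 1
	for r := range tuples {
		ph := make([]string, cols)
		for c := range ph {
			ph[c] = fmt.Sprintf("$%d", arg)
			arg++
		}
		tuples[r] = "(" + strings.Join(ph, ", ") + ")"
	}
	return strings.Join(tuples, ", ")
}

func main() {
	fmt.Printf("INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;\n",
		valuesPlaceholders(2, 4))
	// INSERT INTO bid (...) VALUES ($1, $2, $3, $4), ($5, $6, $7, $8);
}
```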
@@ -256,7 +286,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
	var bids []*common.Bid
	err := meddler.QueryAll(
		hdb.db, &bids,
		hdb.dbRead, &bids,
		`SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid
		ORDER BY item_id;`,
	)
@@ -267,7 +297,7 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
	bidCoord := &common.BidCoordinator{}
	err := meddler.QueryRow(
		hdb.db, bidCoord,
		hdb.dbRead, bidCoord,
		`SELECT (
			SELECT default_slot_set_bid
			FROM auction_vars
@@ -290,7 +320,7 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat

// AddCoordinators insert Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
	return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
	return tracerr.Wrap(hdb.addCoordinators(hdb.dbWrite, coordinators))
}
func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error {
	if len(coordinators) == 0 {
@@ -299,13 +329,13 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
	return tracerr.Wrap(db.BulkInsert(
		d,
		"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
		coordinators[:],
		coordinators,
	))
}

// AddExitTree insert Exit tree into the DB
func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error {
	return tracerr.Wrap(hdb.addExitTree(hdb.db, exitTree))
	return tracerr.Wrap(hdb.addExitTree(hdb.dbWrite, exitTree))
}
func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error {
	if len(exitTree) == 0 {
@@ -315,7 +345,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
		d,
		"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
			"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
		exitTree[:],
		exitTree,
	))
}
@@ -393,11 +423,13 @@ func (hdb *HistoryDB) updateExitTree(d sqlx.Ext, blockNum int64,

// AddToken insert a token into the DB
func (hdb *HistoryDB) AddToken(token *common.Token) error {
	return tracerr.Wrap(meddler.Insert(hdb.db, "token", token))
	return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "token", token))
}

// AddTokens insert tokens into the DB
func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { return hdb.addTokens(hdb.db, tokens) }
func (hdb *HistoryDB) AddTokens(tokens []common.Token) error {
	return hdb.addTokens(hdb.dbWrite, tokens)
}
func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
	if len(tokens) == 0 {
		return nil
@@ -418,16 +450,17 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
		symbol,
		decimals
	) VALUES %s;`,
		tokens[:],
		tokens,
	))
}

// UpdateTokenValue updates the USD value of a token
// UpdateTokenValue updates the USD value of a token. Value is the price in
// USD of a normalized token (1 token = 10^decimals units)
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
	// Sanitize symbol
	tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")

	_, err := hdb.db.Exec(
	_, err := hdb.dbWrite.Exec(
		"UPDATE token SET usd = $1 WHERE symbol = $2;",
		value, tokenSymbol,
	)
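The expanded comment pins down the units: `value` is USD per whole token, where one token equals 10^decimals base units. The conversion an API consumer would apply to a balance stored in base units (numbers hypothetical):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	valueUSD := 1.5      // price of one normalized token in USD
	decimals := 6        // token decimals
	units := 2_500_000.0 // balance in base units
	fmt.Println(valueUSD * units / math.Pow10(decimals)) // 3.75 USD
}
```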
@@ -438,7 +471,7 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
	token := &TokenWithUSD{}
	err := meddler.QueryRow(
		hdb.db, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
		hdb.dbRead, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
	)
	return token, tracerr.Wrap(err)
}
@@ -447,7 +480,7 @@ func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
	var tokens []*TokenWithUSD
	err := meddler.QueryAll(
		hdb.db, &tokens,
		hdb.dbRead, &tokens,
		"SELECT * FROM token ORDER BY token_id;",
	)
	return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
@@ -456,7 +489,7 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
// GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
	var tokenSymbols []string
	rows, err := hdb.db.Query("SELECT symbol FROM token;")
	rows, err := hdb.dbRead.Query("SELECT symbol FROM token;")
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -474,7 +507,7 @@ func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {

// AddAccounts insert accounts into the DB
func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error {
	return tracerr.Wrap(hdb.addAccounts(hdb.db, accounts))
	return tracerr.Wrap(hdb.addAccounts(hdb.dbWrite, accounts))
}
func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error {
	if len(accounts) == 0 {
@@ -489,7 +522,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
		bjj,
		eth_addr
	) VALUES %s;`,
		accounts[:],
		accounts,
	))
}
@@ -497,18 +530,49 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
	var accs []*common.Account
	err := meddler.QueryAll(
		hdb.db, &accs,
		hdb.dbRead, &accs,
		"SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;",
	)
	return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
}

// AddAccountUpdates inserts accUpdates into the DB
func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
	return tracerr.Wrap(hdb.addAccountUpdates(hdb.dbWrite, accUpdates))
}
func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
	if len(accUpdates) == 0 {
		return nil
	}
	return tracerr.Wrap(db.BulkInsert(
		d,
		`INSERT INTO account_update (
			eth_block_num,
			batch_num,
			idx,
			nonce,
			balance
		) VALUES %s;`,
		accUpdates,
	))
}

// GetAllAccountUpdates returns all the AccountUpdate from the DB
func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
	var accUpdates []*common.AccountUpdate
	err := meddler.QueryAll(
		hdb.dbRead, &accUpdates,
		"SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
	)
	return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
}
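Feeding the new `AddAccountUpdates` takes one `common.AccountUpdate` per touched account, matching the `account_update` columns in the INSERT above; the field names follow the updated `TestAccounts` further below. A hypothetical single-account helper, with the import path assumed:

```go
package historydb

import (
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

// recordBalance stores one balance/nonce snapshot for an account at a batch.
func recordBalance(hdb *HistoryDB, blockNum int64, batchNum common.BatchNum,
	idx common.Idx, nonce common.Nonce, balance *big.Int) error {
	return hdb.AddAccountUpdates([]common.AccountUpdate{{
		EthBlockNum: blockNum,
		BatchNum:    batchNum,
		Idx:         idx,
		Nonce:       nonce,
		Balance:     balance,
	}})
}
```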
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
// BatchNum should be null, and the value will be set by a trigger when a batch forges the tx.
// EffectiveAmount and EffectiveDepositAmount are set with default values by the DB.
func (hdb *HistoryDB) AddL1Txs(l1txs []common.L1Tx) error {
	return tracerr.Wrap(hdb.addL1Txs(hdb.db, l1txs))
	return tracerr.Wrap(hdb.addL1Txs(hdb.dbWrite, l1txs))
}

// addL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
@@ -562,7 +626,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {

// AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error {
	return tracerr.Wrap(hdb.addL2Txs(hdb.db, l2txs))
	return tracerr.Wrap(hdb.addL2Txs(hdb.dbWrite, l2txs))
}

// addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
@@ -621,7 +685,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
		fee,
		nonce
	) VALUES %s;`,
		txs[:],
		txs,
	))
}
@@ -629,7 +693,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
	var exits []*common.ExitInfo
	err := meddler.QueryAll(
		hdb.db, &exits,
		hdb.dbRead, &exits,
		`SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof,
		exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request,
		exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`,
@@ -641,7 +705,7 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
	var txs []*common.L1Tx
	err := meddler.QueryAll(
		hdb.db, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
		hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
		tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
		tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
@@ -658,7 +722,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
	// Since the query specifies that only coordinator txs are returned, it's safe to assume
	// that returned txs will always have effective amounts
	err := meddler.QueryAll(
		hdb.db, &txs,
		hdb.dbRead, &txs,
		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
		tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
		tx.amount, tx.amount AS effective_amount,
@@ -673,7 +737,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
	var txs []*common.L2Tx
	err := meddler.QueryAll(
		hdb.db, &txs,
		hdb.dbRead, &txs,
		`SELECT tx.id, tx.batch_num, tx.position,
		tx.from_idx, tx.to_idx, tx.amount, tx.token_id,
		tx.fee, tx.nonce, tx.type, tx.eth_block_num
@@ -686,7 +750,7 @@ func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
	var txs []*common.L1Tx
	err := meddler.QueryAll(
		hdb.db, &txs, // only L1 user txs can have batch_num set to null
		hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
		tx.amount, NULL AS effective_amount,
@@ -703,7 +767,7 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx

// GetLastTxsPosition for a given to_forge_l1_txs_num
func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) {
	row := hdb.db.QueryRow(
	row := hdb.dbRead.QueryRow(
		"SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;",
		toForgeL1TxsNum,
	)
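The CASE expression in `GetAllL1UserTxs` encodes three tx states: not yet forged maps to NULL, forged and successful maps to the full amount, and forged but failed maps to `'\x'`, which scans into a `big.Int` as 0 (as the inline comment notes). The same decision expressed in Go, with simplified, hypothetical types:

```go
package main

import (
	"fmt"
	"math/big"
)

// effectiveAmount mirrors the SQL CASE: nil batchNum means not forged yet.
func effectiveAmount(batchNum *int64, amountSuccess bool, amount *big.Int) *big.Int {
	if batchNum == nil {
		return nil // NULL: the tx has not been forged
	}
	if amountSuccess {
		return amount
	}
	return big.NewInt(0) // forged, but the transfer did not apply
}

func main() {
	bn := int64(42)
	fmt.Println(effectiveAmount(&bn, false, big.NewInt(10))) // 0
}
```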
@@ -717,15 +781,15 @@ func (hdb *HistoryDB) GetSCVars() (*common.RollupVariables, *common.AuctionVaria
	var rollup common.RollupVariables
	var auction common.AuctionVariables
	var wDelayer common.WDelayerVariables
	if err := meddler.QueryRow(hdb.db, &rollup,
	if err := meddler.QueryRow(hdb.dbRead, &rollup,
		"SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
		return nil, nil, nil, tracerr.Wrap(err)
	}
	if err := meddler.QueryRow(hdb.db, &auction,
	if err := meddler.QueryRow(hdb.dbRead, &auction,
		"SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
		return nil, nil, nil, tracerr.Wrap(err)
	}
	if err := meddler.QueryRow(hdb.db, &wDelayer,
	if err := meddler.QueryRow(hdb.dbRead, &wDelayer,
		"SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
		return nil, nil, nil, tracerr.Wrap(err)
	}
@@ -756,7 +820,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
		block_stamp,
		withdrawals
	) VALUES %s;`,
		bucketUpdates[:],
		bucketUpdates,
	))
}

@@ -770,13 +834,25 @@ func (hdb *HistoryDB) AddBucketUpdatesTest(d meddler.DB, bucketUpdates []common.
func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
	var bucketUpdates []*common.BucketUpdate
	err := meddler.QueryAll(
		hdb.db, &bucketUpdates,
		hdb.dbRead, &bucketUpdates,
		`SELECT eth_block_num, num_bucket, block_stamp, withdrawals
		FROM bucket_update ORDER BY item_id;`,
	)
	return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}

func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
	currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
	minBidInfo := []*MinBidInfo{}
	query := `
		SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
		WHERE default_slot_set_bid_slot_num < $1
		ORDER BY default_slot_set_bid_slot_num DESC
		LIMIT $2;`
	err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
	return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
}

func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
	if len(tokenExchanges) == 0 {
		return nil
@@ -788,7 +864,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
		eth_addr,
		value_usd
	) VALUES %s;`,
		tokenExchanges[:],
		tokenExchanges,
	))
}
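The two transactional writers below — `SetInitialSCVars` and `AddBlockSCData` — keep all their statements inside one transaction on the write pool, so a failure (or a later reorg handled by `Reorg`) never leaves partial state. The skeleton of that pattern, with a hypothetical helper name and the same sqlx calls the diff uses:

```go
package example

import "github.com/jmoiron/sqlx"

// withWriteTxn runs apply inside a single transaction on the write pool,
// rolling back on any error and committing once at the end.
func withWriteTxn(dbWrite *sqlx.DB, apply func(txn *sqlx.Tx) error) (err error) {
	txn, err := dbWrite.Beginx()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = txn.Rollback()
		}
	}()
	if err = apply(txn); err != nil {
		return err
	}
	return txn.Commit()
}
```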
@@ -796,7 +872,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) {
	var tokenExchanges []*common.TokenExchange
	err := meddler.QueryAll(
		hdb.db, &tokenExchanges,
		hdb.dbRead, &tokenExchanges,
		"SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;",
	)
	return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err)
@@ -816,7 +892,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
		token_addr,
		amount
	) VALUES %s;`,
		escapeHatchWithdrawals[:],
		escapeHatchWithdrawals,
	))
}

@@ -824,7 +900,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) {
	var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal
	err := meddler.QueryAll(
		hdb.db, &escapeHatchWithdrawals,
		hdb.dbRead, &escapeHatchWithdrawals,
		"SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;",
	)
	return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal),
@@ -837,7 +913,7 @@ func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHat
// exist in the smart contracts.
func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables,
	auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error {
	txn, err := hdb.db.Beginx()
	txn, err := hdb.dbWrite.Beginx()
	if err != nil {
		return tracerr.Wrap(err)
	}
@@ -921,7 +997,7 @@ func (hdb *HistoryDB) setExtraInfoForgedL1UserTxs(d sqlx.Ext, txs []common.L1Tx)
// the pagination system of the API/DB depends on this. Within blocks, all
// items should also be in the correct order (Accounts, Tokens, Txs, etc.)
func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
	txn, err := hdb.db.Beginx()
	txn, err := hdb.dbWrite.Beginx()
	if err != nil {
		return tracerr.Wrap(err)
	}
@@ -993,6 +1069,11 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
		return tracerr.Wrap(err)
	}

	// Add accountBalances if it exists
	if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
		return tracerr.Wrap(err)
	}

	// Set the EffectiveAmount and EffectiveDepositAmount of all the
	// L1UserTxs that have been forged in this batch
	if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {
@@ -1070,27 +1151,16 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
	return tracerr.Wrap(txn.Commit())
}

// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
	coordinator := &CoordinatorAPI{}
	err := meddler.QueryRow(
		hdb.db, coordinator,
		"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
		bidderAddr,
	)
	return coordinator, tracerr.Wrap(err)
}

// AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
	return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars))
	return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
}

// GetTokensTest used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
	tokens := []*TokenWithUSD{}
	if err := meddler.QueryAll(
		hdb.db, &tokens,
		hdb.dbRead, &tokens,
		"SELECT * FROM TOKEN",
	); err != nil {
		return nil, tracerr.Wrap(err)
@@ -1100,3 +1170,49 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
	}
	return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
}

// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
	var recommendedFee common.RecommendedFee
	// Get total txs and the batch of the first selected tx of the last hour
	type totalTxsSinceBatchNum struct {
		TotalTxs      int             `meddler:"total_txs"`
		FirstBatchNum common.BatchNum `meddler:"batch_num"`
	}
	ttsbn := &totalTxsSinceBatchNum{}
	if err := meddler.QueryRow(
		hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
		COALESCE (MIN(tx.batch_num), 0) as batch_num
		FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
		WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
	); err != nil {
		return nil, tracerr.Wrap(err)
	}
	// Get the amount of batches and accumulated fees for the last hour
	type totalBatchesAndFee struct {
		TotalBatches int     `meddler:"total_batches"`
		TotalFees    float64 `meddler:"total_fees"`
	}
	tbf := &totalBatchesAndFee{}
	if err := meddler.QueryRow(
		hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
		COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
		WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
	); err != nil {
		return nil, tracerr.Wrap(err)
	}
	// Update NodeInfo struct
	var avgTransactionFee float64
	if ttsbn.TotalTxs > 0 {
		avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
	} else {
		avgTransactionFee = 0
	}
	recommendedFee.ExistingAccount =
		math.Max(avgTransactionFee, minFeeUSD)
	recommendedFee.CreatesAccount =
		math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
	recommendedFee.CreatesAccountAndRegister =
		math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
	return &recommendedFee, nil
}
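A worked sketch of the recommendation math above: the average fee over the last hour is floored at `minFeeUSD`, and account-creating tx types are scaled up first. `createAccountExtraFeePercentage` and `createAccountInternalExtraFeePercentage` are package constants not shown in this diff; the 2 and 2.5 below are hypothetical stand-ins:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	avgFee := 0.125   // USD: last hour's fees / last hour's txs
	minFeeUSD := 0.10 // floor applied to every recommendation

	fmt.Println(math.Max(avgFee, minFeeUSD))     // 0.125  — existing account
	fmt.Println(math.Max(2*avgFee, minFeeUSD))   // 0.25   — creates account
	fmt.Println(math.Max(2.5*avgFee, minFeeUSD)) // 0.3125 — creates account and registers
}
```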
@@ -39,12 +39,12 @@ func TestMain(m *testing.M) {
	if err != nil {
		panic(err)
	}
	historyDB = NewHistoryDB(db, nil)
	historyDB = NewHistoryDB(db, db, nil)
	if err != nil {
		panic(err)
	}
	apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
	historyDBWithACC = NewHistoryDB(db, apiConnCon)
	historyDBWithACC = NewHistoryDB(db, db, apiConnCon)
	// Run tests
	result := m.Run()
	// Close DB
@@ -203,6 +203,10 @@ func TestBatches(t *testing.T) {
	fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
	assert.NoError(t, err)
	assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
	// Test GetLastBatch
	fetchedLastBatch, err := historyDB.GetLastBatch()
	assert.NoError(t, err)
	assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
	// Test GetLastL1TxsNum
	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
	assert.NoError(t, err)
@@ -211,6 +215,12 @@ func TestBatches(t *testing.T) {
	fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
	assert.NoError(t, err)
	assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
	// Test GetBatch
	fetchedBatch, err := historyDB.GetBatch(1)
	require.NoError(t, err)
	assert.Equal(t, &batches[0], fetchedBatch)
	_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
	assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}

func TestBids(t *testing.T) {
@@ -367,6 +377,22 @@ func TestAccounts(t *testing.T) {
		accs[i].Balance = nil
		assert.Equal(t, accs[i], acc)
	}
	// Test AccountBalances
	accUpdates := make([]common.AccountUpdate, len(accs))
	for i, acc := range accs {
		accUpdates[i] = common.AccountUpdate{
			EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
			BatchNum:    acc.BatchNum,
			Idx:         acc.Idx,
			Nonce:       common.Nonce(i),
			Balance:     big.NewInt(int64(i)),
		}
	}
	err = historyDB.AddAccountUpdates(accUpdates)
	require.NoError(t, err)
	fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
	require.NoError(t, err)
	assert.Equal(t, accUpdates, fetchedAccBalances)
}

func TestTxs(t *testing.T) {
@@ -611,10 +637,10 @@ func TestTxs(t *testing.T) {
	assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)

	// Tx ID
	assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
	assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
	assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
	assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
	assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
	assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
	assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
	assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())

	// Tx From and To IDx
	assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -791,11 +817,11 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
	}
	// Add second batch to trigger the update of the batch_num,
	// while avoiding the implicit call of setExtraInfoForgedL1UserTxs
	err = historyDB.addBlock(historyDB.db, &blocks[1].Block)
	err = historyDB.addBlock(historyDB.dbWrite, &blocks[1].Block)
	require.NoError(t, err)
	err = historyDB.addBatch(historyDB.db, &blocks[1].Rollup.Batches[0].Batch)
	err = historyDB.addBatch(historyDB.dbWrite, &blocks[1].Rollup.Batches[0].Batch)
	require.NoError(t, err)
	err = historyDB.addAccounts(historyDB.db, blocks[1].Rollup.Batches[0].CreatedAccounts)
	err = historyDB.addAccounts(historyDB.dbWrite, blocks[1].Rollup.Batches[0].CreatedAccounts)
	require.NoError(t, err)

	// Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block
@@ -805,7 +831,7 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
	l1Txs[1].EffectiveAmount = big.NewInt(0)
	l1Txs[2].EffectiveDepositAmount = big.NewInt(0)
	l1Txs[2].EffectiveAmount = big.NewInt(0)
	err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.db, l1Txs)
	err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.dbWrite, l1Txs)
	require.NoError(t, err)

	dbL1Txs, err := historyDB.GetAllL1UserTxs()
@@ -892,10 +918,10 @@ func TestUpdateExitTree(t *testing.T) {
		common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false,
			Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr},
	)
	err = historyDB.addBlock(historyDB.db, &block.Block)
	err = historyDB.addBlock(historyDB.dbWrite, &block.Block)
	require.NoError(t, err)

	err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
	err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num,
		block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
	require.NoError(t, err)

@@ -925,10 +951,10 @@ func TestUpdateExitTree(t *testing.T) {
		Token:  tokenAddr,
		Amount: big.NewInt(80),
	})
	err = historyDB.addBlock(historyDB.db, &block.Block)
	err = historyDB.addBlock(historyDB.dbWrite, &block.Block)
	require.NoError(t, err)

	err = historyDB.updateExitTree(historyDB.db, block.Block.Num,
	err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num,
		block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
	require.NoError(t, err)

@@ -971,7 +997,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
		URL: "bar",
	},
	}
	err = historyDB.addCoordinators(historyDB.db, coords)
	err = historyDB.addCoordinators(historyDB.dbWrite, coords)
	require.NoError(t, err)

	bids := []common.Bid{
@@ -989,7 +1015,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
	},
	}

	err = historyDB.addBids(historyDB.db, bids)
	err = historyDB.addBids(historyDB.dbWrite, bids)
	require.NoError(t, err)

	forger10, err := historyDB.GetBestBidCoordinator(10)
@@ -1027,7 +1053,7 @@ func TestAddBucketUpdates(t *testing.T) {
		Withdrawals: big.NewInt(42),
	},
	}
	err := historyDB.addBucketUpdates(historyDB.db, bucketUpdates)
	err := historyDB.addBucketUpdates(historyDB.dbWrite, bucketUpdates)
	require.NoError(t, err)
	dbBucketUpdates, err := historyDB.GetAllBucketUpdates()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1052,7 +1078,7 @@ func TestAddTokenExchanges(t *testing.T) {
|
|||||||
ValueUSD: 67890,
|
ValueUSD: 67890,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := historyDB.addTokenExchanges(historyDB.db, tokenExchanges)
|
err := historyDB.addTokenExchanges(historyDB.dbWrite, tokenExchanges)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
dbTokenExchanges, err := historyDB.GetAllTokenExchanges()
|
dbTokenExchanges, err := historyDB.GetAllTokenExchanges()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1081,7 +1107,7 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
|
|||||||
Amount: big.NewInt(20003),
|
Amount: big.NewInt(20003),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := historyDB.addEscapeHatchWithdrawals(historyDB.db, escapeHatchWithdrawals)
|
err := historyDB.addEscapeHatchWithdrawals(historyDB.dbWrite, escapeHatchWithdrawals)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals()
|
dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1146,19 +1172,15 @@ func TestGetMetricsAPI(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
|
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
|
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
|
||||||
|
|
||||||
// Frequency is not exactly the desired one, some decimals may appear
|
// Frequency is not exactly the desired one, some decimals may appear
|
||||||
assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
|
// There is a -2 as time for first and last batch is not taken into account
|
||||||
assert.Less(t, res.BatchFrequency, float64(frequency+1))
|
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
|
||||||
// Truncate frecuency into an int to do an exact check
|
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
|
||||||
assert.Equal(t, frequency, int(res.BatchFrequency))
|
|
||||||
// This may also be different in some decimals
|
|
||||||
// Truncate it to the third decimal to compare
|
|
||||||
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
|
|
||||||
assert.Equal(t, int64(3), res.TotalAccounts)
|
assert.Equal(t, int64(3), res.TotalAccounts)
|
||||||
assert.Equal(t, int64(3), res.TotalBJJs)
|
assert.Equal(t, int64(3), res.TotalBJJs)
|
||||||
// Til does not set fees
|
// Til does not set fees
|
||||||
@@ -1185,7 +1207,8 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
|||||||
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
|
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
|
||||||
|
|
||||||
// Transfers
|
// Transfers
|
||||||
for x := 0; x < 6000; x++ {
|
const numBlocks int = 30
|
||||||
|
for x := 0; x < numBlocks; x++ {
|
||||||
set = append(set, til.Instruction{
|
set = append(set, til.Instruction{
|
||||||
Typ: common.TxTypeTransfer,
|
Typ: common.TxTypeTransfer,
|
||||||
TokenID: common.TokenID(0),
|
TokenID: common.TokenID(0),
|
||||||
@@ -1209,36 +1232,31 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
|||||||
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
|
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
const numBatches int = 6002
|
const numBatches int = 2 + numBlocks
|
||||||
const numTx int = 6003
|
const blockNum = 4 + numBlocks
|
||||||
const blockNum = 6005 - 1
|
|
||||||
|
|
||||||
// Sanity check
|
// Sanity check
|
||||||
require.Equal(t, blockNum, len(blocks))
|
require.Equal(t, blockNum, len(blocks))
|
||||||
|
|
||||||
// Adding one batch per block
|
// Adding one batch per block
|
||||||
// batch frequency can be chosen
|
// batch frequency can be chosen
|
||||||
const frequency int = 15
|
const blockTime time.Duration = 3600 * time.Second
|
||||||
|
now := time.Now()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
for i := range blocks {
|
for i := range blocks {
|
||||||
blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
|
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime)
|
||||||
err = historyDB.AddBlockSCData(&blocks[i])
|
err = historyDB.AddBlockSCData(&blocks[i])
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
|
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
|
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
|
||||||
|
|
||||||
// Frequency is not exactly the desired one, some decimals may appear
|
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
|
||||||
assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
|
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
|
||||||
assert.Less(t, res.BatchFrequency, float64(frequency+1))
|
|
||||||
// Truncate frecuency into an int to do an exact check
|
|
||||||
assert.Equal(t, frequency, int(res.BatchFrequency))
|
|
||||||
// This may also be different in some decimals
|
|
||||||
// Truncate it to the third decimal to compare
|
|
||||||
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
|
|
||||||
assert.Equal(t, int64(3), res.TotalAccounts)
|
assert.Equal(t, int64(3), res.TotalAccounts)
|
||||||
assert.Equal(t, int64(3), res.TotalBJJs)
|
assert.Equal(t, int64(3), res.TotalBJJs)
|
||||||
// Til does not set fees
|
// Til does not set fees
|
||||||
@@ -1247,13 +1265,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
|||||||
|
|
||||||
func TestGetMetricsAPIEmpty(t *testing.T) {
|
func TestGetMetricsAPIEmpty(t *testing.T) {
|
||||||
test.WipeDB(historyDB.DB())
|
test.WipeDB(historyDB.DB())
|
||||||
_, err := historyDBWithACC.GetMetricsAPI(0)
|
_, err := historyDBWithACC.GetMetricsInternalAPI(0)
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetAvgTxFeeEmpty(t *testing.T) {
|
|
||||||
test.WipeDB(historyDB.DB())
|
|
||||||
_, err := historyDBWithACC.GetAvgTxFeeAPI()
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1442,3 +1454,65 @@ func setTestBlocks(from, to int64) []common.Block {
|
|||||||
}
|
}
|
||||||
return blocks
|
return blocks
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNodeInfo(t *testing.T) {
|
||||||
|
test.WipeDB(historyDB.DB())
|
||||||
|
|
||||||
|
err := historyDB.SetStateInternalAPI(&StateAPI{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
clientSetup := test.NewClientSetupExample()
|
||||||
|
constants := &Constants{
|
||||||
|
SCConsts: common.SCConsts{
|
||||||
|
Rollup: *clientSetup.RollupConstants,
|
||||||
|
Auction: *clientSetup.AuctionConstants,
|
||||||
|
WDelayer: *clientSetup.WDelayerConstants,
|
||||||
|
},
|
||||||
|
ChainID: 42,
|
||||||
|
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
|
||||||
|
}
|
||||||
|
err = historyDB.SetConstants(constants)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Test parameters
|
||||||
|
stateAPI := &StateAPI{
|
||||||
|
NodePublicConfig: NodePublicConfig{
|
||||||
|
ForgeDelay: 3.1,
|
||||||
|
},
|
||||||
|
Network: NetworkAPI{
|
||||||
|
LastEthBlock: 12,
|
||||||
|
LastSyncBlock: 34,
|
||||||
|
},
|
||||||
|
Metrics: MetricsAPI{
|
||||||
|
TransactionsPerBatch: 1.1,
|
||||||
|
TotalAccounts: 42,
|
||||||
|
},
|
||||||
|
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
|
||||||
|
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
|
||||||
|
WithdrawalDelayer: *clientSetup.WDelayerVariables,
|
||||||
|
RecommendedFee: common.RecommendedFee{
|
||||||
|
ExistingAccount: 0.15,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err = historyDB.SetStateInternalAPI(stateAPI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
nodeConfig := &NodeConfig{
|
||||||
|
MaxPoolTxs: 123,
|
||||||
|
MinFeeUSD: 0.5,
|
||||||
|
}
|
||||||
|
err = historyDB.SetNodeConfig(nodeConfig)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
dbConstants, err := historyDB.GetConstants()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, constants, dbConstants)
|
||||||
|
|
||||||
|
dbNodeConfig, err := historyDB.GetNodeConfig()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, nodeConfig, dbNodeConfig)
|
||||||
|
|
||||||
|
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, stateAPI, dbStateAPI)
|
||||||
|
}
|
||||||
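The reworked metrics assertions above trade truncation-based equality for `assert.InEpsilon`, and their expected values follow from simple interval arithmetic: the measurable window runs from the first to the last batch timestamp, so the first and last batch contribute no interval of their own. A back-of-the-envelope sketch of that arithmetic follows; every constant in it is an illustrative placeholder, since the real `frequency`, `numBatches`, `numTx` and `blockNum` are defined earlier in the test file.

```
// Back-of-the-envelope check of the InEpsilon expectations above. All
// constants here are illustrative placeholders, not the test's real values.
package main

import "fmt"

func main() {
	const (
		frequency  = 15 // seconds between two consecutive batches (assumed)
		numBatches = 10 // total batches forged (assumed)
		numTx      = 20 // total transactions (assumed)
		blockNum   = 12 // total blocks (assumed)
	)
	// The first and last batch add no measurable interval, hence the
	// (numBatches - 2) factor in the expected batch frequency.
	expBatchFrequency := float64(frequency) * float64(numBatches-2) / float64(numBatches)
	// Transactions per second over the simulated span of blocks.
	expTPS := float64(numTx) / float64(frequency*blockNum-frequency)
	fmt.Printf("expected BatchFrequency=%.3f TPS=%.3f\n", expBatchFrequency, expTPS)
}
```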

db/historydb/nodeinfo.go (new file, 169 lines)
@@ -0,0 +1,169 @@
+package historydb
+
+import (
+	"time"
+
+	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/tracerr"
+	"github.com/russross/meddler"
+)
+
+const (
+	createAccountExtraFeePercentage         float64 = 2
+	createAccountInternalExtraFeePercentage float64 = 2.5
+)
+
+// Period represents a time period in ethereum
+type Period struct {
+	SlotNum       int64     `json:"slotNum"`
+	FromBlock     int64     `json:"fromBlock"`
+	ToBlock       int64     `json:"toBlock"`
+	FromTimestamp time.Time `json:"fromTimestamp"`
+	ToTimestamp   time.Time `json:"toTimestamp"`
+}
+
+// NextForgerAPI represents the next forger exposed via the API
+type NextForgerAPI struct {
+	Coordinator CoordinatorAPI `json:"coordinator"`
+	Period      Period         `json:"period"`
+}
+
+// NetworkAPI is the network state exposed via the API
+type NetworkAPI struct {
+	LastEthBlock  int64           `json:"lastEthereumBlock"`
+	LastSyncBlock int64           `json:"lastSynchedBlock"`
+	LastBatch     *BatchAPI       `json:"lastBatch"`
+	CurrentSlot   int64           `json:"currentSlot"`
+	NextForgers   []NextForgerAPI `json:"nextForgers"`
+}
+
+// NodePublicConfig is the configuration of the node that is exposed via API
+type NodePublicConfig struct {
+	// ForgeDelay in seconds
+	ForgeDelay float64 `json:"forgeDelay"`
+}
+
+// StateAPI is an object representing the node and network state exposed via the API
+type StateAPI struct {
+	// NodePublicConfig is the configuration of the node that is exposed via API
+	NodePublicConfig  NodePublicConfig         `json:"nodeConfig"`
+	Network           NetworkAPI               `json:"network"`
+	Metrics           MetricsAPI               `json:"metrics"`
+	Rollup            RollupVariablesAPI       `json:"rollup"`
+	Auction           AuctionVariablesAPI      `json:"auction"`
+	WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
+	RecommendedFee    common.RecommendedFee    `json:"recommendedFee"`
+}
+
+// Constants contains network constants
+type Constants struct {
+	common.SCConsts
+	ChainID       uint16
+	HermezAddress ethCommon.Address
+}
+
+// NodeConfig contains the node config exposed in the API
+type NodeConfig struct {
+	MaxPoolTxs uint32  `meddler:"max_pool_txs"`
+	MinFeeUSD  float64 `meddler:"min_fee"`
+}
+
+// NodeInfo contains information about the node used when serving the API
+type NodeInfo struct {
+	ItemID     int         `meddler:"item_id,pk"`
+	StateAPI   *StateAPI   `meddler:"state,json"`
+	NodeConfig *NodeConfig `meddler:"config,json"`
+	Constants  *Constants  `meddler:"constants,json"`
+}
+
+// GetNodeInfo returns the NodeInfo
+func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
+	ni := &NodeInfo{}
+	err := meddler.QueryRow(
+		hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
+	)
+	return ni, tracerr.Wrap(err)
+}
+
+// GetConstants returns the Constants
+func (hdb *HistoryDB) GetConstants() (*Constants, error) {
+	var nodeInfo NodeInfo
+	err := meddler.QueryRow(
+		hdb.dbRead, &nodeInfo,
+		"SELECT constants FROM node_info WHERE item_id = 1;",
+	)
+	return nodeInfo.Constants, tracerr.Wrap(err)
+}
+
+// SetConstants sets the Constants
+func (hdb *HistoryDB) SetConstants(constants *Constants) error {
+	_constants := struct {
+		Constants *Constants `meddler:"constants,json"`
+	}{constants}
+	values, err := meddler.Default.Values(&_constants, false)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	_, err = hdb.dbWrite.Exec(
+		"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
+		values[0],
+	)
+	return tracerr.Wrap(err)
+}
+
+// GetStateInternalAPI returns the StateAPI
+func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
+	return hdb.getStateAPI(hdb.dbRead)
+}
+
+func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
+	var nodeInfo NodeInfo
+	err := meddler.QueryRow(
+		d, &nodeInfo,
+		"SELECT state FROM node_info WHERE item_id = 1;",
+	)
+	return nodeInfo.StateAPI, tracerr.Wrap(err)
+}
+
+// SetStateInternalAPI sets the StateAPI
+func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
+	_stateAPI := struct {
+		StateAPI *StateAPI `meddler:"state,json"`
+	}{stateAPI}
+	values, err := meddler.Default.Values(&_stateAPI, false)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	_, err = hdb.dbWrite.Exec(
+		"UPDATE node_info SET state = $1 WHERE item_id = 1;",
+		values[0],
+	)
+	return tracerr.Wrap(err)
+}
+
+// GetNodeConfig returns the NodeConfig
+func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
+	var nodeInfo NodeInfo
+	err := meddler.QueryRow(
+		hdb.dbRead, &nodeInfo,
+		"SELECT config FROM node_info WHERE item_id = 1;",
+	)
+	return nodeInfo.NodeConfig, tracerr.Wrap(err)
+}
+
+// SetNodeConfig sets the NodeConfig
+func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
+	_nodeConfig := struct {
+		NodeConfig *NodeConfig `meddler:"config,json"`
+	}{nodeConfig}
+	values, err := meddler.Default.Values(&_nodeConfig, false)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	_, err = hdb.dbWrite.Exec(
+		"UPDATE node_info SET config = $1 WHERE item_id = 1;",
+		values[0],
+	)
+	return tracerr.Wrap(err)
+}
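Every getter and setter in `nodeinfo.go` addresses the same single `node_info` row, with meddler's `json` column option serializing whole structs into one column, so each read and write of the API state is one atomic row operation. The intended split is one writer keeping the row fresh while API handlers only read it; a hedged sketch of such a caller inside package `historydb` follows (the updater loop, its interval and the `buildState` helper are illustrative assumptions, not part of this file; `log` is the repo's logger, used the same way elsewhere in this diff).

```
// Hypothetical wiring sketch (not part of nodeinfo.go): a background
// goroutine periodically recomputes the API state and persists it with
// SetStateInternalAPI; request handlers then serve the cached row through
// GetStateInternalAPI instead of recomputing metrics on every request.
func runStateUpdater(hdb *HistoryDB, interval time.Duration, buildState func() *StateAPI) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		if err := hdb.SetStateInternalAPI(buildState()); err != nil {
			log.Errorw("SetStateInternalAPI", "err", err)
		}
	}
}
```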
@@ -239,8 +239,8 @@ type AccountAPI struct {
 	BatchNum  common.BatchNum     `meddler:"batch_num"`
 	PublicKey apitypes.HezBJJ     `meddler:"bjj"`
 	EthAddr   apitypes.HezEthAddr `meddler:"eth_addr"`
-	Nonce     common.Nonce        `meddler:"-"` // max of 40 bits used
-	Balance   *apitypes.BigIntStr `meddler:"-"` // max of 192 bits used
+	Nonce     common.Nonce        `meddler:"nonce"`   // max of 40 bits used
+	Balance   *apitypes.BigIntStr `meddler:"balance"` // max of 192 bits used
 	TotalItems uint64 `meddler:"total_items"`
 	FirstItem  uint64 `meddler:"first_item"`
 	LastItem   uint64 `meddler:"last_item"`
@@ -302,25 +302,15 @@ type BatchAPI struct {
 	LastItem uint64 `json:"-" meddler:"last_item"`
 }
 
-// Metrics define metrics of the network
-type Metrics struct {
+// MetricsAPI defines metrics of the network
+type MetricsAPI struct {
 	TransactionsPerBatch  float64 `json:"transactionsPerBatch"`
 	BatchFrequency        float64 `json:"batchFrequency"`
 	TransactionsPerSecond float64 `json:"transactionsPerSecond"`
 	TotalAccounts         int64   `json:"totalAccounts" meddler:"total_accounts"`
 	TotalBJJs             int64   `json:"totalBJJs" meddler:"total_bjjs"`
 	AvgTransactionFee     float64 `json:"avgTransactionFee"`
-}
-
-// MetricsTotals is used to get temporal information from HistoryDB
-// to calculate data to be stored into the Metrics struct
-type MetricsTotals struct {
-	TotalTransactions uint64          `meddler:"total_txs"`
-	FirstBatchNum     common.BatchNum `meddler:"batch_num"`
-	TotalBatches      int64           `meddler:"total_batches"`
-	TotalFeesUSD      float64         `meddler:"total_fees"`
-	MinTimestamp      time.Time       `meddler:"min_timestamp,utctime"`
-	MaxTimestamp      time.Time       `meddler:"max_timestamp,utctime"`
+	EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"`
 }
 
 // BidAPI is a representation of a bid with additional information
@@ -373,6 +363,27 @@ type RollupVariablesAPI struct {
 	SafeMode bool `json:"safeMode" meddler:"safe_mode"`
 }
 
+// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
+func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
+	rollupVars := RollupVariablesAPI{
+		EthBlockNum:           rollupVariables.EthBlockNum,
+		FeeAddToken:           apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
+		ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
+		WithdrawalDelay:       rollupVariables.WithdrawalDelay,
+		SafeMode:              rollupVariables.SafeMode,
+	}
+
+	for i, bucket := range rollupVariables.Buckets {
+		rollupVars.Buckets[i] = BucketParamsAPI{
+			CeilUSD:             apitypes.NewBigIntStr(bucket.CeilUSD),
+			Withdrawals:         apitypes.NewBigIntStr(bucket.Withdrawals),
+			BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
+			MaxWithdrawals:      apitypes.NewBigIntStr(bucket.MaxWithdrawals),
+		}
+	}
+	return &rollupVars
+}
+
 // AuctionVariablesAPI are the variables of the Auction Smart Contract
 type AuctionVariablesAPI struct {
 	EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -397,3 +408,28 @@ type AuctionVariablesAPI struct {
 	// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
 	SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
 }
+
+// NewAuctionVariablesAPI creates an AuctionVariablesAPI from common.AuctionVariables
+func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
+	auctionVars := AuctionVariablesAPI{
+		EthBlockNum:              auctionVariables.EthBlockNum,
+		DonationAddress:          auctionVariables.DonationAddress,
+		BootCoordinator:          auctionVariables.BootCoordinator,
+		BootCoordinatorURL:       auctionVariables.BootCoordinatorURL,
+		DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
+		ClosedAuctionSlots:       auctionVariables.ClosedAuctionSlots,
+		OpenAuctionSlots:         auctionVariables.OpenAuctionSlots,
+		Outbidding:               auctionVariables.Outbidding,
+		SlotDeadline:             auctionVariables.SlotDeadline,
+	}
+
+	for i, slot := range auctionVariables.DefaultSlotSetBid {
+		auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
+	}
+
+	for i, ratio := range auctionVariables.AllocationRatio {
+		auctionVars.AllocationRatio[i] = ratio
+	}
+
+	return &auctionVars
+}
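The `Metrics` to `MetricsAPI` rename only touches the Go identifier: the JSON field names still come from the struct tags, so existing API clients see the same payload. A throwaway sketch inside package `historydb` to confirm that (illustrative values; not part of the package):

```
package historydb

import (
	"encoding/json"
	"fmt"
)

// printMetricsWireFormat shows that the wire format is driven by the json
// struct tags, not by the renamed Go type. Values are illustrative.
func printMetricsWireFormat() {
	m := MetricsAPI{TransactionsPerBatch: 2, BatchFrequency: 15}
	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// prints: {"transactionsPerBatch":2,"batchFrequency":15,...}
}
```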

db/kvdb/kvdb.go (269 lines)
@@ -27,6 +27,8 @@ const (
 	// PathLast defines the subpath of the last Batch in the subpath
 	// of the StateDB
 	PathLast = "last"
+	// DefaultKeep is the default value for the Keep parameter
+	DefaultKeep = 128
 )
 
 var (
@@ -34,16 +36,18 @@ var (
 	KeyCurrentBatch = []byte("k:currentbatch")
 	// keyCurrentIdx is used as key in the db to store the CurrentIdx
 	keyCurrentIdx = []byte("k:idx")
+	// ErrNoLast is returned when the KVDB has been configured to not have
+	// a Last checkpoint but a Last method is used
+	ErrNoLast = fmt.Errorf("no last checkpoint")
 )
 
 // KVDB represents the Key-Value DB object
 type KVDB struct {
-	path string
+	cfg Config
 	db  *pebble.Storage
 	// CurrentIdx holds the current Idx that the BatchBuilder is using
 	CurrentIdx   common.Idx
 	CurrentBatch common.BatchNum
-	keep int
 	m    sync.Mutex
 	last *Last
 }
@@ -61,13 +65,13 @@ func (k *Last) setNew() error {
 	defer k.rw.Unlock()
 	if k.db != nil {
 		k.db.Close()
+		k.db = nil
 	}
 	lastPath := path.Join(k.path, PathLast)
-	err := os.RemoveAll(lastPath)
-	if err != nil {
+	if err := os.RemoveAll(lastPath); err != nil {
 		return tracerr.Wrap(err)
 	}
-	db, err := pebble.NewPebbleStorage(path.Join(k.path, lastPath), false)
+	db, err := pebble.NewPebbleStorage(lastPath, false)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -80,6 +84,7 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
 	defer k.rw.Unlock()
 	if k.db != nil {
 		k.db.Close()
+		k.db = nil
 	}
 	lastPath := path.Join(k.path, PathLast)
 	if err := kvdb.MakeCheckpointFromTo(batchNum, lastPath); err != nil {
@@ -96,26 +101,48 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
 func (k *Last) close() {
 	k.rw.Lock()
 	defer k.rw.Unlock()
+	if k.db != nil {
 		k.db.Close()
+		k.db = nil
+	}
 }
 
+// Config of the KVDB
+type Config struct {
+	// Path where the checkpoints will be stored
+	Path string
+	// Keep is the number of old checkpoints to keep. If 0, all
+	// checkpoints are kept.
+	Keep int
+	// At every checkpoint, check that there are no gaps between the
+	// checkpoints
+	NoGapsCheck bool
+	// NoLast skips having an opened DB with a checkpoint to the last
+	// batchNum for thread-safe reads.
+	NoLast bool
+}
+
 // NewKVDB creates a new KVDB, allowing to use an in-memory or in-disk storage.
 // Checkpoints older than the value defined by `keep` will be deleted.
-func NewKVDB(pathDB string, keep int) (*KVDB, error) {
+// func NewKVDB(pathDB string, keep int) (*KVDB, error) {
+func NewKVDB(cfg Config) (*KVDB, error) {
 	var sto *pebble.Storage
 	var err error
-	sto, err = pebble.NewPebbleStorage(path.Join(pathDB, PathCurrent), false)
+	sto, err = pebble.NewPebbleStorage(path.Join(cfg.Path, PathCurrent), false)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
 
+	var last *Last
+	if !cfg.NoLast {
+		last = &Last{
+			path: cfg.Path,
+		}
+	}
 	kvdb := &KVDB{
-		path: pathDB,
+		cfg:  cfg,
 		db:   sto,
-		keep: keep,
-		last: &Last{
-			path: pathDB,
-		},
+		last: last,
 	}
 	// load currentBatch
 	kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
@@ -133,29 +160,32 @@ func NewKVDB(pathDB string, keep int) (*KVDB, error) {
 }
 
 // LastRead is a thread-safe method to query the last KVDB
-func (kvdb *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
-	kvdb.last.rw.RLock()
-	defer kvdb.last.rw.RUnlock()
-	return fn(kvdb.last.db)
+func (k *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
+	if k.last == nil {
+		return tracerr.Wrap(ErrNoLast)
+	}
+	k.last.rw.RLock()
+	defer k.last.rw.RUnlock()
+	return fn(k.last.db)
 }
 
 // DB returns the *pebble.Storage from the KVDB
-func (kvdb *KVDB) DB() *pebble.Storage {
-	return kvdb.db
+func (k *KVDB) DB() *pebble.Storage {
+	return k.db
 }
 
 // StorageWithPrefix returns the db.Storage with the given prefix from the
 // current KVDB
-func (kvdb *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
-	return kvdb.db.WithPrefix(prefix)
+func (k *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
	return k.db.WithPrefix(prefix)
 }
 
 // Reset resets the KVDB to the checkpoint at the given batchNum. Reset does
 // not delete the checkpoints between old current and the new current, those
 // checkpoints will remain in the storage, and eventually will be deleted when
 // MakeCheckpoint overwrites them.
-func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
-	return kvdb.reset(batchNum, true)
+func (k *KVDB) Reset(batchNum common.BatchNum) error {
+	return k.reset(batchNum, true)
 }
 
 // reset resets the KVDB to the checkpoint at the given batchNum. Reset does
@@ -163,21 +193,19 @@ func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
 // checkpoints will remain in the storage, and eventually will be deleted when
 // MakeCheckpoint overwrites them. `closeCurrent` will close the currently
 // opened db before doing the reset.
-func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
-	currentPath := path.Join(kvdb.path, PathCurrent)
+func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
+	currentPath := path.Join(k.cfg.Path, PathCurrent)
 
-	if closeCurrent {
-		if err := kvdb.db.Pebble().Close(); err != nil {
-			return tracerr.Wrap(err)
-		}
+	if closeCurrent && k.db != nil {
+		k.db.Close()
+		k.db = nil
 	}
 	// remove 'current'
-	err := os.RemoveAll(currentPath)
-	if err != nil {
+	if err := os.RemoveAll(currentPath); err != nil {
 		return tracerr.Wrap(err)
 	}
 	// remove all checkpoints > batchNum
-	list, err := kvdb.ListCheckpoints()
+	list, err := k.ListCheckpoints()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -190,7 +218,7 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
 		}
 	}
 	for _, bn := range list[start:] {
-		if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
+		if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -201,39 +229,43 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
 		if err != nil {
 			return tracerr.Wrap(err)
 		}
-		kvdb.db = sto
-		kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
-		kvdb.CurrentBatch = 0
-		if err := kvdb.last.setNew(); err != nil {
-			return tracerr.Wrap(err)
+		k.db = sto
+		k.CurrentIdx = common.RollupConstReservedIDx // 255
+		k.CurrentBatch = 0
+		if k.last != nil {
+			if err := k.last.setNew(); err != nil {
+				return tracerr.Wrap(err)
+			}
 		}
 		return nil
 	}
 
 	// copy 'batchNum' to 'current'
-	if err := kvdb.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
+	if err := k.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
 		return tracerr.Wrap(err)
 	}
 	// copy 'batchNum' to 'last'
-	if err := kvdb.last.set(kvdb, batchNum); err != nil {
-		return tracerr.Wrap(err)
+	if k.last != nil {
+		if err := k.last.set(k, batchNum); err != nil {
+			return tracerr.Wrap(err)
+		}
 	}
 
 	// open the new 'current'
 	sto, err := pebble.NewPebbleStorage(currentPath, false)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	kvdb.db = sto
+	k.db = sto
 
 	// get currentBatch num
-	kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
+	k.CurrentBatch, err = k.GetCurrentBatch()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
 	// idx is obtained from the statedb reset
-	kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
+	k.CurrentIdx, err = k.GetCurrentIdx()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -243,28 +275,28 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
 
 // ResetFromSynchronizer performs a reset in the KVDB getting the state from
 // synchronizerKVDB for the given batchNum.
-func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
+func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
 	if synchronizerKVDB == nil {
 		return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil"))
 	}
 
-	currentPath := path.Join(kvdb.path, PathCurrent)
-	if err := kvdb.db.Pebble().Close(); err != nil {
-		return tracerr.Wrap(err)
+	currentPath := path.Join(k.cfg.Path, PathCurrent)
+	if k.db != nil {
+		k.db.Close()
+		k.db = nil
 	}
 
 	// remove 'current'
-	err := os.RemoveAll(currentPath)
-	if err != nil {
+	if err := os.RemoveAll(currentPath); err != nil {
 		return tracerr.Wrap(err)
 	}
 	// remove all checkpoints
-	list, err := kvdb.ListCheckpoints()
+	list, err := k.ListCheckpoints()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
 	for _, bn := range list {
-		if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
+		if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -275,14 +307,14 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
 		if err != nil {
 			return tracerr.Wrap(err)
 		}
-		kvdb.db = sto
-		kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
-		kvdb.CurrentBatch = 0
+		k.db = sto
+		k.CurrentIdx = common.RollupConstReservedIDx // 255
+		k.CurrentBatch = 0
 
 		return nil
 	}
 
-	checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
 
 	// copy synchronizer'BatchNumX' to 'BatchNumX'
 	if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
@@ -290,7 +322,7 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
 	}
 
 	// copy 'BatchNumX' to 'current'
-	err = kvdb.MakeCheckpointFromTo(batchNum, currentPath)
+	err = k.MakeCheckpointFromTo(batchNum, currentPath)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -300,15 +332,15 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	kvdb.db = sto
+	k.db = sto
 
 	// get currentBatch num
-	kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
+	k.CurrentBatch, err = k.GetCurrentBatch()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
 	// get currentIdx
-	kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
+	k.CurrentIdx, err = k.GetCurrentIdx()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -317,8 +349,8 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
 }
 
 // GetCurrentBatch returns the current BatchNum stored in the KVDB
-func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
-	cbBytes, err := kvdb.db.Get(KeyCurrentBatch)
+func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) {
+	cbBytes, err := k.db.Get(KeyCurrentBatch)
 	if tracerr.Unwrap(err) == db.ErrNotFound {
 		return 0, nil
 	}
@@ -329,12 +361,12 @@ func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
 }
 
 // setCurrentBatch stores the current BatchNum in the KVDB
-func (kvdb *KVDB) setCurrentBatch() error {
-	tx, err := kvdb.db.NewTx()
+func (k *KVDB) setCurrentBatch() error {
+	tx, err := k.db.NewTx()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	err = tx.Put(KeyCurrentBatch, kvdb.CurrentBatch.Bytes())
+	err = tx.Put(KeyCurrentBatch, k.CurrentBatch.Bytes())
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -345,9 +377,9 @@ func (kvdb *KVDB) setCurrentBatch() error {
 }
 
 // GetCurrentIdx returns the stored Idx from the KVDB, which is the last Idx
-// used for an Account in the KVDB.
-func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
-	idxBytes, err := kvdb.db.Get(keyCurrentIdx)
+// used for an Account in the k.
+func (k *KVDB) GetCurrentIdx() (common.Idx, error) {
+	idxBytes, err := k.db.Get(keyCurrentIdx)
 	if tracerr.Unwrap(err) == db.ErrNotFound {
 		return common.RollupConstReservedIDx, nil // 255, nil
 	}
@@ -358,10 +390,10 @@ func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
 }
 
 // SetCurrentIdx stores Idx in the KVDB
-func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {
-	kvdb.CurrentIdx = idx
+func (k *KVDB) SetCurrentIdx(idx common.Idx) error {
+	k.CurrentIdx = idx
 
-	tx, err := kvdb.db.NewTx()
+	tx, err := k.db.NewTx()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -381,49 +413,64 @@ func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {
 
 // MakeCheckpoint does a checkpoint at the given batchNum in the defined path.
 // Internally this advances & stores the current BatchNum, and then stores a
-// Checkpoint of the current state of the KVDB.
-func (kvdb *KVDB) MakeCheckpoint() error {
+// Checkpoint of the current state of the k.
+func (k *KVDB) MakeCheckpoint() error {
 	// advance currentBatch
-	kvdb.CurrentBatch++
+	k.CurrentBatch++
 
-	checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, kvdb.CurrentBatch))
+	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, k.CurrentBatch))
 
-	if err := kvdb.setCurrentBatch(); err != nil {
+	if err := k.setCurrentBatch(); err != nil {
 		return tracerr.Wrap(err)
 	}
 
 	// if checkpoint BatchNum already exist in disk, delete it
-	if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
-		err := os.RemoveAll(checkpointPath)
-		if err != nil {
+	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
+		if err := os.RemoveAll(checkpointPath); err != nil {
 			return tracerr.Wrap(err)
 		}
-	} else if err != nil && !os.IsNotExist(err) {
-		return tracerr.Wrap(err)
 	}
 
 	// execute Checkpoint
-	if err := kvdb.db.Pebble().Checkpoint(checkpointPath); err != nil {
+	if err := k.db.Pebble().Checkpoint(checkpointPath); err != nil {
 		return tracerr.Wrap(err)
 	}
 	// copy 'CurrentBatch' to 'last'
-	if err := kvdb.last.set(kvdb, kvdb.CurrentBatch); err != nil {
-		return tracerr.Wrap(err)
+	if k.last != nil {
+		if err := k.last.set(k, k.CurrentBatch); err != nil {
+			return tracerr.Wrap(err)
+		}
 	}
 	// delete old checkpoints
-	if err := kvdb.deleteOldCheckpoints(); err != nil {
+	if err := k.deleteOldCheckpoints(); err != nil {
 		return tracerr.Wrap(err)
 	}
 
 	return nil
 }
 
+// CheckpointExists returns true if the checkpoint exists
+func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
+	source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+	if _, err := os.Stat(source); os.IsNotExist(err) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
 // DeleteCheckpoint removes if exist the checkpoint of the given batchNum
-func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
-	checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
+	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
 
 	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
 		return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
+	} else if err != nil {
+		return tracerr.Wrap(err)
 	}
 
 	return os.RemoveAll(checkpointPath)
@@ -431,8 +478,8 @@ func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
 
 // ListCheckpoints returns the list of batchNums of the checkpoints, sorted.
 // If there's a gap between the list of checkpoints, an error is returned.
-func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
-	files, err := ioutil.ReadDir(kvdb.path)
+func (k *KVDB) ListCheckpoints() ([]int, error) {
+	files, err := ioutil.ReadDir(k.cfg.Path)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -449,12 +496,12 @@ func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
 		}
 	}
 	sort.Ints(checkpoints)
-	if len(checkpoints) > 0 {
+	if !k.cfg.NoGapsCheck && len(checkpoints) > 0 {
 		first := checkpoints[0]
 		for _, checkpoint := range checkpoints[1:] {
 			first++
 			if checkpoint != first {
-				log.Errorw("GAP", "checkpoints", checkpoints)
+				log.Errorw("gap between checkpoints", "checkpoints", checkpoints)
 				return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint))
 			}
 		}
@@ -464,14 +511,14 @@ func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
 
 // deleteOldCheckpoints deletes old checkpoints when there are more than
 // `s.keep` checkpoints
-func (kvdb *KVDB) deleteOldCheckpoints() error {
-	list, err := kvdb.ListCheckpoints()
+func (k *KVDB) deleteOldCheckpoints() error {
+	list, err := k.ListCheckpoints()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	if len(list) > kvdb.keep {
-		for _, checkpoint := range list[:len(list)-kvdb.keep] {
-			if err := kvdb.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
+	if k.cfg.Keep > 0 && len(list) > k.cfg.Keep {
+		for _, checkpoint := range list[:len(list)-k.cfg.Keep] {
+			if err := k.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
 				return tracerr.Wrap(err)
 			}
 		}
@@ -482,43 +529,40 @@ func (kvdb *KVDB) deleteOldCheckpoints() error {
 // MakeCheckpointFromTo makes a checkpoint from the current db at fromBatchNum
 // to the dest folder. This method is locking, so it can be called from
 // multiple places at the same time.
-func (kvdb *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
-	source := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
+func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
+	source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
 	if _, err := os.Stat(source); os.IsNotExist(err) {
 		// if kvdb does not have checkpoint at batchNum, return err
 		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
+	} else if err != nil {
+		return tracerr.Wrap(err)
 	}
 	// By locking we allow calling MakeCheckpointFromTo from multiple
 	// places at the same time for the same stateDB. This allows the
 	// synchronizer to do a reset to a batchNum at the same time as the
 	// pipeline is doing a txSelector.Reset and batchBuilder.Reset from
 	// synchronizer to the same batchNum
-	kvdb.m.Lock()
-	defer kvdb.m.Unlock()
+	k.m.Lock()
+	defer k.m.Unlock()
 	return pebbleMakeCheckpoint(source, dest)
 }
 
 func pebbleMakeCheckpoint(source, dest string) error {
 	// Remove dest folder (if it exists) before doing the checkpoint
-	if _, err := os.Stat(dest); !os.IsNotExist(err) {
-		err := os.RemoveAll(dest)
-		if err != nil {
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+	} else if err != nil {
+		return tracerr.Wrap(err)
+	} else {
+		if err := os.RemoveAll(dest); err != nil {
 			return tracerr.Wrap(err)
 		}
-	} else if err != nil && !os.IsNotExist(err) {
-		return tracerr.Wrap(err)
 	}
 
 	sto, err := pebble.NewPebbleStorage(source, false)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	defer func() {
-		errClose := sto.Pebble().Close()
-		if errClose != nil {
-			log.Errorw("Pebble.Close", "err", errClose)
-		}
-	}()
+	defer sto.Close()
 
 	// execute Checkpoint
 	err = sto.Pebble().Checkpoint(dest)
@@ -530,7 +574,12 @@ func pebbleMakeCheckpoint(source, dest string) error {
 }
 
 // Close the DB
-func (kvdb *KVDB) Close() {
-	kvdb.db.Close()
-	kvdb.last.close()
+func (k *KVDB) Close() {
+	if k.db != nil {
+		k.db.Close()
+		k.db = nil
+	}
+	if k.last != nil {
+		k.last.close()
+	}
 }
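With this change, call sites migrate from `NewKVDB(path, keep)` to the `Config` struct, and anything that touches the last checkpoint must be prepared for `ErrNoLast` when `NoLast` is set. A construction sketch under those assumptions (the on-disk path is a placeholder, and the import paths are inferred from the repo layout rather than confirmed by this diff):

```
package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/db/kvdb"
	"github.com/hermeznetwork/tracerr"
	"github.com/iden3/go-merkletree/db/pebble"
)

func main() {
	// A synchronizer-style KVDB: keep a rolling window of checkpoints and
	// maintain the thread-safe `last` copy for concurrent readers.
	kv, err := kvdb.NewKVDB(kvdb.Config{
		Path: "/tmp/hermez-statedb", // placeholder path
		Keep: kvdb.DefaultKeep,
	})
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	// Readers query the `last` checkpoint without blocking the writer.
	err = kv.LastRead(func(db *pebble.Storage) error {
		// read-only queries against the last checkpoint go here
		return nil
	})
	if err != nil {
		if tracerr.Unwrap(err) == kvdb.ErrNoLast {
			// Only returned by instances built with NoLast: true.
			fmt.Println("no last checkpoint maintained")
		} else {
			panic(err)
		}
	}
}
```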
@@ -37,7 +37,7 @@ func TestCheckpoints(t *testing.T) {
 	require.NoError(t, err)
 	defer require.NoError(t, os.RemoveAll(dir))
 
-	db, err := NewKVDB(dir, 128)
+	db, err := NewKVDB(Config{Path: dir, Keep: 128})
 	require.NoError(t, err)
 
 	// add test key-values
@@ -72,7 +72,7 @@ func TestCheckpoints(t *testing.T) {
 	err = db.Reset(3)
 	require.NoError(t, err)
 
-	printCheckpoints(t, db.path)
+	printCheckpoints(t, db.cfg.Path)
 
 	// check that currentBatch is as expected after Reset
 	cb, err = db.GetCurrentBatch()
@@ -99,7 +99,7 @@ func TestCheckpoints(t *testing.T) {
 	dirLocal, err := ioutil.TempDir("", "ldb")
 	require.NoError(t, err)
 	defer require.NoError(t, os.RemoveAll(dirLocal))
-	ldb, err := NewKVDB(dirLocal, 128)
+	ldb, err := NewKVDB(Config{Path: dirLocal, Keep: 128})
 	require.NoError(t, err)
 
 	// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -120,7 +120,7 @@ func TestCheckpoints(t *testing.T) {
 	dirLocal2, err := ioutil.TempDir("", "ldb2")
 	require.NoError(t, err)
 	defer require.NoError(t, os.RemoveAll(dirLocal2))
-	ldb2, err := NewKVDB(dirLocal2, 128)
+	ldb2, err := NewKVDB(Config{Path: dirLocal2, Keep: 128})
 	require.NoError(t, err)
 
 	// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -139,9 +139,9 @@ func TestCheckpoints(t *testing.T) {
 
 	debug := false
 	if debug {
-		printCheckpoints(t, db.path)
-		printCheckpoints(t, ldb.path)
-		printCheckpoints(t, ldb2.path)
+		printCheckpoints(t, db.cfg.Path)
+		printCheckpoints(t, ldb.cfg.Path)
+		printCheckpoints(t, ldb2.cfg.Path)
 	}
 }
 
@@ -150,7 +150,7 @@ func TestListCheckpoints(t *testing.T) {
 	require.NoError(t, err)
 	defer require.NoError(t, os.RemoveAll(dir))
 
-	db, err := NewKVDB(dir, 128)
+	db, err := NewKVDB(Config{Path: dir, Keep: 128})
 	require.NoError(t, err)
 
 	numCheckpoints := 16
@@ -181,7 +181,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
 	defer require.NoError(t, os.RemoveAll(dir))
 
 	keep := 16
-	db, err := NewKVDB(dir, keep)
+	db, err := NewKVDB(Config{Path: dir, Keep: keep})
 	require.NoError(t, err)
 
 	numCheckpoints := 32
@@ -202,7 +202,7 @@ func TestGetCurrentIdx(t *testing.T) {
 	defer require.NoError(t, os.RemoveAll(dir))
 
 	keep := 16
-	db, err := NewKVDB(dir, keep)
+	db, err := NewKVDB(Config{Path: dir, Keep: keep})
 	require.NoError(t, err)
 
 	idx, err := db.GetCurrentIdx()
@@ -211,7 +211,7 @@ func TestGetCurrentIdx(t *testing.T) {
 
 	db.Close()
 
-	db, err = NewKVDB(dir, keep)
+	db, err = NewKVDB(Config{Path: dir, Keep: keep})
 	require.NoError(t, err)
 
 	idx, err = db.GetCurrentIdx()
@@ -227,7 +227,7 @@ func TestGetCurrentIdx(t *testing.T) {
 
 	db.Close()
 
-	db, err = NewKVDB(dir, keep)
+	db, err = NewKVDB(Config{Path: dir, Keep: keep})
 	require.NoError(t, err)
 
 	idx, err = db.GetCurrentIdx()
|
|||||||
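NewKVDB now takes a Config struct instead of positional arguments, which keeps call sites self-describing as options grow. A sketch of that constructor shape, reduced to the two fields the tests use (everything else here is illustrative, not the repo's full type):

```go
package kvdb

// Config groups the KVDB constructor options; Path and Keep mirror the
// fields used in the tests above. Future options can be added as fields
// with sensible zero values without breaking existing call sites.
type Config struct {
	Path string // directory where checkpoints are stored
	Keep int    // number of old checkpoints to keep
}

// KVDB is reduced to the fields needed for this sketch.
type KVDB struct {
	cfg Config
}

// NewKVDB builds a KVDB from a Config instead of positional arguments.
func NewKVDB(cfg Config) (*KVDB, error) {
	return &KVDB{cfg: cfg}, nil
}
```

Call sites then read as `NewKVDB(Config{Path: dir, Keep: 128})`, matching the updated tests.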
@@ -1,12 +1,18 @@
 package l2db
 
 import (
+	"fmt"
+
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/russross/meddler"
 )
 
+var (
+	errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
+)
+
 // AddAccountCreationAuthAPI inserts an account creation authorization into the DB
 func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
 	cancel, err := l2db.apiConnCon.Acquire()
@@ -28,7 +34,7 @@ func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCre
 	defer l2db.apiConnCon.Release()
 	auth := new(AccountCreationAuthAPI)
 	return auth, tracerr.Wrap(meddler.QueryRow(
-		l2db.db, auth,
+		l2db.dbRead, auth,
 		"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
 		addr,
 	))
@@ -42,20 +48,54 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
 		return tracerr.Wrap(err)
 	}
 	defer l2db.apiConnCon.Release()
-	row := l2db.db.QueryRow(
-		"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
-		common.PoolL2TxStatePending,
-	)
-	var totalTxs uint32
-	if err := row.Scan(&totalTxs); err != nil {
+	row := l2db.dbRead.QueryRow(`SELECT
+		($1::NUMERIC * COALESCE(token.usd, 0) * fee_percentage($2::NUMERIC)) /
+			(10.0 ^ token.decimals::NUMERIC)
+		FROM token WHERE token.token_id = $3;`,
+		tx.AmountFloat, tx.Fee, tx.TokenID)
+	var feeUSD float64
+	if err := row.Scan(&feeUSD); err != nil {
 		return tracerr.Wrap(err)
 	}
-	if totalTxs >= l2db.maxTxs {
-		return tracerr.New(
-			"The pool is at full capacity. More transactions are not accepted currently",
-		)
+	if feeUSD < l2db.minFeeUSD {
+		return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
			feeUSD, l2db.minFeeUSD))
 	}
-	return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
+
+	// Prepare insert SQL query argument parameters
+	namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
+	if err != nil {
+		return err
+	}
+	valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
+	if err != nil {
+		return err
+	}
+	values, err := meddler.Default.Values(tx, false)
+	if err != nil {
+		return err
+	}
+
+	q := fmt.Sprintf(
+		`INSERT INTO tx_pool (%s)
+		SELECT %s
+		WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
+		namesPart, valuesPart,
+		len(values)+1, len(values)+2) //nolint:gomnd
+	values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
+	res, err := l2db.dbWrite.Exec(q, values...)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	rowsAffected, err := res.RowsAffected()
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	if rowsAffected == 0 {
+		return tracerr.Wrap(errPoolFull)
+	}
+	return nil
 }
 
 // selectPoolTxAPI select part of queries to get PoolL2TxRead
@@ -78,7 +118,7 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
 	defer l2db.apiConnCon.Release()
 	tx := new(PoolTxAPI)
 	return tx, tracerr.Wrap(meddler.QueryRow(
-		l2db.db, tx,
+		l2db.dbRead, tx,
 		selectPoolTxAPI+"WHERE tx_id = $1;",
 		txID,
 	))
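The rewritten AddTxAPI folds the capacity check into the INSERT itself (INSERT ... SELECT ... WHERE COUNT < max), so the check and the write are one atomic statement instead of a racy count-then-insert. A minimal sketch of the pattern against plain database/sql with PostgreSQL placeholders; the table and column names are simplified assumptions, not the repo's schema:

```go
// Package poolsketch sketches the atomic capacity check used by AddTxAPI.
package poolsketch

import (
	"database/sql"
	"errors"
)

var errPoolFull = errors.New("pool is at full capacity")

// InsertIfBelowCap inserts a row only while the number of pending rows is
// below maxTxs. The guard is part of the INSERT statement itself, so the
// check and the write are atomic: no gap where another insert can race in.
func InsertIfBelowCap(db *sql.DB, txID string, maxTxs int) error {
	res, err := db.Exec(
		`INSERT INTO tx_pool (tx_id, state)
		 SELECT $1, 'pend'
		 WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = 'pend') < $2;`,
		txID, maxTxs)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if n == 0 {
		// The WHERE clause filtered the row out: the pool is full.
		return errPoolFull
	}
	return nil
}
```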
@@ -21,10 +21,12 @@ import (
 // L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant
 // due to them being forged or invalid after a safety period
 type L2DB struct {
-	db           *sqlx.DB
+	dbRead       *sqlx.DB
+	dbWrite      *sqlx.DB
 	safetyPeriod common.BatchNum
 	ttl          time.Duration
 	maxTxs       uint32 // limit of txs that are accepted in the pool
+	minFeeUSD    float64
 	apiConnCon   *db.APIConnectionController
 }
 
@@ -32,17 +34,20 @@ type L2DB struct {
 // To create it, it's needed db connection, safety period expressed in batches,
 // maxTxs that the DB should have and TTL (time to live) for pending txs.
 func NewL2DB(
-	db *sqlx.DB,
+	dbRead, dbWrite *sqlx.DB,
 	safetyPeriod common.BatchNum,
 	maxTxs uint32,
+	minFeeUSD float64,
 	TTL time.Duration,
 	apiConnCon *db.APIConnectionController,
 ) *L2DB {
 	return &L2DB{
-		db:           db,
+		dbRead:       dbRead,
+		dbWrite:      dbWrite,
 		safetyPeriod: safetyPeriod,
 		ttl:          TTL,
 		maxTxs:       maxTxs,
+		minFeeUSD:    minFeeUSD,
 		apiConnCon:   apiConnCon,
 	}
 }
@@ -50,12 +55,18 @@ func NewL2DB(
 // DB returns a pointer to the L2DB.db. This method should be used only for
 // internal testing purposes.
 func (l2db *L2DB) DB() *sqlx.DB {
-	return l2db.db
+	return l2db.dbWrite
+}
+
+// MinFeeUSD returns the minimum fee in USD that is required to accept txs into
+// the pool
+func (l2db *L2DB) MinFeeUSD() float64 {
+	return l2db.minFeeUSD
 }
 
 // AddAccountCreationAuth inserts an account creation authorization into the DB
 func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
-	_, err := l2db.db.Exec(
+	_, err := l2db.dbWrite.Exec(
 		`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
 		VALUES ($1, $2, $3);`,
 		auth.EthAddr, auth.BJJ, auth.Signature,
@@ -67,30 +78,12 @@ func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error
 func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) {
 	auth := new(common.AccountCreationAuth)
 	return auth, tracerr.Wrap(meddler.QueryRow(
-		l2db.db, auth,
+		l2db.dbRead, auth,
 		"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
 		addr,
 	))
 }
 
-// AddTx inserts a tx to the pool
-func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
-	row := l2db.db.QueryRow(
-		"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
-		common.PoolL2TxStatePending,
-	)
-	var totalTxs uint32
-	if err := row.Scan(&totalTxs); err != nil {
-		return tracerr.Wrap(err)
-	}
-	if totalTxs >= l2db.maxTxs {
-		return tracerr.New(
-			"The pool is at full capacity. More transactions are not accepted currently",
-		)
-	}
-	return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
-}
-
 // UpdateTxsInfo updates the parameter Info of the pool transactions
 func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
 	if len(txs) == 0 {
@@ -114,7 +107,7 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
 	WHERE tx_pool.tx_id = tx_update.id;
 	`
 	if len(txUpdates) > 0 {
-		if _, err := sqlx.NamedExec(l2db.db, query, txUpdates); err != nil {
+		if _, err := sqlx.NamedExec(l2db.dbWrite, query, txUpdates); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -122,9 +115,8 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
 	return nil
 }
 
-// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
-// but in production txs will only be inserted through the API
-func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
+// NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx
+func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
 	// transform tx from *common.PoolL2Tx to PoolL2TxWrite
 	insertTx := &PoolL2TxWrite{
 		TxID: tx.TxID,
@@ -166,8 +158,15 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
 	f := new(big.Float).SetInt(tx.Amount)
 	amountF, _ := f.Float64()
 	insertTx.AmountFloat = amountF
+	return insertTx
+}
+
+// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
+// but in production txs will only be inserted through the API
+func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
+	insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
 	// insert tx
-	return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
+	return tracerr.Wrap(meddler.Insert(l2db.dbWrite, "tx_pool", insertTx))
 }
 
 // selectPoolTxCommon select part of queries to get common.PoolL2Tx
@@ -176,14 +175,15 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
 tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
 rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
 tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
-fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update
+(fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) /
+	(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
 FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
 
 // GetTx return the specified Tx in common.PoolL2Tx format
 func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
 	tx := new(common.PoolL2Tx)
 	return tx, tracerr.Wrap(meddler.QueryRow(
-		l2db.db, tx,
+		l2db.dbRead, tx,
 		selectPoolTxCommon+"WHERE tx_id = $1;",
 		txID,
 	))
@@ -193,7 +193,7 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
 func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
 	var txs []*common.PoolL2Tx
 	err := meddler.QueryAll(
-		l2db.db, &txs,
+		l2db.dbRead, &txs,
 		selectPoolTxCommon+"WHERE state = $1",
 		common.PoolL2TxStatePending,
 	)
@@ -218,8 +218,8 @@ func (l2db *L2DB) StartForging(txIDs []common.TxID, batchNum common.BatchNum) er
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	query = l2db.db.Rebind(query)
-	_, err = l2db.db.Exec(query, args...)
+	query = l2db.dbWrite.Rebind(query)
+	_, err = l2db.dbWrite.Exec(query, args...)
 	return tracerr.Wrap(err)
 }
 
@@ -241,8 +241,8 @@ func (l2db *L2DB) DoneForging(txIDs []common.TxID, batchNum common.BatchNum) err
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	query = l2db.db.Rebind(query)
-	_, err = l2db.db.Exec(query, args...)
+	query = l2db.dbWrite.Rebind(query)
+	_, err = l2db.dbWrite.Exec(query, args...)
 	return tracerr.Wrap(err)
 }
 
@@ -263,8 +263,8 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	query = l2db.db.Rebind(query)
-	_, err = l2db.db.Exec(query, args...)
+	query = l2db.dbWrite.Rebind(query)
+	_, err = l2db.dbWrite.Exec(query, args...)
 	return tracerr.Wrap(err)
 }
 
@@ -272,7 +272,7 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
 // of unique FromIdx
 func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) {
 	var idxs []common.Idx
-	rows, err := l2db.db.Query(`SELECT DISTINCT from_idx FROM tx_pool
+	rows, err := l2db.dbRead.Query(`SELECT DISTINCT from_idx FROM tx_pool
 	WHERE state = $1;`, common.PoolL2TxStatePending)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
@@ -313,7 +313,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
 	// named query which works with slices, and doesn't handle an extra
 	// individual argument.
 	query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
-	if _, err := sqlx.NamedExec(l2db.db, query, updatedAccounts); err != nil {
+	if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil {
 		return tracerr.Wrap(err)
 	}
 	return nil
@@ -322,10 +322,11 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
 // Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg.
 // The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending
 func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
-	_, err := l2db.db.Exec(
+	_, err := l2db.dbWrite.Exec(
 		`UPDATE tx_pool SET batch_num = NULL, state = $1
-		WHERE (state = $2 OR state = $3) AND batch_num > $4`,
+		WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
 		common.PoolL2TxStatePending,
+		common.PoolL2TxStateForging,
 		common.PoolL2TxStateForged,
 		common.PoolL2TxStateInvalid,
 		lastValidBatch,
@@ -337,7 +338,7 @@ func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
 // it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded
 func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
 	now := time.Now().UTC().Unix()
-	_, err = l2db.db.Exec(
+	_, err = l2db.dbWrite.Exec(
 		`DELETE FROM tx_pool WHERE (
 			batch_num < $1 AND (state = $2 OR state = $3)
 		) OR (
@@ -353,3 +354,14 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
 	)
 	return tracerr.Wrap(err)
 }
+
+// PurgeByExternalDelete deletes all pending transactions marked with true in
+// the `external_delete` column. An external process can set this column to
+// true to instruct the coordinator to delete the tx when possible.
+func (l2db *L2DB) PurgeByExternalDelete() error {
+	_, err := l2db.dbWrite.Exec(
+		`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
+		common.PoolL2TxStatePending,
+	)
+	return tracerr.Wrap(err)
+}
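Splitting the single db handle into dbRead and dbWrite lets deployments send reads to a replica while writes go to the primary; passing the same handle twice keeps the old single-database behaviour, as the tests below do. A hedged wiring sketch (the DSNs are placeholders, and the db/l2db import path is an assumption):

```go
package main

import (
	"log"
	"time"

	"github.com/hermeznetwork/hermez-node/db/l2db"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical DSNs: a primary for writes, a replica for reads.
	dbWrite, err := sqlx.Connect("postgres", "postgres://hermez:pass@primary:5432/hermez")
	if err != nil {
		log.Fatal(err)
	}
	dbRead, err := sqlx.Connect("postgres", "postgres://hermez:pass@replica:5432/hermez")
	if err != nil {
		log.Fatal(err)
	}
	// With a single database, pass the same handle twice, as TestMain does:
	// l2db.NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
	_ = l2db.NewL2DB(dbRead, dbWrite, 10, 1000, 0.0, 24*time.Hour, nil)
}
```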
@@ -1,8 +1,8 @@
 package l2db
 
 import (
-	"math"
-	"math/big"
+	"database/sql"
+	"fmt"
 	"os"
 	"testing"
 	"time"
@@ -20,12 +20,14 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+var decimals = uint64(3)
+var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
 var l2DB *L2DB
 var l2DBWithACC *L2DB
 var historyDB *historydb.HistoryDB
 var tc *til.Context
 var tokens map[common.TokenID]historydb.TokenWithUSD
-var tokensValue map[common.TokenID]float64
 var accs map[common.Idx]common.Account
 
 func TestMain(m *testing.M) {
@@ -35,11 +37,11 @@ func TestMain(m *testing.M) {
 	if err != nil {
 		panic(err)
 	}
-	l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
+	l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
 	apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
-	l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
+	l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
 	test.WipeDB(l2DB.DB())
-	historyDB = historydb.NewHistoryDB(db, nil)
+	historyDB = historydb.NewHistoryDB(db, db, nil)
 	// Run tests
 	result := m.Run()
 	// Close DB
@@ -58,10 +60,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
 
 	AddToken(1)
 	AddToken(2)
-	CreateAccountDeposit(1) A: 2000
-	CreateAccountDeposit(2) A: 2000
-	CreateAccountDeposit(1) B: 1000
-	CreateAccountDeposit(2) B: 1000
+	CreateAccountDeposit(1) A: 20000
+	CreateAccountDeposit(2) A: 20000
+	CreateAccountDeposit(1) B: 10000
+	CreateAccountDeposit(2) B: 10000
 	> batchL1
 	> batchL1
 	> block
@@ -82,15 +84,23 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
+	for i := range blocks {
+		block := &blocks[i]
+		for j := range block.Rollup.AddedTokens {
+			token := &block.Rollup.AddedTokens[j]
+			token.Name = fmt.Sprintf("Token %d", token.TokenID)
+			token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
+			token.Decimals = decimals
+		}
+	}
+
 	tokens = make(map[common.TokenID]historydb.TokenWithUSD)
-	tokensValue = make(map[common.TokenID]float64)
+	// tokensValue = make(map[common.TokenID]float64)
 	accs = make(map[common.Idx]common.Account)
-	value := 5 * 5.389329
 	now := time.Now().UTC()
 	// Add all blocks except for the last one
 	for i := range blocks[:len(blocks)-1] {
-		err = historyDB.AddBlockSCData(&blocks[i])
-		if err != nil {
+		if err := historyDB.AddBlockSCData(&blocks[i]); err != nil {
 			return tracerr.Wrap(err)
 		}
 		for _, batch := range blocks[i].Rollup.Batches {
@@ -106,39 +116,38 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
 				Name:      token.Name,
 				Symbol:    token.Symbol,
 				Decimals:  token.Decimals,
+				USD:       &tokenValue,
+				USDUpdate: &now,
 			}
-			tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
-			readToken.USDUpdate = &now
-			readToken.USD = &value
 			tokens[token.TokenID] = readToken
-		}
-		// Set value to the tokens (tokens have no symbol)
-		tokenSymbol := ""
-		err := historyDB.UpdateTokenValue(tokenSymbol, value)
+			// Set value to the tokens
+			err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD)
 			if err != nil {
 				return tracerr.Wrap(err)
 			}
+		}
 	}
 	return nil
 }
 
 func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
+	// Fee = 126 corresponds to ~10%
 	setPool := `
 	Type: PoolL2
-	PoolTransfer(1) A-B: 6 (4)
-	PoolTransfer(2) A-B: 3 (1)
-	PoolTransfer(1) B-A: 5 (2)
-	PoolTransfer(2) B-A: 10 (3)
-	PoolTransfer(1) A-B: 7 (2)
-	PoolTransfer(2) A-B: 2 (1)
-	PoolTransfer(1) B-A: 8 (2)
-	PoolTransfer(2) B-A: 1 (1)
-	PoolTransfer(1) A-B: 3 (1)
-	PoolTransferToEthAddr(2) B-A: 5 (2)
-	PoolTransferToBJJ(2) B-A: 5 (2)
+	PoolTransfer(1) A-B: 6000 (126)
+	PoolTransfer(2) A-B: 3000 (126)
+	PoolTransfer(1) B-A: 5000 (126)
+	PoolTransfer(2) B-A: 10000 (126)
+	PoolTransfer(1) A-B: 7000 (126)
+	PoolTransfer(2) A-B: 2000 (126)
+	PoolTransfer(1) B-A: 8000 (126)
+	PoolTransfer(2) B-A: 1000 (126)
+	PoolTransfer(1) A-B: 3000 (126)
+	PoolTransferToEthAddr(2) B-A: 5000 (126)
+	PoolTransferToBJJ(2) B-A: 5000 (126)
 
-	PoolExit(1) A: 5 (2)
-	PoolExit(2) B: 3 (1)
+	PoolExit(1) A: 5000 (126)
+	PoolExit(2) B: 3000 (126)
 	`
 	poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
 	if err != nil {
@@ -153,25 +162,74 @@ func TestAddTxTest(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assertTx(t, &poolL2Txs[i], fetchedTx)
 		nameZone, offset := fetchedTx.Timestamp.Zone()
 		assert.Equal(t, "UTC", nameZone)
 		assert.Equal(t, 0, offset)
 	}
 }
 
+func TestAddTxAPI(t *testing.T) {
+	err := prepareHistoryDB(historyDB)
+	if err != nil {
+		log.Error("Error prepare historyDB", err)
+	}
+
+	oldMaxTxs := l2DBWithACC.maxTxs
+	// set max number of pending txs that can be kept in the pool to 5
+	l2DBWithACC.maxTxs = 5
+
+	poolL2Txs, err := generatePoolL2Txs()
+	txs := make([]*PoolL2TxWrite, len(poolL2Txs))
+	for i := range poolL2Txs {
+		txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
+	}
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, len(poolL2Txs), 8)
+	for i := range txs[:5] {
+		err := l2DBWithACC.AddTxAPI(txs[i])
+		require.NoError(t, err)
+		fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
+		require.NoError(t, err)
+		assertTx(t, &poolL2Txs[i], fetchedTx)
+		nameZone, offset := fetchedTx.Timestamp.Zone()
+		assert.Equal(t, "UTC", nameZone)
+		assert.Equal(t, 0, offset)
+	}
+	err = l2DBWithACC.AddTxAPI(txs[5])
+	assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
+	// reset maxTxs to original value
+	l2DBWithACC.maxTxs = oldMaxTxs
+
+	// set minFeeUSD to a value higher than the tx feeUSD to test the error
+	// of inserting a tx with lower than min fee
+	oldMinFeeUSD := l2DBWithACC.minFeeUSD
+	tx := txs[5]
+	feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
+	require.NoError(t, err)
+	feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
+	// set minFeeUSD higher than the tx fee to trigger the error
+	l2DBWithACC.minFeeUSD = feeAmountUSD + 1
+	err = l2DBWithACC.AddTxAPI(tx)
+	require.Error(t, err)
+	assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
+	// reset minFeeUSD to original value
+	l2DBWithACC.minFeeUSD = oldMinFeeUSD
+}
+
|
func TestUpdateTxsInfo(t *testing.T) {
|
||||||
err := prepareHistoryDB(historyDB)
|
err := prepareHistoryDB(historyDB)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Error prepare historyDB", err)
|
log.Error("Error prepare historyDB", err)
|
||||||
}
|
}
|
||||||
poolL2Txs, err := generatePoolL2Txs()
|
poolL2Txs, err := generatePoolL2Txs()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
for i := range poolL2Txs {
|
for i := range poolL2Txs {
|
||||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -185,7 +243,7 @@ func TestUpdateTxsInfo(t *testing.T) {
|
|||||||
|
|
||||||
for i := range poolL2Txs {
|
for i := range poolL2Txs {
|
||||||
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
|
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, "test", fetchedTx.Info)
|
assert.Equal(t, "test", fetchedTx.Info)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -203,9 +261,8 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
|
|||||||
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
|
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
|
||||||
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
|
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
|
||||||
// Set expected fee
|
// Set expected fee
|
||||||
f := new(big.Float).SetInt(expected.Amount)
|
amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD)
|
||||||
amountF, _ := f.Float64()
|
expected.AbsoluteFee = amountUSD * expected.Fee.Percentage()
|
||||||
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
|
|
||||||
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
|
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
|
||||||
}
|
}
|
||||||
assert.Equal(t, expected, actual)
|
assert.Equal(t, expected, actual)
|
||||||
@@ -230,19 +287,28 @@ func TestGetPending(t *testing.T) {
|
|||||||
log.Error("Error prepare historyDB", err)
|
log.Error("Error prepare historyDB", err)
|
||||||
}
|
}
|
||||||
poolL2Txs, err := generatePoolL2Txs()
|
poolL2Txs, err := generatePoolL2Txs()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var pendingTxs []*common.PoolL2Tx
|
var pendingTxs []*common.PoolL2Tx
|
||||||
for i := range poolL2Txs {
|
for i := range poolL2Txs {
|
||||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pendingTxs = append(pendingTxs, &poolL2Txs[i])
|
pendingTxs = append(pendingTxs, &poolL2Txs[i])
|
||||||
}
|
}
|
||||||
fetchedTxs, err := l2DB.GetPendingTxs()
|
fetchedTxs, err := l2DB.GetPendingTxs()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
|
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
|
||||||
for i := range fetchedTxs {
|
for i := range fetchedTxs {
|
||||||
assertTx(t, pendingTxs[i], &fetchedTxs[i])
|
assertTx(t, pendingTxs[i], &fetchedTxs[i])
|
||||||
}
|
}
|
||||||
|
// Check AbsoluteFee amount
|
||||||
|
for i := range fetchedTxs {
|
||||||
|
tx := &fetchedTxs[i]
|
||||||
|
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
|
||||||
|
require.NoError(t, err)
|
||||||
|
feeAmountUSD := common.TokensToUSD(feeAmount,
|
||||||
|
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
|
||||||
|
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStartForging(t *testing.T) {
|
func TestStartForging(t *testing.T) {
|
||||||
@@ -253,13 +319,13 @@ func TestStartForging(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	var startForgingTxIDs []common.TxID
 	randomizer := 0
 	// Add txs to DB
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
 			startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
 		}
@@ -267,11 +333,11 @@ func TestStartForging(t *testing.T) {
 	}
 	// Start forging txs
 	err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Fetch txs and check that they've been updated correctly
 	for _, id := range startForgingTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
 		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
 	}
@@ -285,13 +351,13 @@ func TestDoneForging(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	var startForgingTxIDs []common.TxID
 	randomizer := 0
 	// Add txs to DB
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
 			startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
 		}
@@ -299,7 +365,7 @@ func TestDoneForging(t *testing.T) {
 	}
 	// Start forging txs
 	err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var doneForgingTxIDs []common.TxID
 	randomizer = 0
@@ -311,12 +377,12 @@ func TestDoneForging(t *testing.T) {
 	}
 	// Done forging txs
 	err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// Fetch txs and check that they've been updated correctly
 	for _, id := range doneForgingTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
 		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
 	}
@@ -330,13 +396,13 @@ func TestInvalidate(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	var invalidTxIDs []common.TxID
 	randomizer := 0
 	// Add txs to DB
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
 			randomizer++
 			invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
@@ -344,11 +410,11 @@ func TestInvalidate(t *testing.T) {
 	}
 	// Invalidate txs
 	err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Fetch txs and check that they've been updated correctly
 	for _, id := range invalidTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
 		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
 	}
@@ -362,7 +428,7 @@ func TestInvalidateOldNonces(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Update Accounts currentNonce
 	var updateAccounts []common.IdxNonce
 	var currentNonce = common.Nonce(1)
@@ -379,13 +445,13 @@ func TestInvalidateOldNonces(t *testing.T) {
 			invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
 		}
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 	}
 	// sanity check
 	require.Greater(t, len(invalidTxIDs), 0)
 
 	err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Fetch txs and check that they've been updated correctly
 	for _, id := range invalidTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
@@ -407,7 +473,7 @@ func TestReorg(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	reorgedTxIDs := []common.TxID{}
 	nonReorgedTxIDs := []common.TxID{}
@@ -418,7 +484,7 @@ func TestReorg(t *testing.T) {
 	// Add txs to DB
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
 			startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
 			allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -430,7 +496,7 @@ func TestReorg(t *testing.T) {
 	}
 	// Start forging txs
 	err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var doneForgingTxIDs []common.TxID
 	randomizer = 0
@@ -455,22 +521,22 @@ func TestReorg(t *testing.T) {
 
 	// Invalidate txs BEFORE reorgBatch --> nonReorg
 	err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Done forging txs in reorgBatch --> Reorg
 	err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	err = l2DB.Reorg(lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	for _, id := range reorgedTxIDs {
 		tx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Nil(t, tx.BatchNum)
 		assert.Equal(t, common.PoolL2TxStatePending, tx.State)
 	}
 	for _, id := range nonReorgedTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
 	}
 }
@@ -487,7 +553,7 @@ func TestReorg2(t *testing.T) {
 		log.Error("Error prepare historyDB", err)
 	}
 	poolL2Txs, err := generatePoolL2Txs()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	reorgedTxIDs := []common.TxID{}
 	nonReorgedTxIDs := []common.TxID{}
@@ -498,7 +564,7 @@ func TestReorg2(t *testing.T) {
 	// Add txs to DB
 	for i := range poolL2Txs {
 		err := l2DB.AddTxTest(&poolL2Txs[i])
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
 			startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
 			allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -510,7 +576,7 @@ func TestReorg2(t *testing.T) {
 	}
 	// Start forging txs
 	err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var doneForgingTxIDs []common.TxID
 	randomizer = 0
@@ -532,22 +598,22 @@ func TestReorg2(t *testing.T) {
 	}
 	// Done forging txs BEFORE reorgBatch --> nonReorg
 	err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	// Invalidate txs in reorgBatch --> Reorg
 	err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	err = l2DB.Reorg(lastValidBatch)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	for _, id := range reorgedTxIDs {
 		tx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Nil(t, tx.BatchNum)
 		assert.Equal(t, common.PoolL2TxStatePending, tx.State)
 	}
 	for _, id := range nonReorgedTxIDs {
 		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
 	}
 }
@@ -563,7 +629,7 @@ func TestPurge(t *testing.T) {
|
|||||||
var poolL2Tx []common.PoolL2Tx
|
var poolL2Tx []common.PoolL2Tx
|
||||||
for i := 0; i < generateTx; i++ {
|
for i := 0; i < generateTx; i++ {
|
||||||
poolL2TxAux, err := generatePoolL2Txs()
|
poolL2TxAux, err := generatePoolL2Txs()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
poolL2Tx = append(poolL2Tx, poolL2TxAux...)
|
poolL2Tx = append(poolL2Tx, poolL2TxAux...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -590,39 +656,39 @@ func TestPurge(t *testing.T) {
|
|||||||
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
|
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
|
||||||
}
|
}
|
||||||
err := l2DB.AddTxTest(&tx)
|
err := l2DB.AddTxTest(&tx)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
// Set batchNum keeped txs
|
// Set batchNum keeped txs
|
||||||
for i := range keepedIDs {
|
for i := range keepedIDs {
|
||||||
_, err = l2DB.db.Exec(
|
_, err = l2DB.dbWrite.Exec(
|
||||||
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
|
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
|
||||||
safeBatchNum, keepedIDs[i],
|
safeBatchNum, keepedIDs[i],
|
||||||
)
|
)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
// Start forging txs and set batchNum
|
// Start forging txs and set batchNum
|
||||||
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
|
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// Done forging txs and set batchNum
|
// Done forging txs and set batchNum
|
||||||
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
|
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// Invalidate txs and set batchNum
|
// Invalidate txs and set batchNum
|
||||||
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
|
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
|
||||||
assert.NoError(t, err)
L2DB pool tests: fatal error checks move from `assert` to `require`, pool writes go through the `dbWrite` handle, and a new `TestPurgeByExternalDelete` pins down the `external_delete` flag.

```diff
     require.NoError(t, err)
     // Update timestamp of afterTTL txs
     deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
     for _, id := range afterTTLIDs {
         // Set timestamp
-        _, err = l2DB.db.Exec(
+        _, err = l2DB.dbWrite.Exec(
             "UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
             deleteTimestamp, common.PoolL2TxStatePending, id,
         )
-        assert.NoError(t, err)
+        require.NoError(t, err)
     }

     // Purge txs
     err = l2DB.Purge(safeBatchNum)
-    assert.NoError(t, err)
+    require.NoError(t, err)
     // Check results
     for _, id := range deletedIDs {
         _, err := l2DB.GetTx(id)
@@ -630,7 +696,7 @@ func TestPurge(t *testing.T) {
     }
     for _, id := range keepedIDs {
         _, err := l2DB.GetTx(id)
-        assert.NoError(t, err)
+        require.NoError(t, err)
     }
 }

@@ -644,10 +710,10 @@ func TestAuth(t *testing.T) {
     for i := 0; i < len(auths); i++ {
         // Add to the DB
         err := l2DB.AddAccountCreationAuth(auths[i])
-        assert.NoError(t, err)
+        require.NoError(t, err)
         // Fetch from DB
         auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
-        assert.NoError(t, err)
+        require.NoError(t, err)
         // Check fetched vs generated
         assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
         assert.Equal(t, auths[i].BJJ, auth.BJJ)
@@ -665,7 +731,7 @@ func TestAddGet(t *testing.T) {
         log.Error("Error prepare historyDB", err)
     }
     poolL2Txs, err := generatePoolL2Txs()
-    assert.NoError(t, err)
+    require.NoError(t, err)

     // We will work with only 3 txs
     require.GreaterOrEqual(t, len(poolL2Txs), 3)
@@ -701,3 +767,56 @@ func TestAddGet(t *testing.T) {
         assert.Equal(t, txs[i], *dbTx)
     }
 }

+func TestPurgeByExternalDelete(t *testing.T) {
+    err := prepareHistoryDB(historyDB)
+    if err != nil {
+        log.Error("Error prepare historyDB", err)
+    }
+    txs, err := generatePoolL2Txs()
+    require.NoError(t, err)
+
+    // We will work with 8 txs
+    require.GreaterOrEqual(t, len(txs), 8)
+    txs = txs[:8]
+    for i := range txs {
+        require.NoError(t, l2DB.AddTxTest(&txs[i]))
+    }
+
+    // We will recreate this scenario:
+    // tx index, status , external_delete
+    // 0       , pending, false
+    // 1       , pending, false
+    // 2       , pending, true  // will be deleted
+    // 3       , pending, true  // will be deleted
+    // 4       , fging  , false
+    // 5       , fging  , false
+    // 6       , fging  , true
+    // 7       , fging  , true
+
+    require.NoError(t, l2DB.StartForging(
+        []common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
+        1))
+    _, err = l2DB.dbWrite.Exec(
+        `UPDATE tx_pool SET external_delete = true WHERE
+        tx_id IN ($1, $2, $3, $4)
+        ;`,
+        txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
+    )
+    require.NoError(t, err)
+    require.NoError(t, l2DB.PurgeByExternalDelete())
+
+    // Query txs that have not been deleted
+    for _, i := range []int{0, 1, 4, 5, 6, 7} {
+        txID := txs[i].TxID
+        _, err := l2DB.GetTx(txID)
+        require.NoError(t, err)
+    }
+
+    // Query txs that have been deleted
+    for _, i := range []int{2, 3} {
+        txID := txs[i].TxID
+        _, err := l2DB.GetTx(txID)
+        require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
+    }
+}
```
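The new test fixes the `external_delete` contract: a flagged tx is purged only while it is still pending; once forged (rows 6 and 7 above), it survives the purge. `PurgeByExternalDelete` itself is not part of this diff; a minimal sketch of what it could look like against the `tx_pool` schema, assuming an `sqlx` write handle, is:

```go
package l2db

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
)

// purgeByExternalDelete is a hypothetical sketch, not the repository's
// implementation: delete every externally flagged tx, but only while it
// is still pending, so forged txs survive as the test requires.
func purgeByExternalDelete(dbWrite *sqlx.DB) error {
	_, err := dbWrite.Exec(
		"DELETE FROM tx_pool WHERE external_delete = true AND state = $1;",
		common.PoolL2TxStatePending,
	)
	return tracerr.Wrap(err)
}
```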
The pool tx write model gains a `ClientIP` field, and the API serialization drops `batchNum`:

```diff
@@ -34,6 +34,7 @@ type PoolL2TxWrite struct {
     RqFee    *common.FeeSelector `meddler:"rq_fee"`
     RqNonce  *common.Nonce       `meddler:"rq_nonce"`
     Type     common.TxType       `meddler:"tx_type"`
+    ClientIP string              `meddler:"client_ip"`
 }

 // PoolTxAPI represents a L2 Tx pool with extra metadata used by the API
@@ -94,7 +95,6 @@ func (tx PoolTxAPI) MarshalJSON() ([]byte, error) {
         "info":      tx.Info,
         "signature": tx.Signature,
         "timestamp": tx.Timestamp,
-        "batchNum":  tx.BatchNum,
         "requestFromAccountIndex":     tx.RqFromIdx,
         "requestToAccountIndex":       tx.RqToIdx,
         "requestToHezEthereumAddress": tx.RqToEthAddr,
```
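The new `ClientIP` field needs no hand-written SQL because meddler builds column lists from struct tags: `meddler:"client_ip"` binds the field to the `tx_pool` column added in the migration below. A sketch of that mechanism; the helper is illustrative and glosses over the other NOT NULL columns a real insert must fill:

```go
package l2db

import (
	"database/sql"

	"github.com/russross/meddler"
)

// addTxSketch shows how meddler persists PoolL2TxWrite: the INSERT column
// list is derived from the `meddler:"..."` tags, so ClientIP travels to
// the client_ip column automatically once the tag exists.
func addTxSketch(db *sql.DB, tx *PoolL2TxWrite) error {
	return meddler.Insert(db, "tx_pool", tx)
}
```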
Migrations: documented `usd` semantics, new `account_update` and `node_info` tables, `client_ip` and `external_delete` columns on `tx_pool`, and an idempotent down migration (`IF EXISTS` everywhere):

```diff
@@ -47,7 +47,7 @@ CREATE TABLE token (
     name VARCHAR(20) NOT NULL,
     symbol VARCHAR(10) NOT NULL,
     decimals INT NOT NULL,
-    usd NUMERIC,
+    usd NUMERIC, -- value of a normalized token (1 token = 10^decimals units)
     usd_update TIMESTAMP WITHOUT TIME ZONE
 );

@@ -100,6 +100,15 @@ CREATE TABLE account (
     eth_addr BYTEA NOT NULL
 );

+CREATE TABLE account_update (
+    item_id SERIAL,
+    eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
+    batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
+    idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
+    nonce BIGINT NOT NULL,
+    balance BYTEA NOT NULL
+);
+
 CREATE TABLE exit_tree (
     item_id SERIAL PRIMARY KEY,
     batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
@@ -618,7 +627,9 @@ CREATE TABLE tx_pool (
     rq_amount BYTEA,
     rq_fee SMALLINT,
     rq_nonce BIGINT,
-    tx_type VARCHAR(40) NOT NULL
+    tx_type VARCHAR(40) NOT NULL,
+    client_ip VARCHAR,
+    external_delete BOOLEAN NOT NULL DEFAULT false
 );

 -- +migrate StatementBegin
@@ -650,35 +661,47 @@ CREATE TABLE account_creation_auth (
     timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
 );

+CREATE TABLE node_info (
+    item_id SERIAL PRIMARY KEY,
+    state BYTEA,    -- object returned by GET /state
+    config BYTEA,   -- Node config
+    -- max_pool_txs BIGINT, -- L2DB config
+    -- min_fee NUMERIC,     -- L2DB config
+    constants BYTEA -- info of the network that is constant
+);
+INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
+
 -- +migrate Down
--- drop triggers
-DROP TRIGGER trigger_token_usd_update ON token;
-DROP TRIGGER trigger_set_tx ON tx;
-DROP TRIGGER trigger_forge_l1_txs ON batch;
-DROP TRIGGER trigger_set_pool_tx ON tx_pool;
--- drop functions
-DROP FUNCTION hez_idx;
-DROP FUNCTION set_token_usd_update;
-DROP FUNCTION fee_percentage;
-DROP FUNCTION set_tx;
-DROP FUNCTION forge_l1_user_txs;
-DROP FUNCTION set_pool_tx;
--- drop tables
-DROP TABLE account_creation_auth;
-DROP TABLE tx_pool;
-DROP TABLE auction_vars;
-DROP TABLE rollup_vars;
-DROP TABLE escape_hatch_withdrawal;
-DROP TABLE bucket_update;
-DROP TABLE token_exchange;
-DROP TABLE wdelayer_vars;
-DROP TABLE tx;
-DROP TABLE exit_tree;
-DROP TABLE account;
-DROP TABLE token;
-DROP TABLE bid;
-DROP TABLE batch;
-DROP TABLE coordinator;
-DROP TABLE block;
--- drop sequences
-DROP SEQUENCE tx_item_id;
+-- triggers
+DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
+DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
+DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
+DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
+-- functions
+DROP FUNCTION IF EXISTS hez_idx;
+DROP FUNCTION IF EXISTS set_token_usd_update;
+DROP FUNCTION IF EXISTS fee_percentage;
+DROP FUNCTION IF EXISTS set_tx;
+DROP FUNCTION IF EXISTS forge_l1_user_txs;
+DROP FUNCTION IF EXISTS set_pool_tx;
+-- drop tables IF EXISTS
+DROP TABLE IF EXISTS node_info;
+DROP TABLE IF EXISTS account_creation_auth;
+DROP TABLE IF EXISTS tx_pool;
+DROP TABLE IF EXISTS auction_vars;
+DROP TABLE IF EXISTS rollup_vars;
+DROP TABLE IF EXISTS escape_hatch_withdrawal;
+DROP TABLE IF EXISTS bucket_update;
+DROP TABLE IF EXISTS token_exchange;
+DROP TABLE IF EXISTS wdelayer_vars;
+DROP TABLE IF EXISTS tx;
+DROP TABLE IF EXISTS exit_tree;
+DROP TABLE IF EXISTS account_update;
+DROP TABLE IF EXISTS account;
+DROP TABLE IF EXISTS token;
+DROP TABLE IF EXISTS bid;
+DROP TABLE IF EXISTS batch;
+DROP TABLE IF EXISTS coordinator;
+DROP TABLE IF EXISTS block;
+-- sequences
+DROP SEQUENCE IF EXISTS tx_item_id;
```
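The seed `INSERT` makes `node_info` a single-row table: consumers address it by `item_id = 1` and refresh columns in place instead of appending rows. A sketch of that access pattern, assuming an `sqlx` handle (the helper name is hypothetical):

```go
package historydb

import (
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
)

// setNodeInfoState updates the one row that the migration seeded; there
// is never a second row, so a plain UPDATE suffices.
func setNodeInfoState(db *sqlx.DB, state []byte) error {
	_, err := db.Exec("UPDATE node_info SET state = $1 WHERE item_id = 1;", state)
	return tracerr.Wrap(err)
}
```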
StateDB: the constructor parameters collapse into a `Config` struct, a `MaxNLevels` constant, a `Type()` getter, and `CheckpointExists` are added, and Reset logging moves up front:

```diff
@@ -52,19 +52,40 @@ const (
     // TypeBatchBuilder defines a StateDB used by the BatchBuilder, that
     // generates the ExitTree and the ZKInput when processing the txs
     TypeBatchBuilder = "batchbuilder"
+    // MaxNLevels is the maximum value of NLevels for the merkle tree,
+    // which comes from the fact that AccountIdx has 48 bits.
+    MaxNLevels = 48
 )

 // TypeStateDB determines the type of StateDB
 type TypeStateDB string

+// Config of the StateDB
+type Config struct {
+    // Path where the checkpoints will be stored
+    Path string
+    // Keep is the number of old checkpoints to keep. If 0, all
+    // checkpoints are kept.
+    Keep int
+    // NoLast skips having an opened DB with a checkpoint to the last
+    // batchNum for thread-safe reads.
+    NoLast bool
+    // Type of StateDB
+    Type TypeStateDB
+    // NLevels is the number of merkle tree levels in case the Type uses a
+    // merkle tree. If the Type doesn't use a merkle tree, NLevels should
+    // be 0.
+    NLevels int
+    // At every checkpoint, check that there are no gaps between the
+    // checkpoints
+    noGapsCheck bool
+}
+
 // StateDB represents the StateDB object
 type StateDB struct {
-    path    string
-    Typ     TypeStateDB
+    cfg Config
     db      *kvdb.KVDB
-    nLevels int
     MT      *merkletree.MerkleTree
-    keep    int
 }

 // Last offers a subset of view methods of the StateDB that can be
@@ -104,36 +125,40 @@ func (s *Last) GetAccounts() ([]common.Account, error) {
 // NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk
 // storage. Checkpoints older than the value defined by `keep` will be
 // deleted.
-func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
+// func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
+func NewStateDB(cfg Config) (*StateDB, error) {
     var kv *kvdb.KVDB
     var err error

-    kv, err = kvdb.NewKVDB(pathDB, keep)
+    kv, err = kvdb.NewKVDB(kvdb.Config{Path: cfg.Path, Keep: cfg.Keep,
+        NoGapsCheck: cfg.noGapsCheck, NoLast: cfg.NoLast})
     if err != nil {
         return nil, tracerr.Wrap(err)
     }

     var mt *merkletree.MerkleTree = nil
-    if typ == TypeSynchronizer || typ == TypeBatchBuilder {
-        mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), nLevels)
+    if cfg.Type == TypeSynchronizer || cfg.Type == TypeBatchBuilder {
+        mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), cfg.NLevels)
         if err != nil {
             return nil, tracerr.Wrap(err)
         }
     }
-    if typ == TypeTxSelector && nLevels != 0 {
+    if cfg.Type == TypeTxSelector && cfg.NLevels != 0 {
         return nil, tracerr.Wrap(fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
     }

     return &StateDB{
-        path:    pathDB,
+        cfg: cfg,
         db:      kv,
-        nLevels: nLevels,
         MT:      mt,
-        Typ:     typ,
-        keep:    keep,
     }, nil
 }

+// Type returns the StateDB configured Type
+func (s *StateDB) Type() TypeStateDB {
+    return s.cfg.Type
+}
+
 // LastRead is a thread-safe method to query the last checkpoint of the StateDB
 // via the Last type methods
 func (s *StateDB) LastRead(fn func(sdbLast *Last) error) error {
@@ -179,7 +204,7 @@ func (s *StateDB) LastGetCurrentBatch() (common.BatchNum, error) {
 func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
     var root *big.Int
     if err := s.LastRead(func(sdb *Last) error {
-        mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.nLevels)
+        mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.cfg.NLevels)
         if err != nil {
             return tracerr.Wrap(err)
         }
@@ -195,7 +220,7 @@ func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
 // Internally this advances & stores the current BatchNum, and then stores a
 // Checkpoint of the current state of the StateDB.
 func (s *StateDB) MakeCheckpoint() error {
-    log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.Typ)
+    log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.cfg.Type)
     return s.db.MakeCheckpoint()
 }

@@ -230,8 +255,8 @@ func (s *StateDB) SetCurrentIdx(idx common.Idx) error {
 // those checkpoints will remain in the storage, and eventually will be
 // deleted when MakeCheckpoint overwrites them.
 func (s *StateDB) Reset(batchNum common.BatchNum) error {
-    err := s.db.Reset(batchNum)
-    if err != nil {
+    log.Debugw("Making StateDB Reset", "batch", batchNum, "type", s.cfg.Type)
+    if err := s.db.Reset(batchNum); err != nil {
         return tracerr.Wrap(err)
     }
     if s.MT != nil {
@@ -242,7 +267,6 @@ func (s *StateDB) Reset(batchNum common.BatchNum) error {
         }
         s.MT = mt
     }
-    log.Debugw("Making StateDB Reset", "batch", batchNum)
     return nil
 }

@@ -461,9 +485,10 @@ type LocalStateDB struct {
 // NewLocalStateDB returns a new LocalStateDB connected to the given
 // synchronizerDB. Checkpoints older than the value defined by `keep` will be
 // deleted.
-func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeStateDB,
-    nLevels int) (*LocalStateDB, error) {
-    s, err := NewStateDB(path, keep, typ, nLevels)
+func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error) {
+    cfg.noGapsCheck = true
+    cfg.NoLast = true
+    s, err := NewStateDB(cfg)
     if err != nil {
         return nil, tracerr.Wrap(err)
     }
@@ -473,18 +498,24 @@ func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeSta
     }, nil
 }

+// CheckpointExists returns true if the checkpoint exists
+func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
+    return l.db.CheckpointExists(batchNum)
+}
+
 // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
 // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
 func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
     if fromSynchronizer {
-        err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db)
-        if err != nil {
+        log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
+        if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
             return tracerr.Wrap(err)
         }
         // open the MT for the current s.db
         if l.MT != nil {
-            mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT), l.MT.MaxLevels())
+            mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT),
+                l.MT.MaxLevels())
             if err != nil {
                 return tracerr.Wrap(err)
             }
```
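With the positional parameters folded into `Config`, call sites name every field, and `NewLocalStateDB` can force its own flags (`NoLast` and the unexported `noGapsCheck`) before delegating to `NewStateDB`. A sketch of the migrated call sites, with illustrative paths:

```go
package main

import (
	"log"

	"github.com/hermeznetwork/hermez-node/db/statedb"
)

func main() {
	// Synchronizer StateDB: keeps a merkle tree, so NLevels > 0.
	synchDB, err := statedb.NewStateDB(statedb.Config{
		Path:    "/var/hermez/statedb", // illustrative path
		Keep:    128,
		Type:    statedb.TypeSynchronizer,
		NLevels: 32, // must be 0 for TypeTxSelector
	})
	if err != nil {
		log.Fatal(err)
	}
	// LocalStateDB for the batch builder; it sets NoLast/noGapsCheck itself.
	ldb, err := statedb.NewLocalStateDB(statedb.Config{
		Path:    "/var/hermez/batchbuilder", // illustrative path
		Keep:    128,
		Type:    statedb.TypeBatchBuilder,
		NLevels: 32,
	}, synchDB)
	if err != nil {
		log.Fatal(err)
	}
	_ = ldb
}
```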
StateDB tests updated to the `Config` API and the public checkpoint methods, plus a new `TestResetFromBadCheckpoint`:

```diff
@@ -45,7 +45,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     require.NoError(t, err)

     // test values
@@ -78,7 +78,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

     // call NewStateDB which should get the db at the last checkpoint state
     // executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
-    sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     require.NoError(t, err)
     v, err = sdb.db.DB().Get(k0)
     assert.NotNil(t, err)
@@ -116,7 +116,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
     bn, err := sdb.getCurrentBatch()
     require.NoError(t, err)
     assert.Equal(t, common.BatchNum(0), bn)
-    err = sdb.db.MakeCheckpoint()
+    err = sdb.MakeCheckpoint()
     require.NoError(t, err)
     bn, err = sdb.getCurrentBatch()
     require.NoError(t, err)
@@ -158,7 +158,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

     // call NewStateDB which should get the db at the last checkpoint state
     // executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
-    sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     require.NoError(t, err)

     bn, err = sdb.getCurrentBatch()
@@ -182,7 +182,7 @@ func TestStateDBWithoutMT(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     require.NoError(t, err)

     // create test accounts
@@ -236,7 +236,7 @@ func TestStateDBWithMT(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     // create test accounts
@@ -290,7 +290,7 @@ func TestCheckpoints(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     err = sdb.Reset(0)
@@ -335,7 +335,7 @@ func TestCheckpoints(t *testing.T) {
         assert.Equal(t, common.BatchNum(i+1), cb)
     }

-    // printCheckpoints(t, sdb.path)
+    // printCheckpoints(t, sdb.cfg.Path)

     // reset checkpoint
     err = sdb.Reset(3)
@@ -371,7 +371,7 @@ func TestCheckpoints(t *testing.T) {
     dirLocal, err := ioutil.TempDir("", "ldb")
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dirLocal))
-    ldb, err := NewLocalStateDB(dirLocal, 128, sdb, TypeBatchBuilder, 32)
+    ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
     require.NoError(t, err)

     // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -392,28 +392,26 @@ func TestCheckpoints(t *testing.T) {
     dirLocal2, err := ioutil.TempDir("", "ldb2")
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dirLocal2))
-    ldb2, err := NewLocalStateDB(dirLocal2, 128, sdb, TypeBatchBuilder, 32)
+    ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
     require.NoError(t, err)

     // get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
     err = ldb2.Reset(4, true)
     require.NoError(t, err)
     // check that currentBatch is 4 after the Reset
-    cb, err = ldb2.db.GetCurrentBatch()
-    require.NoError(t, err)
+    cb = ldb2.CurrentBatch()
     assert.Equal(t, common.BatchNum(4), cb)
     // advance one checkpoint in ldb2
-    err = ldb2.db.MakeCheckpoint()
-    require.NoError(t, err)
-    cb, err = ldb2.db.GetCurrentBatch()
+    err = ldb2.MakeCheckpoint()
     require.NoError(t, err)
+    cb = ldb2.CurrentBatch()
     assert.Equal(t, common.BatchNum(5), cb)

     debug := false
     if debug {
-        printCheckpoints(t, sdb.path)
-        printCheckpoints(t, ldb.path)
-        printCheckpoints(t, ldb2.path)
+        printCheckpoints(t, sdb.cfg.Path)
+        printCheckpoints(t, ldb.cfg.Path)
+        printCheckpoints(t, ldb2.cfg.Path)
     }
 }

@@ -421,7 +419,7 @@ func TestStateDBGetAccounts(t *testing.T) {
     dir, err := ioutil.TempDir("", "tmpdb")
     require.NoError(t, err)

-    sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     require.NoError(t, err)

     // create test accounts
@@ -468,7 +466,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
@@ -542,7 +540,7 @@ func TestListCheckpoints(t *testing.T) {
     require.NoError(t, err)
     defer require.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     numCheckpoints := 16
@@ -575,7 +573,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
     defer require.NoError(t, os.RemoveAll(dir))

     keep := 16
-    sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     numCheckpoints := 32
@@ -596,7 +594,7 @@ func TestCurrentIdx(t *testing.T) {
     defer require.NoError(t, os.RemoveAll(dir))

     keep := 16
-    sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     idx := sdb.CurrentIdx()
@@ -604,7 +602,7 @@ func TestCurrentIdx(t *testing.T) {

     sdb.Close()

-    sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
+    sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     idx = sdb.CurrentIdx()
@@ -618,9 +616,30 @@ func TestCurrentIdx(t *testing.T) {

     sdb.Close()

-    sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
+    sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)

     idx = sdb.CurrentIdx()
     assert.Equal(t, common.Idx(255), idx)
 }

+func TestResetFromBadCheckpoint(t *testing.T) {
+    dir, err := ioutil.TempDir("", "tmpdb")
+    require.NoError(t, err)
+    defer require.NoError(t, os.RemoveAll(dir))
+
+    keep := 16
+    sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
+    require.NoError(t, err)
+
+    err = sdb.MakeCheckpoint()
+    require.NoError(t, err)
+    err = sdb.MakeCheckpoint()
+    require.NoError(t, err)
+    err = sdb.MakeCheckpoint()
+    require.NoError(t, err)
+
+    // reset from a checkpoint that doesn't exist
+    err = sdb.Reset(10)
+    require.Error(t, err)
+}
```
The same `Config` migration in the StateDB utils test:

```diff
@@ -18,7 +18,7 @@ func TestGetIdx(t *testing.T) {
     require.NoError(t, err)
     defer assert.NoError(t, os.RemoveAll(dir))

-    sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
+    sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
     assert.NoError(t, err)

     var sk babyjub.PrivateKey
```
The API connection limiter switches from `marusama/semaphore` to `golang.org/x/sync/semaphore`:

```diff
@@ -13,9 +13,9 @@ import (
     "github.com/hermeznetwork/hermez-node/log"
     "github.com/hermeznetwork/tracerr"
     "github.com/jmoiron/sqlx"
-    "github.com/marusama/semaphore/v2"
     migrate "github.com/rubenv/sql-migrate"
     "github.com/russross/meddler"
+    "golang.org/x/sync/semaphore"
 )

 var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {

 // APIConnectionController is used to limit the SQL open connections used by the API
 type APIConnectionController struct {
-    smphr   semaphore.Semaphore
+    smphr   *semaphore.Weighted
     timeout time.Duration
 }

 // NewAPICnnectionController initialize APIConnectionController
 func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
     return &APIConnectionController{
-        smphr:   semaphore.New(maxConnections),
+        smphr:   semaphore.NewWeighted(int64(maxConnections)),
         timeout: timeout,
     }
 }
```
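`*semaphore.Weighted` is acquired through a `context.Context`, which is the natural carrier for the controller's `timeout`. The acquire path is not shown in this diff; a sketch of the idiom the swap enables (helper assumed, not from the diff):

```go
package db

import (
	"context"
	"time"

	"golang.org/x/sync/semaphore"
)

// acquireWithTimeout is an assumed usage sketch: Acquire blocks until a
// slot frees up or the deadline expires, replacing the bespoke timeout
// API of the previous semaphore package.
func acquireWithTimeout(smphr *semaphore.Weighted, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := smphr.Acquire(ctx, 1); err != nil {
		return err // context.DeadlineExceeded when the timeout fires
	}
	// the caller must smphr.Release(1) once the connection is done
	return nil
}
```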
`AuctionEventsByBlock` now takes an optional block hash instead of returning one, and returns nil when the block has no events:

```diff
@@ -254,7 +254,7 @@ type AuctionInterface interface {
     //

     AuctionConstants() (*common.AuctionConstants, error)
-    AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error)
+    AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error)
     AuctionEventInit() (*AuctionEventInitialize, int64, error)
 }

@@ -797,15 +797,22 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
 }

 // AuctionEventsByBlock returns the events in a block that happened in the
-// Auction Smart Contract and the blockHash where the eents happened. If there
-// are no events in that block, blockHash is nil.
-func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error) {
+// Auction Smart Contract.
+// To query by blockNum, set blockNum >= 0 and blockHash == nil.
+// To query by blockHash set blockHash != nil, and blockNum will be ignored.
+// If there are no events in that block the result is nil.
+func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
+    blockHash *ethCommon.Hash) (*AuctionEvents, error) {
     var auctionEvents AuctionEvents
-    var blockHash *ethCommon.Hash

+    var blockNumBigInt *big.Int
+    if blockHash == nil {
+        blockNumBigInt = big.NewInt(blockNum)
+    }
     query := ethereum.FilterQuery{
-        FromBlock: big.NewInt(blockNum),
-        ToBlock:   big.NewInt(blockNum),
+        BlockHash: blockHash,
+        FromBlock: blockNumBigInt,
+        ToBlock:   blockNumBigInt,
         Addresses: []ethCommon.Address{
             c.address,
         },
@@ -814,15 +821,16 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e

     logs, err := c.client.client.FilterLogs(context.TODO(), query)
     if err != nil {
-        return nil, nil, tracerr.Wrap(err)
+        return nil, tracerr.Wrap(err)
     }
-    if len(logs) > 0 {
-        blockHash = &logs[0].BlockHash
+    if len(logs) == 0 {
+        return nil, nil
     }

     for _, vLog := range logs {
-        if vLog.BlockHash != *blockHash {
+        if blockHash != nil && vLog.BlockHash != *blockHash {
             log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
-            return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
+            return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
         }
         switch vLog.Topics[0] {
         case logAuctionNewBid:
@@ -833,7 +841,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
             }
             var newBid AuctionEventNewBid
             if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             newBid.BidAmount = auxNewBid.BidAmount
             newBid.Slot = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
@@ -842,19 +850,19 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
         case logAuctionNewSlotDeadline:
             var newSlotDeadline AuctionEventNewSlotDeadline
             if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, "NewSlotDeadline", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline)
         case logAuctionNewClosedAuctionSlots:
             var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots
             if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, "NewClosedAuctionSlots", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             auctionEvents.NewClosedAuctionSlots = append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
         case logAuctionNewOutbidding:
             var newOutbidding AuctionEventNewOutbidding
             if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding)
         case logAuctionNewDonationAddress:
@@ -864,26 +872,26 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
         case logAuctionNewBootCoordinator:
             var newBootCoordinator AuctionEventNewBootCoordinator
             if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, "NewBootCoordinator", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
             auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, newBootCoordinator)
         case logAuctionNewOpenAuctionSlots:
             var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots
             if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, "NewOpenAuctionSlots", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             auctionEvents.NewOpenAuctionSlots = append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
         case logAuctionNewAllocationRatio:
             var newAllocationRatio AuctionEventNewAllocationRatio
             if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, "NewAllocationRatio", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, newAllocationRatio)
         case logAuctionSetCoordinator:
             var setCoordinator AuctionEventSetCoordinator
             if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, "SetCoordinator", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
             setCoordinator.ForgerAddress = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -891,7 +899,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
         case logAuctionNewForgeAllocated:
             var newForgeAllocated AuctionEventNewForgeAllocated
             if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, "NewForgeAllocated", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
             newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -904,7 +912,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
             }
             var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid
             if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, "NewDefaultSlotSetBid", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid
             newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64()
@@ -917,11 +925,11 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
         case logAuctionHEZClaimed:
             var HEZClaimed AuctionEventHEZClaimed
             if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", vLog.Data); err != nil {
-                return nil, nil, tracerr.Wrap(err)
+                return nil, tracerr.Wrap(err)
             }
             HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
             auctionEvents.HEZClaimed = append(auctionEvents.HEZClaimed, HEZClaimed)
         }
     }
-    return &auctionEvents, blockHash, nil
+    return &auctionEvents, nil
 }
```
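The reworked signature folds the old return-the-block-hash behavior into an input: pass a block number with a nil hash, or a hash that takes precedence. A usage sketch with placeholder values:

```go
package eth

import (
	ethCommon "github.com/ethereum/go-ethereum/common"
)

// auctionEventsSketch illustrates both lookup modes of the new signature;
// the client, block number, and hash are placeholders.
func auctionEventsSketch(c *AuctionClient) error {
	// Mode 1: by block number (blockHash == nil, so From/ToBlock are set).
	events, err := c.AuctionEventsByBlock(4242, nil)
	if err != nil {
		return err
	}
	if events == nil {
		// nil events with nil error now means "no events in this block"
		return nil
	}

	// Mode 2: by block hash; blockNum is ignored and BlockHash pins the query.
	hash := ethCommon.HexToHash("0x01") // placeholder hash
	_, err = c.AuctionEventsByBlock(0, &hash)
	return err
}
```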
Auction tests updated to the new two-argument call:

```diff
@@ -88,7 +88,7 @@ func TestAuctionSetSlotDeadline(t *testing.T) {
     assert.Equal(t, newSlotDeadline, slotDeadline)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline)
 }
@@ -109,7 +109,7 @@ func TestAuctionSetOpenAuctionSlots(t *testing.T) {
     assert.Equal(t, newOpenAuctionSlots, openAuctionSlots)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots)
 }
@@ -130,7 +130,7 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
     assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
     _, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
@@ -153,7 +153,7 @@ func TestAuctionSetOutbidding(t *testing.T) {
     assert.Equal(t, newOutbidding, outbidding)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding)
     _, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst)
@@ -176,7 +176,7 @@ func TestAuctionSetAllocationRatio(t *testing.T) {
     assert.Equal(t, newAllocationRatio, allocationRatio)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio)
     _, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst)
@@ -205,7 +205,7 @@ func TestAuctionSetDonationAddress(t *testing.T) {
     assert.Equal(t, &newDonationAddress, donationAddress)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress)
     _, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst)
@@ -224,7 +224,7 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
     assert.Equal(t, &newBootCoordinator, bootCoordinator)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
     assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL)
@@ -261,7 +261,7 @@ func TestAuctionChangeDefaultSlotSetBid(t *testing.T) {
     assert.Equal(t, minBid, newInitialMinBid)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet)
     assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid)
@@ -287,7 +287,7 @@ func TestAuctionRegisterCoordinator(t *testing.T) {
     require.Nil(t, err)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress)
     assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress)
@@ -306,7 +306,7 @@ func TestAuctionBid(t *testing.T) {
     require.Nil(t, err)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount)
     assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
@@ -346,7 +346,7 @@ func TestAuctionMultiBid(t *testing.T) {
     require.Nil(t, err)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
     assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot)
@@ -376,7 +376,7 @@ func TestAuctionClaimHEZ(t *testing.T) {
     require.Nil(t, err)
     currentBlockNum, err := auctionClientTest.client.EthLastBlock()
     require.Nil(t, err)
-    auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
+    auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
     require.Nil(t, err)
     assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount)
     assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner)
```
`EthCall` wraps the contract-call error with `tracerr`:

```diff
@@ -324,5 +324,6 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
         Value: tx.Value(),
         Data:  tx.Data(),
     }
-    return c.client.CallContract(ctx, msg, blockNum)
+    result, err := c.client.CallContract(ctx, msg, blockNum)
+    return result, tracerr.Wrap(err)
 }
```
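The `EthCall` change keeps behavior but routes the error through `tracerr.Wrap`, which attaches a stack trace and passes a nil error through unchanged. The same pattern, extracted as a sketch (the helper is illustrative):

```go
package eth

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/hermeznetwork/tracerr"
)

// callContractSketch mirrors the new EthCall tail: capture both results,
// wrap the error so it carries a stack trace, return them together.
func callContractSketch(c *EthereumClient, msg ethereum.CallMsg,
	blockNum *big.Int) ([]byte, error) {
	result, err := c.client.CallContract(context.TODO(), msg, blockNum)
	return result, tracerr.Wrap(err) // tracerr.Wrap(nil) stays nil
}
```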
@@ -264,7 +264,7 @@ type RollupInterface interface {
|
|||||||
//
|
//
|
||||||
|
|
||||||
RollupConstants() (*common.RollupConstants, error)
|
RollupConstants() (*common.RollupConstants, error)
|
||||||
RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error)
|
RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error)
|
||||||
RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
|
RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
|
||||||
RollupEventInit() (*RollupEventInitialize, int64, error)
|
RollupEventInit() (*RollupEventInitialize, int64, error)
|
||||||
}
|
}
|
||||||
@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
|
|||||||
}
|
}
|
||||||
consts, err := c.RollupConstants()
|
consts, err := c.RollupConstants()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err))
|
||||||
}
|
}
|
||||||
c.consts = consts
|
c.consts = consts
|
||||||
return c, nil
|
return c, nil
|
||||||
@@ -327,7 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
|
|||||||
if auth == nil {
|
if auth == nil {
|
||||||
auth, err = c.client.NewAuth()
|
auth, err = c.client.NewAuth()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
auth.GasLimit = 1000000
|
auth.GasLimit = 1000000
|
||||||
}
|
}
|
||||||
@@ -393,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
 		l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
 		args.ProofA, args.ProofB, args.ProofC)
 	if err != nil {
-		return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err))
+		return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err))
 	}
 	return tx, nil
 }
@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
 	}
 	fromIdxBig := big.NewInt(fromIdx)
 	toIdxBig := big.NewInt(toIdx)
-	depositAmountF, err := common.NewFloat16(depositAmount)
+	depositAmountF, err := common.NewFloat40(depositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	amountF, err := common.NewFloat16(amount)
+	amountF, err := common.NewFloat40(amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
 	}
 	fromIdxBig := big.NewInt(fromIdx)
 	toIdxBig := big.NewInt(toIdx)
-	depositAmountF, err := common.NewFloat16(depositAmount)
+	depositAmountF, err := common.NewFloat40(depositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	amountF, err := common.NewFloat16(amount)
+	amountF, err := common.NewFloat40(amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
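The `NewFloat16` → `NewFloat40` swap is the protocol-level change behind much of this diff: deposit and transfer amounts move from a 16-bit to a 40-bit floating-point encoding, widening the range of amounts that can be represented exactly; a Float40 value occupies 5 bytes (`common.Float40BytesLength`, used later in this diff). A conceptual toy of the mantissa-times-power-of-ten idea, with the 35-bit mantissa limit stated as an assumption rather than a description of `common/float40.go`:

```
// Toy Float40-style encoder: value = mantissa * 10^exponent, erroring when
// the amount cannot be represented without losing precision. Bit widths are
// assumptions for illustration; the real codec lives in hermez-node's
// common package.
package main

import (
	"fmt"
	"math/big"
)

func encode(v *big.Int) (mantissa uint64, exponent uint8, err error) {
	ten := big.NewInt(10)
	limit := new(big.Int).Lsh(big.NewInt(1), 35) // assumed mantissa limit
	m := new(big.Int).Set(v)
	for m.Cmp(limit) >= 0 {
		q, r := new(big.Int).QuoRem(m, ten, new(big.Int))
		if r.Sign() != 0 {
			return 0, 0, fmt.Errorf("%v loses precision", v)
		}
		m = q
		exponent++
	}
	return m.Uint64(), exponent, nil
}

func main() {
	fmt.Println(encode(big.NewInt(123000000000))) // 12300000000 1 <nil>
}
```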
@@ -735,31 +735,40 @@ func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error)
 	return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err)
 }
 
-// RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
-func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error) {
+// RollupEventsByBlock returns the events in a block that happened in the
+// Rollup Smart Contract.
+// To query by blockNum, set blockNum >= 0 and blockHash == nil.
+// To query by blockHash set blockHash != nil, and blockNum will be ignored.
+// If there are no events in that block the result is nil.
+func (c *RollupClient) RollupEventsByBlock(blockNum int64,
+	blockHash *ethCommon.Hash) (*RollupEvents, error) {
 	var rollupEvents RollupEvents
-	var blockHash *ethCommon.Hash
 
+	var blockNumBigInt *big.Int
+	if blockHash == nil {
+		blockNumBigInt = big.NewInt(blockNum)
+	}
 	query := ethereum.FilterQuery{
-		FromBlock: big.NewInt(blockNum),
-		ToBlock:   big.NewInt(blockNum),
+		BlockHash: blockHash,
+		FromBlock: blockNumBigInt,
+		ToBlock:   blockNumBigInt,
 		Addresses: []ethCommon.Address{
 			c.address,
 		},
-		BlockHash: nil,
 		Topics: [][]ethCommon.Hash{},
 	}
 	logs, err := c.client.client.FilterLogs(context.Background(), query)
 	if err != nil {
-		return nil, nil, tracerr.Wrap(err)
+		return nil, tracerr.Wrap(err)
 	}
-	if len(logs) > 0 {
-		blockHash = &logs[0].BlockHash
+	if len(logs) == 0 {
+		return nil, nil
 	}
 
 	for _, vLog := range logs {
-		if vLog.BlockHash != *blockHash {
+		if blockHash != nil && vLog.BlockHash != *blockHash {
 			log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
-			return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
+			return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
 		}
 		switch vLog.Topics[0] {
 		case logHermezL1UserTxEvent:
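The reworked query above leans on go-ethereum's filter semantics: `ethereum.FilterQuery` documents `BlockHash` as mutually exclusive with `FromBlock`/`ToBlock`, which is why the `big.Int` range is only populated when no hash is supplied. Isolated for clarity (`buildQuery` is a hypothetical helper, not code from this diff):

```
// Hypothetical helper showing the either/or construction of the filter.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum"
	ethCommon "github.com/ethereum/go-ethereum/common"
)

func buildQuery(blockNum int64, blockHash *ethCommon.Hash,
	address ethCommon.Address) ethereum.FilterQuery {
	var blockNumBigInt *big.Int
	if blockHash == nil {
		// Only fill the range when no hash is given: eth_getLogs rejects
		// filters that set both a block hash and a block range.
		blockNumBigInt = big.NewInt(blockNum)
	}
	return ethereum.FilterQuery{
		BlockHash: blockHash,
		FromBlock: blockNumBigInt,
		ToBlock:   blockNumBigInt,
		Addresses: []ethCommon.Address{address},
	}
}

func main() {
	q := buildQuery(42, nil, ethCommon.Address{})
	fmt.Println(q.FromBlock, q.ToBlock, q.BlockHash) // 42 42 <nil>
}
```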
@@ -767,11 +776,11 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var L1UserTx RollupEventL1UserTx
 			err := c.contractAbi.UnpackIntoInterface(&L1UserTxAux, "L1UserTxEvent", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			L1Tx, err := common.L1UserTxFromBytes(L1UserTxAux.L1UserTx)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			toForgeL1TxsNum := new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
 			L1Tx.ToForgeL1TxsNum = &toForgeL1TxsNum
@@ -783,7 +792,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var addToken RollupEventAddToken
 			err := c.contractAbi.UnpackIntoInterface(&addToken, "AddToken", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			addToken.TokenAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
 			rollupEvents.AddToken = append(rollupEvents.AddToken, addToken)
@@ -791,7 +800,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var forgeBatch RollupEventForgeBatch
 			err := c.contractAbi.UnpackIntoInterface(&forgeBatch, "ForgeBatch", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			forgeBatch.BatchNum = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
 			forgeBatch.EthTxHash = vLog.TxHash
@@ -803,7 +812,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			}
 			err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, "UpdateForgeL1L2BatchTimeout", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			rollupEvents.UpdateForgeL1L2BatchTimeout = append(rollupEvents.UpdateForgeL1L2BatchTimeout,
 				RollupEventUpdateForgeL1L2BatchTimeout{
@@ -813,7 +822,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var updateFeeAddToken RollupEventUpdateFeeAddToken
 			err := c.contractAbi.UnpackIntoInterface(&updateFeeAddToken, "UpdateFeeAddToken", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			rollupEvents.UpdateFeeAddToken = append(rollupEvents.UpdateFeeAddToken, updateFeeAddToken)
 		case logHermezWithdrawEvent:
@@ -831,7 +840,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var updateBucketWithdraw RollupEventUpdateBucketWithdraw
 			err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, "UpdateBucketWithdraw", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals
 			updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64())
@@ -842,7 +851,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var withdrawalDelay RollupEventUpdateWithdrawalDelay
 			err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "UpdateWithdrawalDelay", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			rollupEvents.UpdateWithdrawalDelay = append(rollupEvents.UpdateWithdrawalDelay, withdrawalDelay)
 		case logHermezUpdateBucketsParameters:
@@ -850,7 +859,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var bucketsParameters RollupEventUpdateBucketsParameters
 			err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, "UpdateBucketsParameters", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			for i, bucket := range bucketsParametersAux.ArrayBuckets {
 				bucketsParameters.ArrayBuckets[i].CeilUSD = bucket[0]
@@ -863,7 +872,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 			var tokensExchange RollupEventUpdateTokenExchange
 			err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			rollupEvents.UpdateTokenExchange = append(rollupEvents.UpdateTokenExchange, tokensExchange)
 		case logHermezSafeMode:
@@ -885,7 +894,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 				bucketsParameters)
 		}
 	}
-	return &rollupEvents, blockHash, nil
+	return &rollupEvents, nil
 }
 
 // RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the
@@ -893,7 +902,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
 func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
 	tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash)
 	if err != nil {
-		return nil, nil, tracerr.Wrap(err)
+		return nil, nil, tracerr.Wrap(fmt.Errorf("TransactionByHash: %w", err))
 	}
 	txData := tx.Data()
 
@@ -930,9 +939,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
 		FeeIdxCoordinator: []common.Idx{},
 	}
 	nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
-	lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
+	lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
 	numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
-	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
+	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
 	numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
 	numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
 	l1UserTxsData := []byte{}
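The new length arithmetic follows directly from the amount encoding: each compressed L1/L2 transaction carries two nLevels-bit account indexes, one amount, and one fee byte. The amount used to cost 2 bytes (Float16) and now costs `common.Float40BytesLength`, i.e. 5. A worked example, assuming nLevels = 32:

```
// Worked example of lenL1L2TxsBytes, assuming nLevels = 32.
const (
	nLevels            = 32
	float40BytesLength = 5 // 40 bits
)

// two idxs + one Float40 amount + one fee byte
const lenL1L2TxsBytes = (nLevels/8)*2 + float40BytesLength + 1 // = 8 + 5 + 1 = 14
```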
@@ -959,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
 		rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
 	}
 	for i := 0; i < numTxsL1Coord; i++ {
-		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
+		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
 		var signature []byte
 		v := bytesL1Coordinator[0]
 		s := bytesL1Coordinator[1:33]
@@ -91,7 +91,7 @@ func TestRollupAddToken(t *testing.T) {
 	require.NoError(t, err)
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress)
@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
 	args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
 	l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
 	require.NoError(t, err)
-	numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
+	numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
 	for i := 0; i < numTxsL1; i++ {
-		bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
+		bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
 		var signature []byte
 		v := bytesL1Coordinator[0]
 		s := bytesL1Coordinator[1:33]
@@ -174,7 +174,7 @@ func TestRollupForgeBatch(t *testing.T) {
 
 	currentBlockNum, err = rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum)
@@ -203,7 +203,7 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
@@ -216,7 +216,7 @@ func TestRollupUpdateFeeAddToken(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken)
@@ -235,7 +235,7 @@ func TestRollupUpdateBucketsParameters(t *testing.T) {
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
 	blockStampBucket = currentBlockNum
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, bucketsParameters, rollupEvents.UpdateBucketsParameters[0].ArrayBuckets)
 }
@@ -246,7 +246,7 @@ func TestRollupUpdateWithdrawalDelay(t *testing.T) {
 	require.NoError(t, err)
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, newWithdrawalDelay, int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
 }
@@ -263,7 +263,7 @@ func TestRollupUpdateTokenExchange(t *testing.T) {
 	require.NoError(t, err)
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, addressArray, rollupEvents.UpdateTokenExchange[0].AddressArray)
 	assert.Equal(t, valueArray, rollupEvents.UpdateTokenExchange[0].ValueArray)
@@ -292,7 +292,7 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -324,7 +324,7 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -356,7 +356,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -388,7 +388,7 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -418,7 +418,7 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -447,7 +447,7 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -478,7 +478,7 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -508,7 +508,7 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -538,7 +538,7 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -569,7 +569,7 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -599,7 +599,7 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -629,7 +629,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -659,7 +659,7 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -688,7 +688,7 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -717,7 +717,7 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -747,7 +747,7 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -776,7 +776,7 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -807,7 +807,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
 	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -822,7 +822,7 @@ func TestRollupForgeBatch2(t *testing.T) {
 	require.NoError(t, err)
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum)
@@ -876,7 +876,7 @@ func TestRollupForgeBatch2(t *testing.T) {
 
 	currentBlockNum, err = rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err = rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err = rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, int64(3), rollupEvents.ForgeBatch[0].BatchNum)
@@ -928,7 +928,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 
 	assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx)
@@ -951,7 +951,7 @@ func TestRollupSafeMode(t *testing.T) {
 
 	currentBlockNum, err := rollupClient.client.EthLastBlock()
 	require.NoError(t, err)
-	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
+	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
 	require.NoError(t, err)
 	auxEvent := new(RollupEventSafeMode)
 	assert.Equal(t, auxEvent, &rollupEvents.SafeMode[0])
@@ -134,7 +134,7 @@ type WDelayerInterface interface {
 	WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error)
 	WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
 
-	WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error)
+	WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
 	WDelayerConstants() (*common.WDelayerConstants, error)
 	WDelayerEventInit() (*WDelayerEventInitialize, int64, error)
 }
@@ -424,40 +424,47 @@ func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, e
 }
 
 // WDelayerEventsByBlock returns the events in a block that happened in the
-// WDelayer Smart Contract and the blockHash where the eents happened. If
-// there are no events in that block, blockHash is nil.
-func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error) {
+// WDelayer Smart Contract.
+// To query by blockNum, set blockNum >= 0 and blockHash == nil.
+// To query by blockHash set blockHash != nil, and blockNum will be ignored.
+// If there are no events in that block the result is nil.
+func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
+	blockHash *ethCommon.Hash) (*WDelayerEvents, error) {
 	var wdelayerEvents WDelayerEvents
-	var blockHash *ethCommon.Hash
 
+	var blockNumBigInt *big.Int
+	if blockHash == nil {
+		blockNumBigInt = big.NewInt(blockNum)
+	}
 	query := ethereum.FilterQuery{
-		FromBlock: big.NewInt(blockNum),
-		ToBlock:   big.NewInt(blockNum),
+		BlockHash: blockHash,
+		FromBlock: blockNumBigInt,
+		ToBlock:   blockNumBigInt,
 		Addresses: []ethCommon.Address{
 			c.address,
 		},
-		BlockHash: nil,
 		Topics: [][]ethCommon.Hash{},
 	}
 
 	logs, err := c.client.client.FilterLogs(context.Background(), query)
 	if err != nil {
-		return nil, nil, tracerr.Wrap(err)
+		return nil, tracerr.Wrap(err)
 	}
-	if len(logs) > 0 {
-		blockHash = &logs[0].BlockHash
+	if len(logs) == 0 {
+		return nil, nil
 	}
 
 	for _, vLog := range logs {
-		if vLog.BlockHash != *blockHash {
+		if blockHash != nil && vLog.BlockHash != *blockHash {
 			log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
-			return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
+			return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
 		}
 		switch vLog.Topics[0] {
 		case logWDelayerDeposit:
 			var deposit WDelayerEventDeposit
 			err := c.contractAbi.UnpackIntoInterface(&deposit, "Deposit", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			deposit.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
 			deposit.Token = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -468,7 +475,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
 			var withdraw WDelayerEventWithdraw
 			err := c.contractAbi.UnpackIntoInterface(&withdraw, "Withdraw", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			withdraw.Token = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
 			withdraw.Owner = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -482,7 +489,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
 			var withdrawalDelay WDelayerEventNewWithdrawalDelay
 			err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "NewWithdrawalDelay", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			wdelayerEvents.NewWithdrawalDelay = append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
 
@@ -490,7 +497,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
 			var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal
 			err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, "EscapeHatchWithdrawal", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
 			escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -501,7 +508,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
 			var emergencyCouncil WDelayerEventNewEmergencyCouncil
 			err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, "NewEmergencyCouncil", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			wdelayerEvents.NewEmergencyCouncil = append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
 
@@ -509,10 +516,10 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
 			var governanceAddress WDelayerEventNewHermezGovernanceAddress
 			err := c.contractAbi.UnpackIntoInterface(&governanceAddress, "NewHermezGovernanceAddress", vLog.Data)
 			if err != nil {
-				return nil, nil, tracerr.Wrap(err)
+				return nil, tracerr.Wrap(err)
 			}
 			wdelayerEvents.NewHermezGovernanceAddress = append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
 		}
 	}
-	return &wdelayerEvents, blockHash, nil
+	return &wdelayerEvents, nil
 }
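`WDelayerEventsByBlock` gets the same treatment as the Rollup and Auction variants, so all three event getters now share one contract: query by number or by pinned hash, nil result for an empty block. A sketch of a caller handling the nil case (`syncWDelayerBlock` is illustrative, assuming the surrounding eth package):

```
// Illustrative caller of the new WDelayerEventsByBlock signature.
func syncWDelayerBlock(c WDelayerInterface, blockNum int64) error {
	events, err := c.WDelayerEventsByBlock(blockNum, nil)
	if err != nil {
		return tracerr.Wrap(err)
	}
	if events == nil {
		return nil // the WDelayer contract emitted nothing in this block
	}
	for _, deposit := range events.Deposit {
		_ = deposit // process deposits, withdrawals, delay changes, ...
	}
	return nil
}
```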
@@ -52,7 +52,7 @@ func TestWDelayerSetHermezGovernanceAddress(t *testing.T) {
 	assert.Equal(t, &auxAddressConst, auxAddress)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
 	_, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst)
@@ -81,7 +81,7 @@ func TestWDelayerSetEmergencyCouncil(t *testing.T) {
 	assert.Equal(t, &auxAddressConst, auxAddress)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, auxAddressConst, wdelayerEvents.NewEmergencyCouncil[0].NewEmergencyCouncil)
 	_, err = wdelayerClientAux.WDelayerTransferEmergencyCouncil(emergencyCouncilAddressConst)
@@ -110,7 +110,7 @@ func TestWDelayerChangeWithdrawalDelay(t *testing.T) {
 	assert.Equal(t, newWithdrawalDelay, withdrawalDelay)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, uint64(newWithdrawalDelay), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay)
 }
@@ -124,7 +124,7 @@ func TestWDelayerDeposit(t *testing.T) {
 	require.Nil(t, err)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
 	assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -150,7 +150,7 @@ func TestWDelayerWithdrawal(t *testing.T) {
 	require.Nil(t, err)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount)
 	assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner)
@@ -166,7 +166,7 @@ func TestWDelayerSecondDeposit(t *testing.T) {
 	require.Nil(t, err)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
 	assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -181,7 +181,7 @@ func TestWDelayerEnableEmergencyMode(t *testing.T) {
 	assert.Equal(t, true, emergencyMode)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	auxEvent := new(WDelayerEventEmergencyModeEnabled)
 	assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0])
@@ -210,7 +210,7 @@ func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
 	require.Nil(t, err)
 	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
 	require.Nil(t, err)
-	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
+	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token)
 	assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To)
3
go.mod
3
go.mod
@@ -17,11 +17,11 @@ require (
|
|||||||
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
|
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
|
||||||
github.com/joho/godotenv v1.3.0
|
github.com/joho/godotenv v1.3.0
|
||||||
github.com/lib/pq v1.8.0
|
github.com/lib/pq v1.8.0
|
||||||
github.com/marusama/semaphore/v2 v2.4.1
|
|
||||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
||||||
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
|
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
|
||||||
github.com/mitchellh/copystructure v1.0.0
|
github.com/mitchellh/copystructure v1.0.0
|
||||||
github.com/mitchellh/mapstructure v1.3.0
|
github.com/mitchellh/mapstructure v1.3.0
|
||||||
|
github.com/prometheus/client_golang v1.3.0
|
||||||
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
|
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
|
||||||
github.com/russross/meddler v1.0.0
|
github.com/russross/meddler v1.0.0
|
||||||
github.com/stretchr/testify v1.6.1
|
github.com/stretchr/testify v1.6.1
|
||||||
@@ -29,5 +29,6 @@ require (
|
|||||||
go.uber.org/zap v1.16.0
|
go.uber.org/zap v1.16.0
|
||||||
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
|
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
|
||||||
golang.org/x/net v0.0.0-20200822124328-c89045814202
|
golang.org/x/net v0.0.0-20200822124328-c89045814202
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
|
||||||
gopkg.in/go-playground/validator.v9 v9.29.1
|
gopkg.in/go-playground/validator.v9 v9.29.1
|
||||||
)
|
)
|
||||||
go.sum (19 changes)

```diff
@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
 github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -66,6 +68,7 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
 github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
@@ -84,6 +87,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
+github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
+github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -169,6 +174,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -415,9 +422,6 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
 github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
 github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
-github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
-github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -441,6 +445,7 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
 github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
 github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
@@ -541,23 +546,27 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
 github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
 github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
@@ -599,6 +608,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -617,6 +628,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
```
```diff
@@ -67,6 +67,11 @@ func Init(levelStr string, outputs []string) {
 
 func sprintStackTrace(st []tracerr.Frame) string {
 	builder := strings.Builder{}
+	// Skip deepest frame because it belongs to the go runtime and we don't
+	// care about it.
+	if len(st) > 0 {
+		st = st[:len(st)-1]
+	}
 	for _, f := range st {
 		builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
 	}
```
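The hunk above trims the deepest frame from the printed stack trace, since that frame always belongs to the Go runtime rather than application code. A standalone sketch of the same logic, using a local `Frame` type as an illustrative stand-in for `tracerr.Frame`:

```go
package main

import (
	"fmt"
	"strings"
)

// Frame is a stand-in for tracerr.Frame in this sketch.
type Frame struct {
	Path string
	Line int
	Func string
}

func sprintStackTrace(st []Frame) string {
	builder := strings.Builder{}
	// Skip the deepest frame: it belongs to the go runtime.
	if len(st) > 0 {
		st = st[:len(st)-1]
	}
	for _, f := range st {
		builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
	}
	return builder.String()
}

func main() {
	st := []Frame{
		{"main.go", 10, "main.doWork"},
		{"proc.go", 225, "runtime.main"},
	}
	fmt.Println(sprintStackTrace(st)) // only main.doWork is printed
}
```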
node/node.go (412 changes)

```diff
@@ -2,7 +2,9 @@ package node
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"net"
 	"net/http"
 	"sync"
 	"time"
@@ -52,6 +54,7 @@ const (
 // Node is the Hermez Node
 type Node struct {
 	nodeAPI *NodeAPI
+	stateAPIUpdater *api.StateAPIUpdater
 	debugAPI *debugapi.DebugAPI
 	priceUpdater *priceupdater.PriceUpdater
 	// Coordinator
@@ -63,7 +66,9 @@ type Node struct {
 	// General
 	cfg *config.Node
 	mode Mode
-	sqlConn *sqlx.DB
+	sqlConnRead *sqlx.DB
+	sqlConnWrite *sqlx.DB
+	historyDB *historydb.HistoryDB
 	ctx context.Context
 	wg sync.WaitGroup
 	cancel context.CancelFunc
@@ -73,15 +78,34 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 	meddler.Debug = cfg.Debug.MeddlerLogs
 	// Stablish DB connection
-	db, err := dbUtils.InitSQLDB(
-		cfg.PostgreSQL.Port,
-		cfg.PostgreSQL.Host,
-		cfg.PostgreSQL.User,
-		cfg.PostgreSQL.Password,
-		cfg.PostgreSQL.Name,
+	dbWrite, err := dbUtils.InitSQLDB(
+		cfg.PostgreSQL.PortWrite,
+		cfg.PostgreSQL.HostWrite,
+		cfg.PostgreSQL.UserWrite,
+		cfg.PostgreSQL.PasswordWrite,
+		cfg.PostgreSQL.NameWrite,
 	)
 	if err != nil {
-		return nil, tracerr.Wrap(err)
+		return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+	}
+	var dbRead *sqlx.DB
+	if cfg.PostgreSQL.HostRead == "" {
+		dbRead = dbWrite
+	} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
+		return nil, tracerr.Wrap(fmt.Errorf(
+			"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
+		))
+	} else {
+		dbRead, err = dbUtils.InitSQLDB(
+			cfg.PostgreSQL.PortRead,
+			cfg.PostgreSQL.HostRead,
+			cfg.PostgreSQL.UserRead,
+			cfg.PostgreSQL.PasswordRead,
+			cfg.PostgreSQL.NameRead,
+		)
+		if err != nil {
+			return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+		}
 	}
 	var apiConnCon *dbUtils.APIConnectionController
 	if cfg.API.Explorer || mode == ModeCoordinator {
@@ -91,7 +115,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		)
 	}
 
-	historyDB := historydb.NewHistoryDB(db, apiConnCon)
+	historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
 
 	ethClient, err := ethclient.Dial(cfg.Web3.URL)
 	if err != nil {
@@ -102,8 +126,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 	var keyStore *ethKeystore.KeyStore
 	if mode == ModeCoordinator {
 		ethCfg = eth.EthereumConfig{
-			CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
-			GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv,
+			CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit,
+			GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv,
 		}
 
 		scryptN := ethKeystore.StandardScryptN
@@ -115,6 +139,23 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
 			scryptN, scryptP)
 
+		balance, err := ethClient.BalanceAt(context.TODO(), cfg.Coordinator.ForgerAddress, nil)
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+
+		minForgeBalance := cfg.Coordinator.MinimumForgeAddressBalance
+		if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
+			return nil, tracerr.Wrap(fmt.Errorf(
+				"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
+				balance.Int64(), minForgeBalance))
+		}
+		log.Infow("forger ethereum account balance",
+			"addr", cfg.Coordinator.ForgerAddress,
+			"balance", balance.Int64(),
+			"minForgeBalance", minForgeBalance.Int64(),
+		)
+
 		// Unlock Coordinator ForgerAddr in the keystore to make calls
 		// to ForgeBatch in the smart contract
 		if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
@@ -171,8 +212,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		return nil, tracerr.Wrap(fmt.Errorf("cfg.StateDB.Keep = %v < %v, which is unsafe",
 			cfg.StateDB.Keep, safeStateDBKeep))
 	}
-	stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, cfg.StateDB.Keep,
-		statedb.TypeSynchronizer, 32)
+	stateDB, err := statedb.NewStateDB(statedb.Config{
+		Path: cfg.StateDB.Path,
+		Keep: cfg.StateDB.Keep,
+		Type: statedb.TypeSynchronizer,
+		NLevels: statedb.MaxNLevels,
+	})
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -186,19 +231,42 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 	}
 	initSCVars := sync.SCVars()
 
-	scConsts := synchronizer.SCConsts{
+	scConsts := common.SCConsts{
 		Rollup: *sync.RollupConstants(),
 		Auction: *sync.AuctionConstants(),
 		WDelayer: *sync.WDelayerConstants(),
 	}
 
+	hdbNodeCfg := historydb.NodeConfig{
+		MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
+		MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
+	}
+	if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	hdbConsts := historydb.Constants{
+		SCConsts: common.SCConsts{
+			Rollup: scConsts.Rollup,
+			Auction: scConsts.Auction,
+			WDelayer: scConsts.WDelayer,
+		},
+		ChainID: chainIDU16,
+		HermezAddress: cfg.SmartContracts.Rollup,
+	}
+	if err := historyDB.SetConstants(&hdbConsts); err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+
+	stateAPIUpdater := api.NewStateAPIUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
+
 	var coord *coordinator.Coordinator
 	var l2DB *l2db.L2DB
 	if mode == ModeCoordinator {
 		l2DB = l2db.NewL2DB(
-			db,
+			dbRead, dbWrite,
 			cfg.Coordinator.L2DB.SafetyPeriod,
 			cfg.Coordinator.L2DB.MaxTxs,
+			cfg.Coordinator.L2DB.MinFeeUSD,
 			cfg.Coordinator.L2DB.TTL.Duration,
 			apiConnCon,
 		)
@@ -240,9 +308,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		if err != nil {
-			return nil, tracerr.Wrap(err)
-		}
 		serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs))
 		for i, serverProofCfg := range cfg.Coordinator.ServerProofs {
 			serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
@@ -256,7 +321,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
 			MaxL1Tx: common.RollupConstMaxL1Tx,
 		}
-		verifierIdx, err := scConsts.Rollup.FindVerifierIdx(
+		var verifierIdx int
+		if cfg.Coordinator.Debug.RollupVerifierIndex == nil {
+			verifierIdx, err = scConsts.Rollup.FindVerifierIdx(
 				cfg.Coordinator.Circuit.MaxTx,
 				cfg.Coordinator.Circuit.NLevels,
 			)
@@ -264,6 +331,27 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 				return nil, tracerr.Wrap(err)
 			}
 			log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
+		} else {
+			verifierIdx = *cfg.Coordinator.Debug.RollupVerifierIndex
+			log.Infow("Using debug verifier index from config", "verifierIdx", verifierIdx)
+			if verifierIdx >= len(scConsts.Rollup.Verifiers) {
+				return nil, tracerr.Wrap(
+					fmt.Errorf("verifierIdx (%v) >= "+
+						"len(scConsts.Rollup.Verifiers) (%v)",
+						verifierIdx, len(scConsts.Rollup.Verifiers)))
+			}
+			verifier := scConsts.Rollup.Verifiers[verifierIdx]
+			if verifier.MaxTx != cfg.Coordinator.Circuit.MaxTx ||
+				verifier.NLevels != cfg.Coordinator.Circuit.NLevels {
+				return nil, tracerr.Wrap(
+					fmt.Errorf("Circuit config and verifier params don't match. "+
+						"circuit.MaxTx = %v, circuit.NLevels = %v, "+
+						"verifier.MaxTx = %v, verifier.NLevels = %v",
+						cfg.Coordinator.Circuit.MaxTx, cfg.Coordinator.Circuit.NLevels,
+						verifier.MaxTx, verifier.NLevels,
+					))
+			}
+		}
 
 		coord, err = coordinator.NewCoordinator(
 			coordinator.Config{
@@ -271,9 +359,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 				ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
 				L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
 				ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
+				ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
+				ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
 				SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
+				PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
 				EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
 				EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
+				EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
+				EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
+				MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
+				GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
 				TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
 				DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
 				Purger: coordinator.PurgerCfg{
@@ -282,6 +377,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 					PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
 					InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
 				},
+				ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
 				VerifierIdx: uint8(verifierIdx),
 				TxProcessorConfig: txProcessorCfg,
 			},
@@ -292,11 +388,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			serverProofs,
 			client,
 			&scConsts,
-			&synchronizer.SCVariables{
-				Rollup: *initSCVars.Rollup,
-				Auction: *initSCVars.Auction,
-				WDelayer: *initSCVars.WDelayer,
-			},
+			initSCVars,
 		)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
@@ -304,6 +396,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 	}
 	var nodeAPI *NodeAPI
 	if cfg.API.Address != "" {
+		if cfg.Debug.GinDebugMode {
+			gin.SetMode(gin.DebugMode)
+		} else {
+			gin.SetMode(gin.ReleaseMode)
+		}
 		if cfg.API.UpdateMetricsInterval.Duration == 0 {
 			return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
 				cfg.API.UpdateMetricsInterval.Duration))
@@ -323,22 +420,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			coord, cfg.API.Explorer,
 			server,
 			historyDB,
-			stateDB,
 			l2DB,
-			&api.Config{
-				RollupConstants: scConsts.Rollup,
-				AuctionConstants: scConsts.Auction,
-				WDelayerConstants: scConsts.WDelayer,
-				ChainID: chainIDU16,
-				HermezAddress: cfg.SmartContracts.Rollup,
-			},
 		)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
-		nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
-		nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
 	}
 	var debugAPI *debugapi.DebugAPI
 	if cfg.Debug.APIAddress != "" {
@@ -351,6 +437,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	return &Node{
+		stateAPIUpdater: stateAPIUpdater,
 		nodeAPI: nodeAPI,
 		debugAPI: debugAPI,
 		priceUpdater: priceUpdater,
@@ -358,12 +445,130 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		sync: sync,
 		cfg: cfg,
 		mode: mode,
-		sqlConn: db,
+		sqlConnRead: dbRead,
+		sqlConnWrite: dbWrite,
+		historyDB: historyDB,
 		ctx: ctx,
 		cancel: cancel,
 	}, nil
 }
+
+// APIServer is a server that only runs the API
+type APIServer struct {
+	nodeAPI *NodeAPI
+	mode Mode
+	ctx context.Context
+	wg sync.WaitGroup
+	cancel context.CancelFunc
+}
+
+// NewAPIServer creates a new APIServer
+func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
+	meddler.Debug = cfg.Debug.MeddlerLogs
+	// Stablish DB connection
+	dbWrite, err := dbUtils.InitSQLDB(
+		cfg.PostgreSQL.PortWrite,
+		cfg.PostgreSQL.HostWrite,
+		cfg.PostgreSQL.UserWrite,
+		cfg.PostgreSQL.PasswordWrite,
+		cfg.PostgreSQL.NameWrite,
+	)
+	if err != nil {
+		return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+	}
+	var dbRead *sqlx.DB
+	if cfg.PostgreSQL.HostRead == "" {
+		dbRead = dbWrite
+	} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
+		return nil, tracerr.Wrap(fmt.Errorf(
+			"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
+		))
+	} else {
+		dbRead, err = dbUtils.InitSQLDB(
+			cfg.PostgreSQL.PortRead,
+			cfg.PostgreSQL.HostRead,
+			cfg.PostgreSQL.UserRead,
+			cfg.PostgreSQL.PasswordRead,
+			cfg.PostgreSQL.NameRead,
+		)
+		if err != nil {
+			return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
+		}
+	}
+	apiConnCon := dbUtils.NewAPICnnectionController(
+		cfg.API.MaxSQLConnections,
+		cfg.API.SQLConnectionTimeout.Duration,
+	)
+
+	historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
+
+	var l2DB *l2db.L2DB
+	if mode == ModeCoordinator {
+		l2DB = l2db.NewL2DB(
+			dbRead, dbWrite,
+			0,
+			cfg.Coordinator.L2DB.MaxTxs,
+			cfg.Coordinator.L2DB.MinFeeUSD,
+			0,
+			apiConnCon,
+		)
+	}
+
+	if cfg.Debug.GinDebugMode {
+		gin.SetMode(gin.DebugMode)
+	} else {
+		gin.SetMode(gin.ReleaseMode)
+	}
+	server := gin.Default()
+	coord := false
+	if mode == ModeCoordinator {
+		coord = cfg.Coordinator.API.Coordinator
+	}
+	nodeAPI, err := NewNodeAPI(
+		cfg.API.Address,
+		coord, cfg.API.Explorer,
+		server,
+		historyDB,
+		l2DB,
+	)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	return &APIServer{
+		nodeAPI: nodeAPI,
+		mode: mode,
+		ctx: ctx,
+		cancel: cancel,
+	}, nil
+}
+
+// Start the APIServer
+func (s *APIServer) Start() {
+	log.Infow("Starting api server...", "mode", s.mode)
+	log.Info("Starting NodeAPI...")
+	s.wg.Add(1)
+	go func() {
+		defer func() {
+			log.Info("NodeAPI routine stopped")
+			s.wg.Done()
+		}()
+		if err := s.nodeAPI.Run(s.ctx); err != nil {
+			if s.ctx.Err() != nil {
+				return
+			}
+			log.Fatalw("NodeAPI.Run", "err", err)
+		}
+	}()
+}
+
+// Stop the APIServer
+func (s *APIServer) Stop() {
+	log.Infow("Stopping NodeAPI...")
+	s.cancel()
+	s.wg.Wait()
+}
+
 // NodeAPI holds the node http API
 type NodeAPI struct { //nolint:golint
 	api *api.API
@@ -383,9 +588,7 @@ func NewNodeAPI(
 	coordinatorEndpoints, explorerEndpoints bool,
 	server *gin.Engine,
 	hdb *historydb.HistoryDB,
-	sdb *statedb.StateDB,
 	l2db *l2db.L2DB,
-	config *api.Config,
 ) (*NodeAPI, error) {
 	engine := gin.Default()
 	engine.NoRoute(handleNoRoute)
@@ -394,9 +597,7 @@ func NewNodeAPI(
 		coordinatorEndpoints, explorerEndpoints,
 		engine,
 		hdb,
-		sdb,
 		l2db,
-		config,
 	)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
@@ -412,16 +613,20 @@ func NewNodeAPI(
 // cancelation.
 func (a *NodeAPI) Run(ctx context.Context) error {
 	server := &http.Server{
-		Addr: a.addr,
 		Handler: a.engine,
 		// TODO: Figure out best parameters for production
 		ReadTimeout: 30 * time.Second, //nolint:gomnd
 		WriteTimeout: 30 * time.Second, //nolint:gomnd
 		MaxHeaderBytes: 1 << 20, //nolint:gomnd
 	}
-	go func() {
+	listener, err := net.Listen("tcp", a.addr)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
 	log.Infof("NodeAPI is ready at %v", a.addr)
-	if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+	go func() {
+		if err := server.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			log.Fatalf("Listen: %s\n", err)
 		}
 	}()
```
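The `Run` rewrite above replaces `ListenAndServe` with an explicit `net.Listen` followed by `server.Serve` in a goroutine, so a bind failure (bad address, port already taken) is returned synchronously to the caller instead of crashing from inside the goroutine. A minimal sketch of the same pattern, assuming a plain `net/http` server without the node's `tracerr` wrapping:

```go
package main

import (
	"context"
	"log"
	"net"
	"net/http"
	"time"
)

// runServer binds first, so address errors surface to the caller,
// then serves in the background until ctx is cancelled.
func runServer(ctx context.Context, addr string, handler http.Handler) error {
	server := &http.Server{Handler: handler}
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err // e.g. port already in use: the caller sees it immediately
	}
	log.Printf("server ready at %v", listener.Addr())
	go func() {
		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
			log.Fatalf("Serve: %v", err)
		}
	}()
	<-ctx.Done()
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return server.Shutdown(shutdownCtx)
}
```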
```diff
@@ -437,60 +642,57 @@ func (a *NodeAPI) Run(ctx context.Context) error {
 	return nil
 }
 
-func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr,
-	batches []common.BatchData) {
+func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr,
+	batches []common.BatchData) error {
 	if n.mode == ModeCoordinator {
 		n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
 			Stats: *stats,
-			Vars: vars,
+			Vars: *vars,
 			Batches: batches,
 		})
 	}
-	if n.nodeAPI != nil {
-		if vars.Rollup != nil {
-			n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
-		}
-		if vars.Auction != nil {
-			n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
-		}
-		if vars.WDelayer != nil {
-			n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
-		}
+	n.stateAPIUpdater.SetSCVars(vars)
 
 	if stats.Synced() {
-		if err := n.nodeAPI.api.UpdateNetworkInfo(
+		if err := n.stateAPIUpdater.UpdateNetworkInfo(
 			stats.Eth.LastBlock, stats.Sync.LastBlock,
-			common.BatchNum(stats.Eth.LastBatch),
+			common.BatchNum(stats.Eth.LastBatchNum),
 			stats.Sync.Auction.CurrentSlot.SlotNum,
 		); err != nil {
-			log.Errorw("API.UpdateNetworkInfo", "err", err)
+			log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
 		}
-	}
-	}
-}
-
-func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
-	if n.mode == ModeCoordinator {
-		n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
-			Stats: *stats,
-			Vars: vars,
-		})
-	}
-	if n.nodeAPI != nil {
-		vars := n.sync.SCVars()
-		n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
-		n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
-		n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
-		n.nodeAPI.api.UpdateNetworkInfoBlock(
+	} else {
+		n.stateAPIUpdater.UpdateNetworkInfoBlock(
 			stats.Eth.LastBlock, stats.Sync.LastBlock,
 		)
 	}
+	if err := n.stateAPIUpdater.Store(); err != nil {
+		return tracerr.Wrap(err)
+	}
+	return nil
+}
+
+func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
+	vars *common.SCVariables) error {
+	if n.mode == ModeCoordinator {
+		n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
+			Stats: *stats,
+			Vars: *vars.AsPtr(),
+		})
+	}
+	n.stateAPIUpdater.SetSCVars(vars.AsPtr())
+	n.stateAPIUpdater.UpdateNetworkInfoBlock(
+		stats.Eth.LastBlock, stats.Sync.LastBlock,
+	)
+	if err := n.stateAPIUpdater.Store(); err != nil {
+		return tracerr.Wrap(err)
+	}
+	return nil
 }
 
 // TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
 // don't have to pass it around.
 func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
-	blockData, discarded, err := n.sync.Sync2(ctx, lastBlock)
+	blockData, discarded, err := n.sync.Sync(ctx, lastBlock)
 	stats := n.sync.Stats()
 	if err != nil {
 		// case: error
@@ -499,16 +701,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
 		// case: reorg
 		log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
 		vars := n.sync.SCVars()
-		n.handleReorg(ctx, stats, vars)
+		if err := n.handleReorg(ctx, stats, vars); err != nil {
+			return nil, time.Duration(0), tracerr.Wrap(err)
+		}
 		return nil, time.Duration(0), nil
 	} else if blockData != nil {
 		// case: new block
-		vars := synchronizer.SCVariablesPtr{
+		vars := common.SCVariablesPtr{
 			Rollup: blockData.Rollup.Vars,
 			Auction: blockData.Auction.Vars,
 			WDelayer: blockData.WDelayer.Vars,
 		}
-		n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
+		if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
+			return nil, time.Duration(0), tracerr.Wrap(err)
+		}
 		return &blockData.Block, time.Duration(0), nil
 	} else {
 		// case: no block
@@ -527,7 +733,9 @@ func (n *Node) StartSynchronizer() {
 	// the last synced one) is synchronized
 	stats := n.sync.Stats()
 	vars := n.sync.SCVars()
-	n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{})
+	if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
+		log.Fatalw("Node.handleNewBlock", "err", err)
+	}
 
 	n.wg.Add(1)
 	go func() {
@@ -546,10 +754,16 @@ func (n *Node) StartSynchronizer() {
 				if n.ctx.Err() != nil {
 					continue
 				}
+				if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
+					log.Warnw("Synchronizer.Sync", "err", err)
+				} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
+					log.Warnw("Synchronizer.Sync", "err", err)
+				} else {
 					log.Errorw("Synchronizer.Sync", "err", err)
 				}
 			}
 		}
+	}
 	}()
 
 	n.wg.Add(1)
@@ -607,15 +821,26 @@ func (n *Node) StartNodeAPI() {
 
 	n.wg.Add(1)
 	go func() {
+		// Do an initial update on startup
+		if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
+			log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
+		}
+		if err := n.stateAPIUpdater.Store(); err != nil {
+			log.Errorw("ApiStateUpdater.Store", "err", err)
+		}
 		for {
 			select {
 			case <-n.ctx.Done():
-				log.Info("API.UpdateMetrics loop done")
+				log.Info("ApiStateUpdater.UpdateMetrics loop done")
 				n.wg.Done()
 				return
 			case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
-				if err := n.nodeAPI.api.UpdateMetrics(); err != nil {
-					log.Errorw("API.UpdateMetrics", "err", err)
+				if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
+					log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
+					continue
+				}
+				if err := n.stateAPIUpdater.Store(); err != nil {
+					log.Errorw("ApiStateUpdater.Store", "err", err)
 				}
 			}
 		}
@@ -623,15 +848,26 @@ func (n *Node) StartNodeAPI() {
 
 	n.wg.Add(1)
 	go func() {
+		// Do an initial update on startup
+		if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
+			log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
+		}
+		if err := n.stateAPIUpdater.Store(); err != nil {
+			log.Errorw("ApiStateUpdater.Store", "err", err)
+		}
 		for {
 			select {
 			case <-n.ctx.Done():
-				log.Info("API.UpdateRecommendedFee loop done")
+				log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
 				n.wg.Done()
 				return
 			case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
-				if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil {
-					log.Errorw("API.UpdateRecommendedFee", "err", err)
+				if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
+					log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
+					continue
+				}
+				if err := n.stateAPIUpdater.Store(); err != nil {
+					log.Errorw("ApiStateUpdater.Store", "err", err)
 				}
 			}
 		}
```
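Both `NewNode` and the new `NewAPIServer` above apply the same read/write PostgreSQL split: the write connection is always opened, and reads fall back to it unless a distinct read host is configured (a read host equal to the write host is rejected). A minimal sketch of that decision logic; the `Config` struct and `initSQLDB` helper here are illustrative stand-ins, not the node's actual types:

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// Config is an illustrative stand-in for the node's PostgreSQL settings.
type Config struct {
	HostWrite, HostRead  string
	PortWrite, PortRead  int
	User, Password, Name string
}

func initSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
	dsn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
		host, port, user, password, name)
	return sqlx.Connect("postgres", dsn)
}

// openReadWrite returns (dbRead, dbWrite); dbRead aliases dbWrite when no
// separate read replica is configured.
func openReadWrite(cfg Config) (*sqlx.DB, *sqlx.DB, error) {
	dbWrite, err := initSQLDB(cfg.PortWrite, cfg.HostWrite, cfg.User, cfg.Password, cfg.Name)
	if err != nil {
		return nil, nil, fmt.Errorf("init write db: %w", err)
	}
	if cfg.HostRead == "" {
		return dbWrite, dbWrite, nil
	}
	if cfg.HostRead == cfg.HostWrite {
		return nil, nil, fmt.Errorf("HostRead and HostWrite must be different")
	}
	dbRead, err := initSQLDB(cfg.PortRead, cfg.HostRead, cfg.User, cfg.Password, cfg.Name)
	if err != nil {
		return nil, nil, fmt.Errorf("init read db: %w", err)
	}
	return dbRead, dbWrite, nil
}
```

Pointing reads at a replica keeps heavy API queries off the primary, which is also why `NewHistoryDB` and `NewL2DB` now take the two handles separately.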
```diff
@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	assert.NoError(t, err)
-	historyDB := historydb.NewHistoryDB(db, nil)
+	historyDB := historydb.NewHistoryDB(db, db, nil)
 	// Clean DB
 	test.WipeDB(historyDB.DB())
 	// Populate DB
```
synchronizer/metrics.go (new file, 44 lines)

```diff
@@ -0,0 +1,44 @@
+package synchronizer
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	metricReorgsCount = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "sync_reorgs",
+			Help: "",
+		},
+	)
+	metricSyncedLastBlockNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "sync_synced_last_block_num",
+			Help: "",
+		},
+	)
+	metricEthLastBlockNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "sync_eth_last_block_num",
+			Help: "",
+		},
+	)
+	metricSyncedLastBatchNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "sync_synced_last_batch_num",
+			Help: "",
+		},
+	)
+	metricEthLastBatchNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "sync_eth_last_batch_num",
+			Help: "",
+		},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(metricReorgsCount)
+	prometheus.MustRegister(metricSyncedLastBlockNum)
+	prometheus.MustRegister(metricEthLastBlockNum)
+	prometheus.MustRegister(metricSyncedLastBatchNum)
+	prometheus.MustRegister(metricEthLastBatchNum)
+}
```
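The metrics registered above only take effect once the synchronizer sets their values and an HTTP endpoint exposes them to a scraper. A minimal sketch of both halves using the standard `promhttp` handler; the wiring shown here is an assumption for illustration, not necessarily how hermez-node exposes its metrics:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var metricSyncedLastBlockNum = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "sync_synced_last_block_num",
	Help: "last block number synchronized",
})

func init() {
	prometheus.MustRegister(metricSyncedLastBlockNum)
}

func main() {
	// After each synced block, update the gauge.
	metricSyncedLastBlockNum.Set(float64(1234567))

	// Expose all registered metrics for scraping.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil)
}
```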
@@ -18,6 +18,19 @@ import (
|
|||||||
"github.com/hermeznetwork/tracerr"
|
"github.com/hermeznetwork/tracerr"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// errStrUnknownBlock is the string returned by geth when querying an
|
||||||
|
// unknown block
|
||||||
|
errStrUnknownBlock = "unknown block"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrUnknownBlock is the error returned by the Synchronizer when a
|
||||||
|
// block is queried by hash but the ethereum node doesn't find it due
|
||||||
|
// to it being discarded from a reorg.
|
||||||
|
ErrUnknownBlock = fmt.Errorf("unknown block")
|
||||||
|
)
|
||||||
|
|
||||||
// Stats of the syncrhonizer
|
// Stats of the syncrhonizer
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
Eth struct {
|
Eth struct {
|
||||||
@@ -25,12 +38,12 @@ type Stats struct {
|
|||||||
Updated time.Time
|
Updated time.Time
|
||||||
FirstBlockNum int64
|
FirstBlockNum int64
|
||||||
LastBlock common.Block
|
LastBlock common.Block
|
||||||
LastBatch int64
|
LastBatchNum int64
|
||||||
}
|
}
|
||||||
Sync struct {
|
Sync struct {
|
||||||
Updated time.Time
|
Updated time.Time
|
||||||
LastBlock common.Block
|
LastBlock common.Block
|
||||||
LastBatch int64
|
LastBatch common.Batch
|
||||||
// LastL1BatchBlock is the last ethereum block in which an
|
// LastL1BatchBlock is the last ethereum block in which an
|
||||||
// l1Batch was forged
|
// l1Batch was forged
|
||||||
LastL1BatchBlock int64
|
LastL1BatchBlock int64
|
||||||
@@ -77,13 +90,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateSync updates the synchronizer stats
|
// UpdateSync updates the synchronizer stats
|
||||||
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
|
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
|
||||||
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
|
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
s.rw.Lock()
|
s.rw.Lock()
|
||||||
s.Sync.LastBlock = *lastBlock
|
s.Sync.LastBlock = *lastBlock
|
||||||
if lastBatch != nil {
|
if lastBatch != nil {
|
||||||
s.Sync.LastBatch = int64(*lastBatch)
|
s.Sync.LastBatch = *lastBatch
|
||||||
}
|
}
|
||||||
if lastL1BatchBlock != nil {
|
if lastL1BatchBlock != nil {
|
||||||
s.Sync.LastL1BatchBlock = *lastL1BatchBlock
|
s.Sync.LastL1BatchBlock = *lastL1BatchBlock
|
||||||
@@ -105,16 +118,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
 
     lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
     if err != nil {
-        return tracerr.Wrap(err)
+        return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
     }
-    lastBatch, err := ethClient.RollupLastForgedBatch()
+    lastBatchNum, err := ethClient.RollupLastForgedBatch()
     if err != nil {
-        return tracerr.Wrap(err)
+        return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
    }
     s.rw.Lock()
     s.Eth.Updated = now
     s.Eth.LastBlock = *lastBlock
-    s.Eth.LastBatch = lastBatch
+    s.Eth.LastBatchNum = lastBatchNum
     s.rw.Unlock()
     return nil
 }
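The error-handling change above (wrapping with `fmt.Errorf("...: %w", err)` before `tracerr.Wrap`) keeps the original error reachable through `errors.Is`/`errors.As` while recording which call failed. A small sketch of the idiom, with a hypothetical `ethBlockByNumber` standing in for the client call:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// ethBlockByNumber stands in for the eth client call; it always fails here.
func ethBlockByNumber(n int64) error { return errNotFound }

func updateEth() error {
	if err := ethBlockByNumber(-1); err != nil {
		// %w keeps the original error reachable via errors.Is/errors.As
		// while the prefix records which call failed.
		return fmt.Errorf("EthBlockByNumber: %w", err)
	}
	return nil
}

func main() {
	err := updateEth()
	fmt.Println(err)                         // EthBlockByNumber: not found
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```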
@@ -139,6 +152,10 @@ func (s *StatsHolder) CopyStats() *Stats {
         sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
             common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
     }
+    if s.Sync.LastBatch.StateRoot != nil {
+        sCopy.Sync.LastBatch.StateRoot =
+            common.CopyBigInt(s.Sync.LastBatch.StateRoot)
+    }
     s.rw.RUnlock()
     return &sCopy
 }
@@ -152,9 +169,9 @@ func (s *StatsHolder) blocksPerc() float64 {
         float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
 }
 
-func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
+func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
     return float64(batchNum) * 100.0 /
-        float64(s.Eth.LastBatch)
+        float64(s.Eth.LastBatchNum)
 }
 
 // StartBlockNums sets the first block used to start tracking the smart
@@ -166,26 +183,26 @@ type StartBlockNums struct {
 }
 
 // SCVariables joins all the smart contract variables in a single struct
-type SCVariables struct {
-    Rollup   common.RollupVariables   `validate:"required"`
-    Auction  common.AuctionVariables  `validate:"required"`
-    WDelayer common.WDelayerVariables `validate:"required"`
-}
-
-// SCVariablesPtr joins all the smart contract variables as pointers in a single
-// struct
-type SCVariablesPtr struct {
-    Rollup   *common.RollupVariables   `validate:"required"`
-    Auction  *common.AuctionVariables  `validate:"required"`
-    WDelayer *common.WDelayerVariables `validate:"required"`
-}
-
-// SCConsts joins all the smart contract constants in a single struct
-type SCConsts struct {
-    Rollup   common.RollupConstants
-    Auction  common.AuctionConstants
-    WDelayer common.WDelayerConstants
-}
+// type SCVariables struct {
+//     Rollup   common.RollupVariables   `validate:"required"`
+//     Auction  common.AuctionVariables  `validate:"required"`
+//     WDelayer common.WDelayerVariables `validate:"required"`
+// }
+//
+// // SCVariablesPtr joins all the smart contract variables as pointers in a single
+// // struct
+// type SCVariablesPtr struct {
+//     Rollup   *common.RollupVariables   `validate:"required"`
+//     Auction  *common.AuctionVariables  `validate:"required"`
+//     WDelayer *common.WDelayerVariables `validate:"required"`
+// }
+//
+// // SCConsts joins all the smart contract constants in a single struct
+// type SCConsts struct {
+//     Rollup   common.RollupConstants
+//     Auction  common.AuctionConstants
+//     WDelayer common.WDelayerConstants
+// }
 
 // Config is the Synchronizer configuration
 type Config struct {
@@ -196,14 +213,15 @@ type Config struct {
 // Synchronizer implements the Synchronizer type
 type Synchronizer struct {
     ethClient        eth.ClientInterface
-    consts           SCConsts
+    consts           common.SCConsts
     historyDB        *historydb.HistoryDB
     stateDB          *statedb.StateDB
     cfg              Config
-    initVars         SCVariables
+    initVars         common.SCVariables
     startBlockNum    int64
-    vars             SCVariables
+    vars             common.SCVariables
     stats            *StatsHolder
+    resetStateFailed bool
 }
 
 // NewSynchronizer creates a new Synchronizer
@@ -224,7 +242,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
         return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
             err))
     }
-    consts := SCConsts{
+    consts := common.SCConsts{
         Rollup:   *rollupConstants,
         Auction:  *auctionConstants,
         WDelayer: *wDelayerConstants,
@@ -289,11 +307,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
 }
 
 // SCVars returns a copy of the Smart Contract Variables
-func (s *Synchronizer) SCVars() SCVariablesPtr {
-    return SCVariablesPtr{
-        Rollup:   s.vars.Rollup.Copy(),
-        Auction:  s.vars.Auction.Copy(),
-        WDelayer: s.vars.WDelayer.Copy(),
+func (s *Synchronizer) SCVars() *common.SCVariables {
+    return &common.SCVariables{
+        Rollup:   *s.vars.Rollup.Copy(),
+        Auction:  *s.vars.Auction.Copy(),
+        WDelayer: *s.vars.WDelayer.Copy(),
     }
 }
 
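`SCVars` now returns dereferenced copies, so callers get a snapshot they cannot use to mutate the synchronizer's internal state. A sketch of why a deep `Copy()` matters once the variables hold reference types — the field here is simplified to `[]int64` for illustration; the real structs hold slices and `*big.Int` values:

```go
package main

import "fmt"

// Illustrative shape: the real AuctionVariables holds slices and big.Ints,
// which is why SCVars above dereferences Copy() into a fresh struct.
type auctionVars struct{ DefaultSlotSetBid []int64 }

// Copy duplicates the backing array so the result shares no memory
// with the receiver.
func (v *auctionVars) Copy() *auctionVars {
	out := auctionVars{DefaultSlotSetBid: make([]int64, len(v.DefaultSlotSetBid))}
	copy(out.DefaultSlotSetBid, v.DefaultSlotSetBid)
	return &out
}

func main() {
	internal := auctionVars{DefaultSlotSetBid: []int64{10, 20}}
	external := *internal.Copy() // same pattern as SCVars above
	external.DefaultSlotSetBid[0] = 99
	fmt.Println(internal.DefaultSlotSetBid[0]) // still 10: caller can't mutate state
}
```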
@@ -328,23 +346,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
     return nil
 }
 
-// firstBatchBlockNum is the blockNum of first batch in that block, if any
-func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
-    slot := common.Slot{
-        SlotNum:          s.stats.Sync.Auction.CurrentSlot.SlotNum,
-        ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
-    }
+// updateCurrentSlot updates the slot with information of the current slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+// hasBatch is true when the last synced block contained at least one batch.
+func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
     // We want the next block because the current one is already mined
     blockNum := s.stats.Sync.LastBlock.Num + 1
     slotNum := s.consts.Auction.SlotNum(blockNum)
+    firstBatchBlockNum := s.stats.Sync.LastBlock.Num
     if reset {
+        // Using this query only to know if there
         dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
         if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-            return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
+            return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
         } else if tracerr.Unwrap(err) == sql.ErrNoRows {
-            firstBatchBlockNum = nil
+            hasBatch = false
         } else {
-            firstBatchBlockNum = &dbFirstBatchBlockNum
+            hasBatch = true
+            firstBatchBlockNum = dbFirstBatchBlockNum
         }
         slot.ForgerCommitment = false
     } else if slotNum > slot.SlotNum {
@@ -355,11 +375,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
     slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
     // If Synced, update the current coordinator
     if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-        if err := s.setSlotCoordinator(&slot); err != nil {
-            return nil, tracerr.Wrap(err)
+        if err := s.setSlotCoordinator(slot); err != nil {
+            return tracerr.Wrap(err)
         }
-        if firstBatchBlockNum != nil &&
-            s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
+        if hasBatch &&
+            s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
                 int64(s.vars.Auction.SlotDeadline) {
             slot.ForgerCommitment = true
         }
@@ -368,57 +388,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
         // BEGIN SANITY CHECK
         canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
         if err != nil {
-            return nil, tracerr.Wrap(err)
+            return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
         }
         if !canForge {
-            return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+            return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
                 "differs from smart contract: %+v", slot))
         }
         // END SANITY CHECK
     }
-    return &slot, nil
+    return nil
 }
 
-func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
+// updateNextSlot updates the slot with information of the next slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
     // We want the next block because the current one is already mined
     blockNum := s.stats.Sync.LastBlock.Num + 1
     slotNum := s.consts.Auction.SlotNum(blockNum) + 1
-    slot := common.Slot{
-        SlotNum:          slotNum,
-        ForgerCommitment: false,
-    }
+    slot.SlotNum = slotNum
+    slot.ForgerCommitment = false
     slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
     // If Synced, update the current coordinator
     if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-        if err := s.setSlotCoordinator(&slot); err != nil {
-            return nil, tracerr.Wrap(err)
+        if err := s.setSlotCoordinator(slot); err != nil {
+            return tracerr.Wrap(err)
         }
 
         // TODO: Remove this SANITY CHECK once this code is tested enough
         // BEGIN SANITY CHECK
         canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
         if err != nil {
-            return nil, tracerr.Wrap(err)
+            return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
         }
         if !canForge {
-            return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+            return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
                 "differs from smart contract: %+v", slot))
         }
         // END SANITY CHECK
     }
-    return &slot, nil
+    return nil
 }
 
-func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
-    current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
-    if err != nil {
+// updateCurrentNextSlotIfSync updates the current and next slot. Information
+// about forger address that is allowed to forge is only updated if we are
+// Synced.
+func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
+    current := s.stats.Sync.Auction.CurrentSlot
+    next := s.stats.Sync.Auction.NextSlot
+    if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
         return tracerr.Wrap(err)
     }
-    next, err := s.getNextSlot()
-    if err != nil {
+    if err := s.updateNextSlot(&next); err != nil {
         return tracerr.Wrap(err)
     }
-    s.stats.UpdateCurrentNextSlot(current, next)
+    s.stats.UpdateCurrentNextSlot(&current, &next)
     return nil
 }
 
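The slot logic above maps ethereum block numbers onto auction slots via `SlotNum` and `SlotBlocks`. A standalone sketch of that arithmetic, with made-up constants (`genesisBlockNum`, `blocksPerSlot`) in place of the auction contract's real values:

```go
package main

import "fmt"

// Illustrative constants; the real values come from the auction contract.
const (
	genesisBlockNum = 100
	blocksPerSlot   = 40
)

// slotNum mirrors the consts.Auction.SlotNum helper used above:
// which slot a given ethereum block belongs to.
func slotNum(blockNum int64) int64 {
	if blockNum >= genesisBlockNum {
		return (blockNum - genesisBlockNum) / blocksPerSlot
	}
	return 0
}

// slotBlocks returns the first and last block of a slot.
func slotBlocks(slot int64) (int64, int64) {
	start := genesisBlockNum + slot*blocksPerSlot
	return start, start + blocksPerSlot - 1
}

func main() {
	s := slotNum(285)
	start, end := slotBlocks(s)
	fmt.Println(s, start, end) // 4 260 299
}
```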
@@ -445,8 +469,10 @@ func (s *Synchronizer) init() error {
         lastBlock = lastSavedBlock
     }
     if err := s.resetState(lastBlock); err != nil {
+        s.resetStateFailed = true
         return tracerr.Wrap(err)
     }
+    s.resetStateFailed = false
 
     log.Infow("Sync init block",
         "syncLastBlock", s.stats.Sync.LastBlock,
@@ -455,23 +481,44 @@ func (s *Synchronizer) init() error {
         "ethLastBlock", s.stats.Eth.LastBlock,
     )
     log.Infow("Sync init batch",
-        "syncLastBatch", s.stats.Sync.LastBatch,
-        "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
-        "ethLastBatch", s.stats.Eth.LastBatch,
+        "syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
+        "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
+        "ethLastBatch", s.stats.Eth.LastBatchNum,
     )
     return nil
 }
 
-// Sync2 attempts to synchronize an ethereum block starting from lastSavedBlock.
+func (s *Synchronizer) resetIntermediateState() error {
+    lastBlock, err := s.historyDB.GetLastBlock()
+    if tracerr.Unwrap(err) == sql.ErrNoRows {
+        lastBlock = &common.Block{}
+    } else if err != nil {
+        return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBlock: %w", err))
+    }
+    if err := s.resetState(lastBlock); err != nil {
+        s.resetStateFailed = true
+        return tracerr.Wrap(fmt.Errorf("resetState at block %v: %w", lastBlock.Num, err))
+    }
+    s.resetStateFailed = false
+    return nil
+}
+
+// Sync attempts to synchronize an ethereum block starting from lastSavedBlock.
 // If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB.
 // If a block is synched, it will be returned and also stored in the DB. If a
 // reorg is detected, the number of discarded blocks will be returned and no
 // synchronization will be made.
 // TODO: Be smart about locking: only lock during the read/write operations
-func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block) (*common.BlockData, *int64, error) {
+func (s *Synchronizer) Sync(ctx context.Context,
+    lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
+    if s.resetStateFailed {
+        if err := s.resetIntermediateState(); err != nil {
+            return nil, nil, tracerr.Wrap(err)
+        }
+    }
+
     var nextBlockNum int64 // next block number to sync
     if lastSavedBlock == nil {
-        var err error
         // Get lastSavedBlock from History DB
         lastSavedBlock, err = s.historyDB.GetLastBlock()
         if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -497,7 +544,7 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
     if tracerr.Unwrap(err) == ethereum.NotFound {
         return nil, nil, nil
     } else if err != nil {
-        return nil, nil, tracerr.Wrap(err)
+        return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
     }
     log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
         ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -527,6 +574,20 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
         }
     }
 
+    defer func() {
+        // If there was an error during sync, reset to the last block
+        // in the historyDB because the historyDB is written last in
+        // the Sync method and is the source of consistency. This
+        // allows resetting the stateDB in the case a batch was
+        // processed but the historyDB block was not committed due to an
+        // error.
+        if err != nil {
+            if err2 := s.resetIntermediateState(); err2 != nil {
+                log.Errorw("sync revert", "err", err2)
+            }
+        }
+    }()
+
     // Get data from the rollup contract
     rollupData, err := s.rollupSync(ethBlock)
     if err != nil {
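The `defer` added above relies on `Sync` having a named `err` return, so the deferred closure can observe whether the function is exiting with a failure and revert to the last consistent state. A compact sketch of that Go pattern (the `resetIntermediateState` stub here is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

func resetIntermediateState() error {
	fmt.Println("reverting to last consistent block")
	return nil
}

// sync uses a named return value so the deferred cleanup can observe
// whether the function is failing — the same shape as the defer above.
func sync(fail bool) (err error) {
	defer func() {
		if err != nil {
			if err2 := resetIntermediateState(); err2 != nil {
				fmt.Println("sync revert:", err2)
			}
		}
	}()
	if fail {
		return errors.New("batch processed but block not committed")
	}
	return nil
}

func main() {
	fmt.Println(sync(true))  // triggers the deferred revert
	fmt.Println(sync(false)) // no revert
}
```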
@@ -564,14 +625,14 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
     }
 
     // Group all the block data into the structs to save into HistoryDB
-    blockData := common.BlockData{
+    blockData = &common.BlockData{
         Block:    *ethBlock,
         Rollup:   *rollupData,
         Auction:  *auctionData,
         WDelayer: *wDelayerData,
     }
 
-    err = s.historyDB.AddBlockSCData(&blockData)
+    err = s.historyDB.AddBlockSCData(blockData)
     if err != nil {
         return nil, nil, tracerr.Wrap(err)
     }
@@ -589,31 +650,35 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
             }
         }
         s.stats.UpdateSync(ethBlock,
-            &rollupData.Batches[batchesLen-1].Batch.BatchNum,
+            &rollupData.Batches[batchesLen-1].Batch,
             lastL1BatchBlock, lastForgeL1TxsNum)
     }
-    var firstBatchBlockNum *int64
+    hasBatch := false
     if len(rollupData.Batches) > 0 {
-        firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
+        hasBatch = true
     }
-    if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
+    if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
         return nil, nil, tracerr.Wrap(err)
     }
 
+    for _, batchData := range rollupData.Batches {
+        metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
+        metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
+        log.Debugw("Synced batch",
+            "syncLastBatch", batchData.Batch.BatchNum,
+            "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
+            "ethLastBatch", s.stats.Eth.LastBatchNum,
+        )
+    }
+    metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
+    metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
     log.Debugw("Synced block",
         "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
         "syncBlocksPerc", s.stats.blocksPerc(),
         "ethLastBlockNum", s.stats.Eth.LastBlock.Num,
     )
-    for _, batchData := range rollupData.Batches {
-        log.Debugw("Synced batch",
-            "syncLastBatch", batchData.Batch.BatchNum,
-            "syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
-            "ethLastBatch", s.stats.Eth.LastBatch,
-        )
-    }
 
-    return &blockData, nil, nil
+    return blockData, nil, nil
 }
 
 // reorg manages a reorg, updating History and State DB as needed. Keeps
@@ -645,36 +710,37 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
     log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1)
 
     // Set History DB and State DB to the correct state
-    err := s.historyDB.Reorg(block.Num)
-    if err != nil {
+    if err := s.historyDB.Reorg(block.Num); err != nil {
         return 0, tracerr.Wrap(err)
     }
 
     if err := s.resetState(block); err != nil {
+        s.resetStateFailed = true
         return 0, tracerr.Wrap(err)
     }
+    s.resetStateFailed = false
 
     return block.Num, nil
 }
 
 func getInitialVariables(ethClient eth.ClientInterface,
-    consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
+    consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
     rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
     if err != nil {
-        return nil, nil, tracerr.Wrap(err)
+        return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
     }
     auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
     if err != nil {
-        return nil, nil, tracerr.Wrap(err)
+        return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
     }
     wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
     if err != nil {
-        return nil, nil, tracerr.Wrap(err)
+        return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
     }
     rollupVars := rollupInit.RollupVariables()
     auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
     wDelayerVars := wDelayerInit.WDelayerVariables()
-    return &SCVariables{
+    return &common.SCVariables{
         Rollup:   *rollupVars,
         Auction:  *auctionVars,
         WDelayer: *wDelayerVars,
@@ -714,12 +780,17 @@ func (s *Synchronizer) resetState(block *common.Block) error {
         s.vars.WDelayer = *wDelayer
     }
 
-    batchNum, err := s.historyDB.GetLastBatchNum()
+    batch, err := s.historyDB.GetLastBatch()
     if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
         return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
     }
     if tracerr.Unwrap(err) == sql.ErrNoRows {
-        batchNum = 0
+        batch = &common.Batch{}
+    }
+
+    err = s.stateDB.Reset(batch.BatchNum)
+    if err != nil {
+        return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
     }
 
     lastL1BatchBlockNum, err := s.historyDB.GetLastL1BatchBlockNum()
@@ -739,14 +810,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
         lastForgeL1TxsNum = &n
     }
 
-    err = s.stateDB.Reset(batchNum)
-    if err != nil {
-        return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
-    }
-
-    s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
-
-    if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
+    s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
+
+    if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
         return tracerr.Wrap(err)
     }
     return nil
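`resetState` above treats `sql.ErrNoRows` as "start from the zero batch" rather than as a failure. A sketch of that default-on-no-rows idiom, with a stand-in for `historyDB.GetLastBatch`:

```go
package main

import (
	"database/sql"
	"errors"
	"fmt"
)

type batch struct{ BatchNum int64 }

// getLastBatch stands in for historyDB.GetLastBatch; an empty DB is
// signalled with sql.ErrNoRows, just like the real query.
func getLastBatch() (*batch, error) { return nil, sql.ErrNoRows }

func main() {
	b, err := getLastBatch()
	// Same shape as resetState above: an empty row set is not a
	// failure, it just means we reset the stateDB to batch zero.
	if errors.Is(err, sql.ErrNoRows) {
		b = &batch{}
	} else if err != nil {
		panic(err)
	}
	fmt.Println(b.BatchNum) // 0
}
```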
@@ -761,19 +827,16 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 
     // Get rollup events in the block, and make sure the block hash matches
     // the expected one.
-    rollupEvents, blockHash, err := s.ethClient.RollupEventsByBlock(blockNum)
-    if err != nil {
-        return nil, tracerr.Wrap(err)
+    rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
+    if err != nil && err.Error() == errStrUnknownBlock {
+        return nil, tracerr.Wrap(ErrUnknownBlock)
+    } else if err != nil {
+        return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
     }
     // No events in this block
-    if blockHash == nil {
+    if rollupEvents == nil {
         return &rollupData, nil
     }
-    if *blockHash != ethBlock.Hash {
-        log.Errorw("Block hash mismatch in Rollup events", "expected", ethBlock.Hash.String(),
-            "got", blockHash.String())
-        return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
-    }
 
     var nextForgeL1TxsNum int64 // forgeL1TxsNum for the next L1Batch
     nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum()
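The hunk above confines string matching on geth's "unknown block" error to one place and hands callers the typed `ErrUnknownBlock` sentinel instead. A sketch of the mapping — the `eventsByBlock` stub is illustrative, and the real code additionally wraps with `tracerr.Wrap`:

```go
package main

import (
	"errors"
	"fmt"
)

const errStrUnknownBlock = "unknown block"

// ErrUnknownBlock mirrors the sentinel added above: string matching on
// the geth error is done once, and callers compare against a typed error.
var ErrUnknownBlock = errors.New("unknown block")

func eventsByBlock() error { return errors.New(errStrUnknownBlock) }

func rollupSync() error {
	if err := eventsByBlock(); err != nil && err.Error() == errStrUnknownBlock {
		return ErrUnknownBlock
	} else if err != nil {
		return fmt.Errorf("RollupEventsByBlock: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(errors.Is(rollupSync(), ErrUnknownBlock)) // true
}
```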
@@ -801,7 +864,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
     forgeBatchArgs, sender, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash,
         evtForgeBatch.L1UserTxsLen)
     if err != nil {
-        return nil, tracerr.Wrap(err)
+        return nil, tracerr.Wrap(fmt.Errorf("RollupForgeBatchArgs: %w", err))
     }
 
     batchNum := common.BatchNum(evtForgeBatch.BatchNum)
@@ -884,6 +947,16 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
         if err != nil {
             return nil, tracerr.Wrap(err)
         }
+        if s.stateDB.CurrentBatch() != batchNum {
+            return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
+                "evtForgeBatch.BatchNum = (%v)",
+                s.stateDB.CurrentBatch(), batchNum))
+        }
+        if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
+            return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
+                "forgeBatchArgs.NewStRoot (%v)",
+                s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
+        }
 
         // Transform processed PoolL2 txs to L2 and store in BatchData
         l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -924,6 +997,19 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
         }
         batchData.CreatedAccounts = processTxsOut.CreatedAccounts
+
+        batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
+            len(processTxsOut.UpdatedAccounts))
+        for _, acc := range processTxsOut.UpdatedAccounts {
+            batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
+                common.AccountUpdate{
+                    EthBlockNum: blockNum,
+                    BatchNum:    batchNum,
+                    Idx:         acc.Idx,
+                    Nonce:       acc.Nonce,
+                    Balance:     acc.Balance,
+                })
+        }
 
         slotNum := int64(0)
         if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
             slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
@@ -1066,19 +1152,16 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
     var auctionData = common.NewAuctionData()
 
     // Get auction events in the block
-    auctionEvents, blockHash, err := s.ethClient.AuctionEventsByBlock(blockNum)
-    if err != nil {
-        return nil, tracerr.Wrap(err)
+    auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
+    if err != nil && err.Error() == errStrUnknownBlock {
+        return nil, tracerr.Wrap(ErrUnknownBlock)
+    } else if err != nil {
+        return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
     }
     // No events in this block
-    if blockHash == nil {
+    if auctionEvents == nil {
         return &auctionData, nil
     }
-    if *blockHash != ethBlock.Hash {
-        log.Errorw("Block hash mismatch in Auction events", "expected", ethBlock.Hash.String(),
-            "got", blockHash.String())
-        return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
-    }
 
     // Get bids
     for _, evt := range auctionEvents.NewBid {
@@ -1168,19 +1251,16 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
     wDelayerData := common.NewWDelayerData()
 
     // Get wDelayer events in the block
-    wDelayerEvents, blockHash, err := s.ethClient.WDelayerEventsByBlock(blockNum)
-    if err != nil {
-        return nil, tracerr.Wrap(err)
+    wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
+    if err != nil && err.Error() == errStrUnknownBlock {
+        return nil, tracerr.Wrap(ErrUnknownBlock)
+    } else if err != nil {
+        return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
     }
     // No events in this block
-    if blockHash == nil {
+    if wDelayerEvents == nil {
         return &wDelayerData, nil
     }
-    if *blockHash != ethBlock.Hash {
-        log.Errorw("Block hash mismatch in WDelayer events", "expected", ethBlock.Hash.String(),
-            "got", blockHash.String())
-        return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
-    }
 
     for _, evt := range wDelayerEvents.Deposit {
         wDelayerData.Deposits = append(wDelayerData.Deposits, common.WDelayerTransfer{
@@ -17,7 +17,6 @@ import (
     "github.com/hermeznetwork/hermez-node/db/historydb"
     "github.com/hermeznetwork/hermez-node/db/statedb"
     "github.com/hermeznetwork/hermez-node/eth"
-    "github.com/hermeznetwork/hermez-node/log"
     "github.com/hermeznetwork/hermez-node/test"
     "github.com/hermeznetwork/hermez-node/test/til"
     "github.com/jinzhu/copier"
@@ -172,6 +171,8 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
             *exit = syncBatch.ExitTree[j]
         }
         assert.Equal(t, batch.Batch, syncBatch.Batch)
+        // Ignore updated accounts
+        syncBatch.UpdatedAccounts = nil
         assert.Equal(t, batch, syncBatch)
         assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
 
@@ -307,20 +308,28 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
     require.NoError(t, err)
     deleteme = append(deleteme, dir)
 
-    stateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+    stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
     require.NoError(t, err)
 
     // Init History DB
     pass := os.Getenv("POSTGRES_PASS")
     db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
     require.NoError(t, err)
-    historyDB := historydb.NewHistoryDB(db, nil)
+    historyDB := historydb.NewHistoryDB(db, db, nil)
     // Clear DB
     test.WipeDB(historyDB.DB())
 
     return stateDB, historyDB
 }
 
+func newBigInt(s string) *big.Int {
+    v, ok := new(big.Int).SetString(s, 10)
+    if !ok {
+        panic(fmt.Errorf("Can't set big.Int from %s", s))
+    }
+    return v
+}
+
 func TestSyncGeneral(t *testing.T) {
     //
     // Setup
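The `newBigInt` helper added above panics instead of returning an error, which keeps the state-root fixtures one-liners that fail loudly if a literal is mistyped. A standalone sketch of the same shape, using one of the roots from the test:

```go
package main

import (
	"fmt"
	"math/big"
)

// mustBigInt has the same shape as the newBigInt test helper above:
// parse a base-10 string or panic, so bad fixtures abort the test run.
func mustBigInt(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic(fmt.Errorf("can't set big.Int from %q", s))
	}
	return v
}

func main() {
	root := mustBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
	fmt.Println(root.BitLen()) // 253: state roots are field elements
}
```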
@@ -339,7 +348,6 @@ func TestSyncGeneral(t *testing.T) {
     s, err := NewSynchronizer(client, historyDB, stateDB, Config{
         StatsRefreshPeriod: 0 * time.Second,
     })
-    log.Error(err)
     require.NoError(t, err)
 
     ctx := context.Background()
@@ -351,7 +359,7 @@ func TestSyncGeneral(t *testing.T) {
     assert.Equal(t, false, stats.Synced())
 
     // Test Sync for rollup genesis block
-    syncBlock, discards, err := s.Sync2(ctx, nil)
+    syncBlock, discards, err := s.Sync(ctx, nil)
     require.NoError(t, err)
     require.Nil(t, discards)
     require.NotNil(t, syncBlock)
@@ -364,9 +372,9 @@ func TestSyncGeneral(t *testing.T) {
     assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
     assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
     vars := s.SCVars()
-    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
-    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
-    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
+    assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+    assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+    assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
 
     dbBlocks, err := s.historyDB.GetAllBlocks()
     require.NoError(t, err)
@@ -374,7 +382,7 @@ func TestSyncGeneral(t *testing.T) {
     assert.Equal(t, int64(1), dbBlocks[1].Num)
 
     // Sync again and expect no new blocks
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     require.NoError(t, err)
     require.Nil(t, discards)
     require.Nil(t, syncBlock)
@@ -434,12 +442,22 @@ func TestSyncGeneral(t *testing.T) {
     require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
     require.Equal(t, 2, len(blocks[i].Rollup.Batches))
     require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
+    // Set StateRoots for batches manually (til doesn't set it)
+    blocks[i].Rollup.Batches[0].Batch.StateRoot =
+        newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
+    blocks[i].Rollup.Batches[1].Batch.StateRoot =
+        newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
     // blocks 1 (blockNum=3)
     i = 1
     require.Equal(t, 3, int(blocks[i].Block.Num))
     require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
     require.Equal(t, 2, len(blocks[i].Rollup.Batches))
     require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
+    // Set StateRoots for batches manually (til doesn't set it)
+    blocks[i].Rollup.Batches[0].Batch.StateRoot =
+        newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
+    blocks[i].Rollup.Batches[1].Batch.StateRoot =
+        newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
 
     // Generate extra required data
     ethAddTokens(blocks, client)
@@ -461,7 +479,7 @@ func TestSyncGeneral(t *testing.T) {
 
     // Block 2
 
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     require.NoError(t, err)
     require.Nil(t, discards)
     require.NotNil(t, syncBlock)
@@ -478,7 +496,7 @@ func TestSyncGeneral(t *testing.T) {
 
     // Block 3
 
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     assert.NoError(t, err)
     require.NoError(t, err)
     require.Nil(t, discards)
@@ -502,7 +520,7 @@ func TestSyncGeneral(t *testing.T) {
     require.NoError(t, err)
     client.CtlMineBlock()
 
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     require.NoError(t, err)
     require.Nil(t, discards)
     require.NotNil(t, syncBlock)
@@ -515,9 +533,9 @@ func TestSyncGeneral(t *testing.T) {
     assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
    assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
     vars = s.SCVars()
-    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
-    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
-    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
+    assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+    assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+    assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
 
     dbExits, err := s.historyDB.GetAllExits()
     require.NoError(t, err)
@@ -553,7 +571,7 @@ func TestSyncGeneral(t *testing.T) {
 
     client.CtlMineBlock()
 
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     require.NoError(t, err)
     require.Nil(t, discards)
     require.NotNil(t, syncBlock)
@@ -614,6 +632,12 @@ func TestSyncGeneral(t *testing.T) {
     blocks, err = tc.GenerateBlocks(set2)
     require.NoError(t, err)
 
+    // Set StateRoots for batches manually (til doesn't set it)
+    blocks[0].Rollup.Batches[0].Batch.StateRoot =
+        newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
+    blocks[0].Rollup.Batches[1].Batch.StateRoot =
+        newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
+
     for i := 0; i < 4; i++ {
         client.CtlRollback()
     }
@@ -632,7 +656,7 @@ func TestSyncGeneral(t *testing.T) {
     require.NoError(t, err)
 
     // First sync detects the reorg and discards 4 blocks
-    syncBlock, discards, err = s.Sync2(ctx, nil)
+    syncBlock, discards, err = s.Sync(ctx, nil)
     require.NoError(t, err)
     expetedDiscards := int64(4)
     require.Equal(t, &expetedDiscards, discards)
@@ -641,9 +665,9 @@ func TestSyncGeneral(t *testing.T) {
     assert.Equal(t, false, stats.Synced())
     assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
     vars = s.SCVars()
-    assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
-    assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
-    assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
+    assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+    assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+    assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
 
     // At this point, the DB only has data up to block 1
     dbBlock, err := s.historyDB.GetLastBlock()
@@ -660,7 +684,7 @@ func TestSyncGeneral(t *testing.T) {
 
     // Sync blocks 2-6
     for i := 0; i < 5; i++ {
-        syncBlock, discards, err = s.Sync2(ctx, nil)
+        syncBlock, discards, err = s.Sync(ctx, nil)
         require.NoError(t, err)
         require.Nil(t, discards)
         require.NotNil(t, syncBlock)
@@ -680,9 +704,9 @@ func TestSyncGeneral(t *testing.T) {
        }
 
         vars = s.SCVars()
-        assert.Equal(t, clientSetup.RollupVariables, vars.Rollup)
-        assert.Equal(t, clientSetup.AuctionVariables, vars.Auction)
-        assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer)
+        assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+        assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+        assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
     }
 
     dbBlock, err = s.historyDB.GetLastBlock()
@@ -783,7 +807,7 @@ func TestSyncForgerCommitment(t *testing.T) {
 
     // be in sync
     for {
-        syncBlock, discards, err := s.Sync2(ctx, nil)
+        syncBlock, discards, err := s.Sync(ctx, nil)
         require.NoError(t, err)
         require.Nil(t, discards)
         if syncBlock == nil {
@@ -802,7 +826,7 @@ func TestSyncForgerCommitment(t *testing.T) {
         err = client.CtlAddBlocks([]common.BlockData{block})
         require.NoError(t, err)
 
-        syncBlock, discards, err := s.Sync2(ctx, nil)
+        syncBlock, discards, err := s.Sync(ctx, nil)
         require.NoError(t, err)
         require.Nil(t, discards)
         if syncBlock == nil {
@@ -2,6 +2,7 @@ package debugapi
 
 import (
     "context"
+    "net"
     "net/http"
     "time"
 
@@ -12,6 +13,7 @@ import (
     "github.com/hermeznetwork/hermez-node/log"
     "github.com/hermeznetwork/hermez-node/synchronizer"
     "github.com/hermeznetwork/tracerr"
+    "github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 func handleNoRoute(c *gin.Context) {
@@ -107,6 +109,8 @@ func (a *DebugAPI) Run(ctx context.Context) error {
     api.Use(cors.Default())
     debugAPI := api.Group("/debug")
 
+    debugAPI.GET("/metrics", gin.WrapH(promhttp.Handler()))
+
     debugAPI.GET("sdb/batchnum", a.handleCurrentBatch)
     debugAPI.GET("sdb/mtroot", a.handleMTRoot)
     // Accounts returned by these endpoints will always have BatchNum = 0,
@@ -118,16 +122,20 @@ func (a *DebugAPI) Run(ctx context.Context) error {
     debugAPI.GET("sync/stats", a.handleSyncStats)
 
     debugAPIServer := &http.Server{
-        Addr:    a.addr,
         Handler: api,
-        // Use some hardcoded numberes that are suitable for testing
+        // Use some hardcoded numbers that are suitable for testing
         ReadTimeout:    30 * time.Second, //nolint:gomnd
         WriteTimeout:   30 * time.Second, //nolint:gomnd
         MaxHeaderBytes: 1 << 20,          //nolint:gomnd
     }
-    go func() {
+    listener, err := net.Listen("tcp", a.addr)
+    if err != nil {
+        return tracerr.Wrap(err)
+    }
     log.Infof("DebugAPI is ready at %v", a.addr)
-        if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+    go func() {
+        if err := debugAPIServer.Serve(listener); err != nil &&
+            tracerr.Unwrap(err) != http.ErrServerClosed {
             log.Fatalf("Listen: %s\n", err)
         }
     }()
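The debugapi change above splits `net.Listen` from `Serve` so that bind errors are returned synchronously from `Run` instead of killing a goroutine later, and exposes the standard `promhttp` handler under gin. A runnable sketch of that wiring — the address and timeouts here are illustrative:

```go
package main

import (
	"net"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	api := gin.New()
	// Same wiring as the DebugAPI above: the stock promhttp handler
	// wrapped so gin can serve it alongside the debug endpoints.
	api.GET("/debug/metrics", gin.WrapH(promhttp.Handler()))

	server := &http.Server{
		Handler:      api,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
	}
	// Listening first makes startup errors (port in use, bad address)
	// surface immediately rather than inside the serving goroutine.
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
		panic(err)
	}
}
```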
@@ -44,7 +44,7 @@ func TestDebugAPI(t *testing.T) {
     dir, err := ioutil.TempDir("", "tmpdb")
     require.Nil(t, err)
 
-    sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+    sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
     require.Nil(t, err)
     err = sdb.MakeCheckpoint() // Make a checkpoint to increment the batchNum
     require.Nil(t, err)
@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
     cpy := c.nextBlock().copy()
     defer func() { c.revertIfErr(err, cpy) }()
 
-    _, err = common.NewFloat16(amount)
+    _, err = common.NewFloat40(amount)
     if err != nil {
         return nil, tracerr.Wrap(err)
     }
-    _, err = common.NewFloat16(depositAmount)
+    _, err = common.NewFloat40(depositAmount)
     if err != nil {
         return nil, tracerr.Wrap(err)
     }
@@ -1116,15 +1116,20 @@ func (c *Client) RollupConstants() (*common.RollupConstants, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
|
 // RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
-func (c *Client) RollupEventsByBlock(blockNum int64) (*eth.RollupEvents, *ethCommon.Hash, error) {
+func (c *Client) RollupEventsByBlock(blockNum int64,
+	blockHash *ethCommon.Hash) (*eth.RollupEvents, error) {
 	c.rw.RLock()
 	defer c.rw.RUnlock()
 
 	block, ok := c.blocks[blockNum]
 	if !ok {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
+		return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
 	}
-	return &block.Rollup.Events, &block.Eth.Hash, nil
+	if blockHash != nil && *blockHash != block.Eth.Hash {
+		return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
+			blockHash, block.Eth.Hash))
+	}
+	return &block.Rollup.Events, nil
 }
 
 // RollupEventInit returns the initialize event with its corresponding block number

@@ -1573,15 +1578,20 @@ func (c *Client) AuctionConstants() (*common.AuctionConstants, error) {
 }
 
 // AuctionEventsByBlock returns the events in a block that happened in the Auction Smart Contract
-func (c *Client) AuctionEventsByBlock(blockNum int64) (*eth.AuctionEvents, *ethCommon.Hash, error) {
+func (c *Client) AuctionEventsByBlock(blockNum int64,
+	blockHash *ethCommon.Hash) (*eth.AuctionEvents, error) {
 	c.rw.RLock()
 	defer c.rw.RUnlock()
 
 	block, ok := c.blocks[blockNum]
 	if !ok {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
+		return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
 	}
-	return &block.Auction.Events, &block.Eth.Hash, nil
+	if blockHash != nil && *blockHash != block.Eth.Hash {
+		return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
+			blockHash, block.Eth.Hash))
+	}
+	return &block.Auction.Events, nil
 }
 
 // AuctionEventInit returns the initialize event with its corresponding block number

@@ -1789,15 +1799,20 @@ func (c *Client) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amou
 }
 
 // WDelayerEventsByBlock returns the events in a block that happened in the WDelayer Contract
-func (c *Client) WDelayerEventsByBlock(blockNum int64) (*eth.WDelayerEvents, *ethCommon.Hash, error) {
+func (c *Client) WDelayerEventsByBlock(blockNum int64,
+	blockHash *ethCommon.Hash) (*eth.WDelayerEvents, error) {
 	c.rw.RLock()
 	defer c.rw.RUnlock()
 
 	block, ok := c.blocks[blockNum]
 	if !ok {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
+		return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
 	}
-	return &block.WDelayer.Events, &block.Eth.Hash, nil
+	if blockHash != nil && *blockHash != block.Eth.Hash {
+		return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
+			blockHash, block.Eth.Hash))
+	}
+	return &block.WDelayer.Events, nil
 }
 
 // WDelayerConstants returns the Constants of the WDelayer Contract
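The three mock-client methods above change shape in the same way: the `*ethCommon.Hash` return value becomes an optional `blockHash` argument, so a caller can pin the query to a header it has already synced and get an explicit error instead of silently reading events from a reorganized block. A minimal caller sketch (the `syncRollupBlock` wrapper is hypothetical, and `EthBlockByNumber` with this signature is assumed to be the mock client's existing header getter):

```
// Sketch: fetch a header, then fetch the events pinned to that header's
// hash; a reorg of the mock chain between the two calls surfaces as the
// "Hash mismatch" error instead of returning events from another block.
func syncRollupBlock(c *Client, blockNum int64) (*eth.RollupEvents, error) {
	block, err := c.EthBlockByNumber(context.Background(), blockNum)
	if err != nil {
		return nil, err
	}
	// Passing nil instead of &block.Hash skips the check, which is what
	// the updated tests below do.
	return c.RollupEventsByBlock(blockNum, &block.Hash)
}
```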
@@ -130,7 +130,7 @@ func TestClientAuction(t *testing.T) {
 	blockNum, err := c.EthLastBlock()
 	require.Nil(t, err)
 
-	auctionEvents, _, err := c.AuctionEventsByBlock(blockNum)
+	auctionEvents, err := c.AuctionEventsByBlock(blockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, 2, len(auctionEvents.NewBid))
 }

@@ -171,7 +171,7 @@ func TestClientRollup(t *testing.T) {
 
 	blockNum, err := c.EthLastBlock()
 	require.Nil(t, err)
-	rollupEvents, _, err := c.RollupEventsByBlock(blockNum)
+	rollupEvents, err := c.RollupEventsByBlock(blockNum, nil)
 	require.Nil(t, err)
 	assert.Equal(t, N, len(rollupEvents.L1UserTx))
 	assert.Equal(t, 1, len(rollupEvents.AddToken))

@@ -192,7 +192,7 @@ func TestClientRollup(t *testing.T) {
 
 	blockNumA, err := c.EthLastBlock()
 	require.Nil(t, err)
-	rollupEvents, hashA, err := c.RollupEventsByBlock(blockNumA)
+	rollupEvents, err = c.RollupEventsByBlock(blockNumA, nil)
 	require.Nil(t, err)
 	assert.Equal(t, 0, len(rollupEvents.L1UserTx))
 	assert.Equal(t, 0, len(rollupEvents.AddToken))

@@ -205,14 +205,14 @@ func TestClientRollup(t *testing.T) {
 
 	blockNumB, err := c.EthLastBlock()
 	require.Nil(t, err)
-	rollupEvents, hashB, err := c.RollupEventsByBlock(blockNumA)
+	rollupEventsB, err := c.RollupEventsByBlock(blockNumA, nil)
 	require.Nil(t, err)
-	assert.Equal(t, 0, len(rollupEvents.L1UserTx))
-	assert.Equal(t, 0, len(rollupEvents.AddToken))
-	assert.Equal(t, 0, len(rollupEvents.ForgeBatch))
+	assert.Equal(t, 0, len(rollupEventsB.L1UserTx))
+	assert.Equal(t, 0, len(rollupEventsB.AddToken))
+	assert.Equal(t, 0, len(rollupEventsB.ForgeBatch))
 
 	assert.Equal(t, blockNumA, blockNumB)
-	assert.NotEqual(t, hashA, hashB)
+	assert.NotEqual(t, rollupEvents, rollupEventsB)
 
 	// Forge again
 	rollupForgeBatchArgs0 := &eth.RollupForgeBatchArgs{

@@ -232,7 +232,7 @@ func TestClientRollup(t *testing.T) {
 
 	blockNum, err = c.EthLastBlock()
 	require.Nil(t, err)
-	rollupEvents, _, err = c.RollupEventsByBlock(blockNum)
+	rollupEvents, err = c.RollupEventsByBlock(blockNum, nil)
 	require.Nil(t, err)
 
 	rollupForgeBatchArgs1, sender, err := c.RollupForgeBatchArgs(rollupEvents.ForgeBatch[0].EthTxHash,
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io/ioutil"
+	"net"
 	"net/http"
 	"sync"
 	"time"

@@ -145,7 +146,7 @@ const longWaitDuration = 999 * time.Hour
 // const provingDuration = 2 * time.Second
 
 func (s *Mock) runProver(ctx context.Context) {
-	waitDuration := longWaitDuration
+	timer := time.NewTimer(longWaitDuration)
 	for {
 		select {
 		case <-ctx.Done():

@@ -153,21 +154,27 @@ func (s *Mock) runProver(ctx context.Context) {
 		case msg := <-s.msgCh:
 			switch msg.value {
 			case "cancel":
-				waitDuration = longWaitDuration
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(longWaitDuration)
 				s.Lock()
 				if !s.status.IsReady() {
 					s.status = prover.StatusCodeAborted
 				}
 				s.Unlock()
 			case "prove":
-				waitDuration = s.provingDuration
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(s.provingDuration)
 				s.Lock()
 				s.status = prover.StatusCodeBusy
 				s.Unlock()
 			}
 			msg.ackCh <- true
-		case <-time.After(waitDuration):
-			waitDuration = longWaitDuration
+		case <-timer.C:
+			timer.Reset(longWaitDuration)
 			s.Lock()
 			if s.status != prover.StatusCodeBusy {
 				s.Unlock()
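The `runProver` rewrite swaps `time.After` (which allocates a fresh timer on every pass through the loop) for one reusable `time.Timer`. The non-obvious part is the `Stop`/drain/`Reset` sequence: if `Stop` returns false the timer already fired, and the stale expiry sitting in `timer.C` must be drained before `Reset`, or the next `<-timer.C` would return immediately. A standalone sketch of the idiom, standard library only (the blocking drain in `runProver` is safe because a single goroutine owns the channel; this sketch uses the defensive non-blocking variant):

```
package main

import (
	"fmt"
	"time"
)

// rearm re-arms t to fire after d. If Stop reports that the timer already
// expired, the buffered value in t.C is drained first so that the next
// receive observes the new deadline, not the stale one.
func rearm(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		select {
		case <-t.C: // drain the stale expiry, if still buffered
		default: // already consumed elsewhere
		}
	}
	t.Reset(d)
}

func main() {
	t := time.NewTimer(time.Hour)  // analogous to longWaitDuration
	rearm(t, 50*time.Millisecond)  // analogous to the "prove" branch
	fmt.Println("fired at", <-t.C) // fires once, at the re-armed deadline
}
```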
@@ -202,16 +209,20 @@ func (s *Mock) Run(ctx context.Context) error {
 	apiGroup.POST("/cancel", s.handleCancel)
 
 	debugAPIServer := &http.Server{
-		Addr:    s.addr,
 		Handler: api,
 		// Use some hardcoded numberes that are suitable for testing
 		ReadTimeout:    30 * time.Second, //nolint:gomnd
 		WriteTimeout:   30 * time.Second, //nolint:gomnd
 		MaxHeaderBytes: 1 << 20,          //nolint:gomnd
 	}
-	go func() {
+	listener, err := net.Listen("tcp", s.addr)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
 	log.Infof("prover.MockServer is ready at %v", s.addr)
-	if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+	go func() {
+		if err := debugAPIServer.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			log.Fatalf("Listen: %s\n", err)
 		}
 	}()
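Splitting `ListenAndServe` into an explicit `net.Listen` plus `Serve` changes two things: a bind failure (bad address, port already taken) is now returned synchronously from `Run` via `tracerr.Wrap` instead of killing the process from inside the goroutine, and the "ready" log line is only printed once the socket is actually bound. A generic standard-library sketch of the same pattern:

```
package main

import (
	"log"
	"net"
	"net/http"
)

// serveAsync binds addr synchronously so that the caller receives bind
// errors as a return value, then serves in the background. With addr
// ":0" the returned listener reports the picked port, handy in tests.
func serveAsync(addr string, h http.Handler) (net.Listener, error) {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	srv := &http.Server{Handler: h}
	go func() {
		if err := srv.Serve(ln); err != nil && err != http.ErrServerClosed {
			log.Printf("serve: %v", err)
		}
	}()
	return ln, nil
}

func main() {
	ln, err := serveAsync("127.0.0.1:0", http.NotFoundHandler())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("listening on %v", ln.Addr())
}
```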
@@ -142,10 +142,12 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
 	// same values than in the js test
 	users = GenerateJsUsers(t)
 
+	depositAmount, err := common.Float40(10400).BigInt()
+	require.Nil(t, err)
 	l1UserTxs = []common.L1Tx{
 		{
 			FromIdx:       0,
-			DepositAmount: big.NewInt(16000000),
+			DepositAmount: depositAmount,
 			Amount:        big.NewInt(0),
 			TokenID:       1,
 			FromBJJ:       users[0].BJJ.Public().Compress(),
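The literal `16000000` is replaced by a value derived from `common.Float40`, the 40-bit floating-point amount encoding that supersedes `Float16` throughout this change set. Only values exactly representable in Float40 survive the encode/decode round-trip, so the test builds its `DepositAmount` from the encoded form rather than from an arbitrary integer. A hedged round-trip sketch, using only the `Float40`/`NewFloat40` calls visible in this diff (the import path and the exact error behaviour of `NewFloat40` on non-representable inputs are assumptions):

```
package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// Decode the 40-bit form to a *big.Int, exactly as the updated test
	// does to obtain a representable DepositAmount.
	depositAmount, err := common.Float40(10400).BigInt()
	if err != nil {
		panic(err)
	}
	// Encode it back; a representable amount round-trips to the same
	// 40-bit value.
	f40, err := common.NewFloat40(depositAmount)
	if err != nil {
		panic(err)
	}
	fmt.Println(depositAmount, int64(f40))
}
```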
@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
 		})
 	}
 
-	hdb := historydb.NewHistoryDB(db, nil)
+	hdb := historydb.NewHistoryDB(db, db, nil)
 	assert.NoError(t, hdb.AddBlock(&common.Block{
 		Num: 1,
 	}))

@@ -75,12 +75,13 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)
-	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
+	l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
 
 	dir, err := ioutil.TempDir("", "tmpSyncDB")
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
-	syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 0)
+	syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeSynchronizer, NLevels: 0})
 	require.NoError(t, err)
 
 	txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
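`statedb.NewStateDB` moves from positional arguments to a `statedb.Config` struct, which makes call sites self-describing and lets new options be added without touching every caller; the rest of this change set applies the same mechanical migration to each test. The shape of the change, with field meanings inferred from the old positional order (path, checkpoints to keep, type, tree levels):

```
// Before:
//	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 0)
// After:
sdb, err := statedb.NewStateDB(statedb.Config{
	Path:    dir,
	Keep:    128, // assumed: number of checkpoints to keep
	Type:    statedb.TypeSynchronizer,
	NLevels: 0,
})
```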
@@ -310,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 	assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
 	h, err := zki.HashGlobalData()
 	require.NoError(t, err)
-	assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
+	assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String())
 	sendProofAndCheckResp(t, zki)
 
 	// batch3

@@ -333,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 	assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
 	h, err = zki.HashGlobalData()
 	require.NoError(t, err)
-	assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
+	assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String())
 	assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
 	assert.Equal(t, "0", zki.EthAddr3[1].String())
 	sendProofAndCheckResp(t, zki)

@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
 	os.Exit(exitVal)
 }
 
-const MaxTx = 376
+const MaxTx = 352
 const NLevels = 32
 const MaxL1Tx = 256
 const MaxFeeTx = 64

@@ -50,7 +50,7 @@ func initStateDB(t *testing.T, typ statedb.TypeStateDB) *statedb.StateDB {
 	require.NoError(t, err)
 	defer assert.Nil(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, typ, NLevels)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: typ, NLevels: NLevels})
 	require.NoError(t, err)
 	return sdb
 }

@@ -61,6 +61,7 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
 		return
 	}
 
+	log.Infof("sending proof to %s", proofServerURL)
 	// Store zkinputs json for debugging purposes
 	zkInputsJSON, err := json.Marshal(zki)
 	require.NoError(t, err)
@@ -27,15 +27,20 @@ type TxProcessor struct {
 	// AccumulatedFees contains the accumulated fees for each token (Coord
 	// Idx) in the processed batch
 	AccumulatedFees map[common.Idx]*big.Int
+	// updatedAccounts stores the last version of the account when it has
+	// been created/updated by any of the processed transactions.
+	updatedAccounts map[common.Idx]*common.Account
 	config Config
 }
 
 // Config contains the TxProcessor configuration parameters
 type Config struct {
 	NLevels uint32
+	// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
 	MaxFeeTx uint32
 	MaxTx    uint32
 	MaxL1Tx  uint32
+	// ChainID of the blockchain
 	ChainID uint16
 }
 

@@ -53,6 +58,9 @@ type ProcessTxOutput struct {
 	CreatedAccounts    []common.Account
 	CoordinatorIdxsMap map[common.TokenID]common.Idx
 	CollectedFees      map[common.TokenID]*big.Int
+	// UpdatedAccounts returns the current state of each account
+	// created/updated by any of the processed transactions.
+	UpdatedAccounts map[common.Idx]*common.Account
 }
 
 func newErrorNotEnoughBalance(tx common.Tx) error {
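`ProcessTxOutput.UpdatedAccounts` gives the synchronizer the final post-batch state of every account touched by the batch, keyed by account index, so it can be persisted without re-walking the merkle tree. A hypothetical consumer sketch (`store` is an illustrative callback, not a repo API):

```
// Sketch: persist the last version of each created/updated account after
// processing a batch in synchronizer mode. The map is only populated for
// statedb.TypeSynchronizer; for other StateDB types it stays nil.
func persistUpdatedAccounts(ptOut *ProcessTxOutput,
	store func(common.Idx, *common.Account) error) error {
	for idx, acc := range ptOut.UpdatedAccounts {
		if err := store(idx, acc); err != nil {
			return err
		}
	}
	return nil
}
```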
@@ -125,9 +133,13 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx))
 	}
 
+	if tp.s.Type() == statedb.TypeSynchronizer {
+		tp.updatedAccounts = make(map[common.Idx]*common.Account)
+	}
+
 	exits := make([]processedExit, nTx)
 
-	if tp.s.Typ == statedb.TypeBatchBuilder {
+	if tp.s.Type() == statedb.TypeBatchBuilder {
 		tp.zki = common.NewZKInputs(tp.config.ChainID, tp.config.MaxTx, tp.config.MaxL1Tx,
 			tp.config.MaxFeeTx, tp.config.NLevels, (tp.s.CurrentBatch() + 1).BigInt())
 		tp.zki.OldLastIdx = tp.s.CurrentIdx().BigInt()

@@ -137,7 +149,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 
 	// TBD if ExitTree is only in memory or stored in disk, for the moment
 	// is only needed in memory
-	if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
+	if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
 		tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
 		if err != nil {
 			return nil, tracerr.Wrap(err)

@@ -166,7 +178,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		if tp.s.Typ == statedb.TypeSynchronizer {
+		if tp.s.Type() == statedb.TypeSynchronizer {
 			if createdAccount != nil {
 				createdAccounts = append(createdAccounts, *createdAccount)
 				l1usertxs[i].EffectiveFromIdx = createdAccount.Idx

@@ -195,8 +207,8 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 				tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
 			}
 		}
-		if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
-			if exitIdx != nil && exitTree != nil {
+		if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
+			if exitIdx != nil && exitTree != nil && exitAccount != nil {
 				exits[tp.i] = processedExit{
 					exit:    true,
 					newExit: newExit,

@@ -217,7 +229,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		if exitIdx != nil {
 			log.Error("Unexpected Exit in L1CoordinatorTx")
 		}
-		if tp.s.Typ == statedb.TypeSynchronizer {
+		if tp.s.Type() == statedb.TypeSynchronizer {
 			if createdAccount != nil {
 				createdAccounts = append(createdAccounts, *createdAccount)
 				l1coordinatortxs[i].EffectiveFromIdx = createdAccount.Idx

@@ -276,7 +288,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 	// collectedFees will contain the amount of fee collected for each
 	// TokenID
 	var collectedFees map[common.TokenID]*big.Int
-	if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
+	if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
 		collectedFees = make(map[common.TokenID]*big.Int)
 		for tokenID := range coordIdxsMap {
 			collectedFees[tokenID] = big.NewInt(0)

@@ -317,7 +329,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 			}
 		}
 	}
-	if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
+	if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
 		if exitIdx != nil && exitTree != nil {
 			exits[tp.i] = processedExit{
 				exit: true,

@@ -380,7 +392,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 			tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr)
 		}
 		accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
-		pFee, err := tp.s.UpdateAccount(idx, accCoord)
+		pFee, err := tp.updateAccount(idx, accCoord)
 		if err != nil {
 			log.Error(err)
 			return nil, tracerr.Wrap(err)

@@ -401,10 +413,11 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		}
 	}
 
-	if tp.s.Typ == statedb.TypeTxSelector {
+	if tp.s.Type() == statedb.TypeTxSelector {
 		return nil, nil
 	}
 
+	if tp.s.Type() == statedb.TypeSynchronizer {
 	// once all txs processed (exitTree root frozen), for each Exit,
 	// generate common.ExitInfo data
 	var exitInfos []common.ExitInfo

@@ -436,8 +449,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		}
 	}
 
-	if tp.s.Typ == statedb.TypeSynchronizer {
-		// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
+	// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
 	// be able to store it into HistoryDB for the concrete BatchNum
 	return &ProcessTxOutput{
 		ZKInputs: nil,

@@ -445,6 +457,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
 		CreatedAccounts:    createdAccounts,
 		CoordinatorIdxsMap: coordIdxsMap,
 		CollectedFees:      collectedFees,
+		UpdatedAccounts:    tp.updatedAccounts,
 	}, nil
 }
 

@@ -501,11 +514,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
 		tp.zki.OnChain[tp.i] = big.NewInt(1)
 
 		// L1Txs
-		depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
+		depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
 		if err != nil {
 			return nil, nil, false, nil, tracerr.Wrap(err)
 		}
-		tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
+		tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
 		tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
 		if tx.FromBJJ != common.EmptyBJJComp {
 			tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)

@@ -515,6 +528,20 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
 		if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
 			tp.zki.ISOnChain[tp.i] = big.NewInt(1)
 		}
+
+		if tx.Type == common.TxTypeForceTransfer ||
+			tx.Type == common.TxTypeDepositTransfer ||
+			tx.Type == common.TxTypeCreateAccountDepositTransfer ||
+			tx.Type == common.TxTypeForceExit {
+			// in the cases where at L1Tx there is usage of the
+			// Amount parameter, add it at the ZKInputs.AmountF
+			// slot
+			amountF40, err := common.NewFloat40(tx.Amount)
+			if err != nil {
+				return nil, nil, false, nil, tracerr.Wrap(err)
+			}
+			tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
+		}
 	}
 
 	switch tx.Type {

@@ -578,7 +605,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
 
 		// execute exit flow
 		// coordIdxsMap is 'nil', as at L1Txs there is no L2 fees
-		exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx())
+		exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx(), tx.Amount)
 		if err != nil {
 			log.Error(err)
 			return nil, nil, false, nil, tracerr.Wrap(err)

@@ -588,7 +615,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
 	}
 
 	var createdAccount *common.Account
-	if tp.s.Typ == statedb.TypeSynchronizer &&
+	if tp.s.Type() == statedb.TypeSynchronizer &&
 		(tx.Type == common.TxTypeCreateAccountDeposit ||
 			tx.Type == common.TxTypeCreateAccountDepositTransfer) {
 		var err error

@@ -612,8 +639,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 	var err error
 	// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
 	if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
-		if tp.s.Typ == statedb.TypeSynchronizer {
+		if tp.s.Type() == statedb.TypeSynchronizer {
 			// this should never be reached
 			log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
 			return nil, nil, false, tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
 		}

@@ -657,6 +684,11 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 		tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
 
 		tp.zki.OnChain[tp.i] = big.NewInt(0)
+		amountF40, err := common.NewFloat40(tx.Amount)
+		if err != nil {
+			return nil, nil, false, tracerr.Wrap(err)
+		}
+		tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
 		tp.zki.NewAccount[tp.i] = big.NewInt(0)
 
 		// L2Txs

@@ -676,8 +708,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 	}
 
 	// if StateDB type==TypeSynchronizer, will need to add Nonce
-	if tp.s.Typ == statedb.TypeSynchronizer {
+	if tp.s.Type() == statedb.TypeSynchronizer {
 		// as type==TypeSynchronizer, always tx.ToIdx!=0
 		acc, err := tp.s.GetAccount(tx.FromIdx)
 		if err != nil {
 			log.Errorw("GetAccount", "fromIdx", tx.FromIdx, "err", err)

@@ -698,7 +730,7 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 	}
 	case common.TxTypeExit:
 		// execute exit flow
-		exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx())
+		exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx(), tx.Amount)
 		if err != nil {
 			log.Error(err)
 			return nil, nil, false, tracerr.Wrap(err)

@@ -720,7 +752,7 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
 		EthAddr: tx.FromEthAddr,
 	}
 
-	p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), account)
+	p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), account)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -755,6 +787,28 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
 	return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1)
 }
 
+// createAccount is a wrapper over the StateDB.CreateAccount method that also
+// stores the created account in the updatedAccounts map in case the StateDB is
+// of TypeSynchronizer
+func (tp *TxProcessor) createAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
+	if tp.s.Type() == statedb.TypeSynchronizer {
+		account.Idx = idx
+		tp.updatedAccounts[idx] = account
+	}
+	return tp.s.CreateAccount(idx, account)
+}
+
+// updateAccount is a wrapper over the StateDB.UpdateAccount method that also
+// stores the updated account in the updatedAccounts map in case the StateDB is
+// of TypeSynchronizer
+func (tp *TxProcessor) updateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
+	if tp.s.Type() == statedb.TypeSynchronizer {
+		account.Idx = idx
+		tp.updatedAccounts[idx] = account
+	}
+	return tp.s.UpdateAccount(idx, account)
+}
+
 // applyDeposit updates the balance in the account of the depositer, if
 // andTransfer parameter is set to true, the method will also apply the
 // Transfer of the L1Tx/DepositTransfer
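From here on, every write that previously went straight to `tp.s.CreateAccount`/`tp.s.UpdateAccount` is routed through the two wrappers defined above, so change tracking cannot be forgotten at any call site. Because `updatedAccounts` stores one pointer per `Idx`, repeated writes to the same account within a batch simply overwrite the entry, leaving exactly the last version. The pattern in isolation (names here are illustrative, not from the repo):

```
// Sketch of the record-on-write decorator: every mutation goes through
// one choke point that both applies the write and, when tracking is
// enabled, remembers the latest value per key.
type trackedStore struct {
	apply   func(idx int, v string) error
	updated map[int]string // nil when tracking is disabled
}

func (s *trackedStore) update(idx int, v string) error {
	if s.updated != nil {
		s.updated[idx] = v // later writes overwrite earlier ones
	}
	return s.apply(idx, v)
}
```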
@@ -785,7 +839,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
 	}
 
 	// update sender account in localStateDB
-	p, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
+	p, err := tp.updateAccount(tx.FromIdx, accSender)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -822,7 +876,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
 		accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
 
 		// update receiver account in localStateDB
-		p, err := tp.s.UpdateAccount(tx.ToIdx, accReceiver)
+		p, err := tp.updateAccount(tx.ToIdx, accReceiver)
 		if err != nil {
 			return tracerr.Wrap(err)
 		}

@@ -889,8 +943,8 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
 			accumulated := tp.AccumulatedFees[accCoord.Idx]
 			accumulated.Add(accumulated, fee)
 
-			if tp.s.Typ == statedb.TypeSynchronizer ||
-				tp.s.Typ == statedb.TypeBatchBuilder {
+			if tp.s.Type() == statedb.TypeSynchronizer ||
+				tp.s.Type() == statedb.TypeBatchBuilder {
 				collected := collectedFees[accCoord.TokenID]
 				collected.Add(collected, fee)
 			}

@@ -905,7 +959,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
 	}
 
 	// update sender account in localStateDB
-	pSender, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
+	pSender, err := tp.updateAccount(tx.FromIdx, accSender)
 	if err != nil {
 		log.Error(err)
 		return tracerr.Wrap(err)

@@ -944,7 +998,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
 	accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
 
 	// update receiver account in localStateDB
-	pReceiver, err := tp.s.UpdateAccount(auxToIdx, accReceiver)
+	pReceiver, err := tp.updateAccount(auxToIdx, accReceiver)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -987,7 +1041,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
 	}
 
 	// create Account of the Sender
-	p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
+	p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -1035,7 +1089,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
 	accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
 
 	// update receiver account in localStateDB
-	p, err = tp.s.UpdateAccount(tx.ToIdx, accReceiver)
+	p, err = tp.updateAccount(tx.ToIdx, accReceiver)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -1050,7 +1104,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
 // new Leaf in the ExitTree.
 func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 	collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree,
-	tx common.Tx) (*common.Account, bool, error) {
+	tx common.Tx, originalAmount *big.Int) (*common.Account, bool, error) {
 	// 0. subtract tx.Amount from current Account in StateMT
 	// add the tx.Amount into the Account (tx.FromIdx) in the ExitMT
 	acc, err := tp.s.GetAccount(tx.FromIdx)

@@ -1094,8 +1148,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 			accumulated := tp.AccumulatedFees[accCoord.Idx]
 			accumulated.Add(accumulated, fee)
 
-			if tp.s.Typ == statedb.TypeSynchronizer ||
-				tp.s.Typ == statedb.TypeBatchBuilder {
+			if tp.s.Type() == statedb.TypeSynchronizer ||
+				tp.s.Type() == statedb.TypeBatchBuilder {
 				collected := collectedFees[accCoord.TokenID]
 				collected.Add(collected, fee)
 			}

@@ -1109,7 +1163,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 		}
 	}
 
-	p, err := tp.s.UpdateAccount(tx.FromIdx, acc)
+	p, err := tp.updateAccount(tx.FromIdx, acc)
 	if err != nil {
 		return nil, false, tracerr.Wrap(err)
 	}

@@ -1120,6 +1174,21 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 	if exitTree == nil {
 		return nil, false, nil
 	}
+
+	// Do not add the Exit when Amount=0, not EffectiveAmount=0. In the
+	// txprocessor.applyExit function, the tx.Amount is in reality the
+	// EffectiveAmount, which is why the originalAmount parameter is used
+	// here: it contains the real value of the tx.Amount (not
+	// tx.EffectiveAmount). This is a particularity of the approach of the
+	// circuit; the idea is to update the circuit in the future so that
+	// when Amount>0 but EffectiveAmount=0 the Exit is not added to the
+	// Exits MerkleTree, but for the moment the Go code is adapted to the
+	// circuit.
+	if originalAmount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
+		// if the Exit Amount==0, the Exit is not added to the ExitTree
+		return nil, false, nil
+	}
 
 	exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx)
 	if tracerr.Unwrap(err) == db.ErrNotFound {
 		// 1a. if idx does not exist in exitTree:
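The guard added above is subtle: by the time `applyExit` runs, the `tx.Amount` inside the `common.Tx` is already the effective (possibly nullified) amount, so the decision to create an exit leaf must look at `originalAmount`, the amount the user actually signed. Only a signed `Amount` of exactly 0 skips the leaf; a non-zero signed amount whose effective value was nullified to 0 still creates one, mirroring the circuit. A hypothetical helper restating that rule:

```
// shouldAddExitLeaf restates the comment above; it is illustrative, not
// repo code. effectiveAmount (what was actually subtracted) is
// deliberately ignored: only the user-signed amount decides whether an
// exit leaf is created, matching the circuit's behaviour.
func shouldAddExitLeaf(signedAmount, effectiveAmount *big.Int) bool {
	_ = effectiveAmount
	return signedAmount.Sign() != 0
}
```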
@@ -1128,6 +1197,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 		exitAccount := &common.Account{
 			TokenID: acc.TokenID,
 			Nonce:   common.Nonce(0),
+			// as is a common.Tx, the tx.Amount is already an
+			// EffectiveAmount
 			Balance: tx.Amount,
 			BJJ:     acc.BJJ,
 			EthAddr: acc.EthAddr,

@@ -1141,7 +1212,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 				tp.zki.Sign2[tp.i] = big.NewInt(1)
 			}
 			tp.zki.Ay2[tp.i] = accBJJY
-			tp.zki.Balance2[tp.i] = tx.Amount
+			// Balance2 contains the ExitLeaf Balance before the
+			// leaf update, which is 0
+			tp.zki.Balance2[tp.i] = big.NewInt(0)
 			tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
 			// as Leaf didn't exist in the ExitTree, set NewExit[i]=1
 			tp.zki.NewExit[tp.i] = big.NewInt(1)

@@ -1175,7 +1248,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 				tp.zki.Sign2[tp.i] = big.NewInt(1)
 			}
 			tp.zki.Ay2[tp.i] = accBJJY
-			tp.zki.Balance2[tp.i] = tx.Amount
+			// Balance2 contains the ExitLeaf Balance before the leaf
+			// update
+			tp.zki.Balance2[tp.i] = exitAccount.Balance
 			tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
 		}
 

@@ -1193,6 +1268,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
 		}
 		tp.zki.OldKey2[tp.i] = p.OldKey.BigInt()
 		tp.zki.OldValue2[tp.i] = p.OldValue.BigInt()
+		tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
 	}
 
 	return exitAccount, false, nil
@@ -1,11 +1,10 @@
 package txprocessor
 
 import (
-	"encoding/binary"
-	"encoding/hex"
 	"io/ioutil"
 	"math/big"
 	"os"
+	"sort"
 	"testing"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"

@@ -36,7 +35,8 @@ func TestComputeEffectiveAmounts(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeSynchronizer, NLevels: 32})
 	assert.NoError(t, err)
 
 	set := `

@@ -212,7 +212,8 @@ func TestProcessTxsBalances(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeSynchronizer, NLevels: 32})
 	assert.NoError(t, err)
 
 	chainID := uint16(0)

@@ -358,7 +359,8 @@ func TestProcessTxsSynchronizer(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeSynchronizer, NLevels: 32})
 	assert.NoError(t, err)
 
 	chainID := uint16(0)

@@ -489,7 +491,8 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeBatchBuilder, NLevels: 32})
 	assert.NoError(t, err)
 
 	chainID := uint16(0)

@@ -580,7 +583,8 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeBatchBuilder, NLevels: 32})
 	assert.NoError(t, err)
 
 	// same values than in the js test

@@ -631,22 +635,22 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
 	defer assert.NoError(t, os.RemoveAll(dir))
 
 	nLevels := 16
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, nLevels)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeBatchBuilder, NLevels: nLevels})
 	assert.NoError(t, err)
 
 	users := txsets.GenerateJsUsers(t)
 
-	daMaxHex, err := hex.DecodeString("FFFF")
+	daMaxF40 := common.Float40(0xFFFFFFFFFF)
+	daMaxBI, err := daMaxF40.BigInt()
 	require.NoError(t, err)
-	daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
-	daMaxBI := daMaxF16.BigInt()
-	assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
+	assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())
 
-	daMax1Hex, err := hex.DecodeString("FFFE")
+	daMax1F40 := common.Float40(0xFFFFFFFFFE)
 	require.NoError(t, err)
-	daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
-	daMax1BI := daMax1F16.BigInt()
-	assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
+	daMax1BI, err := daMax1F40.BigInt()
+	require.NoError(t, err)
+	assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())
 
 	l1Txs := []common.L1Tx{
 		{

@@ -700,7 +704,8 @@ func initTestMultipleCoordIdxForTokenID(t *testing.T) (*TxProcessor, *til.Contex
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeBatchBuilder, NLevels: 32})
 	assert.NoError(t, err)
 
 	chainID := uint16(1)

@@ -790,7 +795,8 @@ func TestMultipleCoordIdxForTokenID(t *testing.T) {
 	checkBalanceByIdx(t, tp.s, 259, "0") // Coord0
 }
 
-func TestTwoExits(t *testing.T) {
+func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOutput,
+	[]*ProcessTxOutput, []*ProcessTxOutput) {
 	// In the first part we generate a batch with two force exits for the
 	// same account of 20 each. The txprocessor output should be a single
 	// exitInfo with balance of 40.

@@ -798,7 +804,9 @@ func TestTwoExits(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
 
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
+	nLevels := 16
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: stateDBType, NLevels: nLevels})
 	assert.NoError(t, err)
 
 	chainID := uint16(1)

@@ -834,10 +842,10 @@ func TestTwoExits(t *testing.T) {
 	require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
 
 	config := Config{
-		NLevels:  32,
-		MaxFeeTx: 64,
-		MaxTx:    512,
-		MaxL1Tx:  16,
+		NLevels:  uint32(nLevels),
+		MaxTx:    3,
+		MaxL1Tx:  2,
+		MaxFeeTx: 2,
 		ChainID:  chainID,
 	}
 	tp := NewTxProcessor(sdb, config)

@@ -850,8 +858,6 @@ func TestTwoExits(t *testing.T) {
 		}
 	}
 
-	assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
-	assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
 	acc, err := sdb.GetAccount(256)
 	require.NoError(t, err)
 	assert.Equal(t, big.NewInt(60), acc.Balance)

@@ -865,7 +871,8 @@ func TestTwoExits(t *testing.T) {
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir2))
 
-	sdb2, err := statedb.NewStateDB(dir2, 128, statedb.TypeSynchronizer, 32)
+	sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
+		Type: stateDBType, NLevels: nLevels})
 	assert.NoError(t, err)
 
 	tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)

@@ -903,5 +910,261 @@ func TestTwoExits(t *testing.T) {
 		}
 	}
 
-	assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
+	// In the third part we start a fresh statedb and generate a batch with
+	// two force exits for the same account as before, but where the 1st Exit
+	// is with all the amount, and the 2nd Exit is with more amount than
+	// the available balance. The txprocessor output should be a single
+	// exitInfo with balance of 40, and the exit merkle tree proof should
+	// be equal to the previous ones.
+
+	dir3, err := ioutil.TempDir("", "tmpdb")
+	require.NoError(t, err)
+	defer assert.NoError(t, os.RemoveAll(dir3))
+
+	sdb3, err := statedb.NewStateDB(statedb.Config{Path: dir3, Keep: 128,
+		Type: stateDBType, NLevels: nLevels})
+	assert.NoError(t, err)
+
+	tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+
+	// Single exit with balance of both exits in previous set. The exit
+	// root should match.
+	set3 := `
+		Type: Blockchain
+
+		CreateAccountDeposit(0) A: 100
+
+		> batchL1 // freeze L1User{1}
+		> batchL1 // forge L1User{1}
+
+		ForceExit(0) A: 40
+		ForceExit(0) A: 100
+
+		> batchL1 // freeze L1User{2}
+		> batchL1 // forge L1User{2}
+		> block
+	`
+	blocks, err = tc.GenerateBlocks(set3)
+	require.NoError(t, err)
+	err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
+	require.NoError(t, err)
+	err = tc.FillBlocksForgedL1UserTxs(blocks)
+	require.NoError(t, err)
+
+	tp = NewTxProcessor(sdb3, config)
+	ptOuts3 := []*ProcessTxOutput{}
+	for _, block := range blocks {
+		for _, batch := range block.Rollup.Batches {
+			ptOut, err := tp.ProcessTxs(nil, batch.L1UserTxs, nil, nil)
+			require.NoError(t, err)
+			ptOuts3 = append(ptOuts3, ptOut)
+		}
+	}
+
+	return ptOuts, ptOuts2, ptOuts3
+}
+
+func TestTwoExitsSynchronizer(t *testing.T) {
+	ptOuts, ptOuts2, ptOuts3 := testTwoExits(t, statedb.TypeSynchronizer)
+
+	assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
+	assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
+
+	assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
+	assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts3[3].ExitInfos[0].MerkleProof)
+}
+
+func TestExitOf0Amount(t *testing.T) {
+	// Test to check that when doing an Exit with amount 0 the Exit Root
+	// does not change (as there is no new Exit Leaf created)
+
+	dir, err := ioutil.TempDir("", "tmpdb")
+	require.NoError(t, err)
+	defer assert.NoError(t, os.RemoveAll(dir))
+
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeBatchBuilder, NLevels: 32})
+	assert.NoError(t, err)
+
+	chainID := uint16(1)
+
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+
+	set := `
+		Type: Blockchain
+
+		CreateAccountDeposit(0) A: 100
+		CreateAccountDeposit(0) B: 100
+
+		> batchL1 // batch1: freeze L1User{2}
+		> batchL1 // batch2: forge L1User{2}
+
+		ForceExit(0) A: 10
+		ForceExit(0) B: 0
+
+		> batchL1 // batch3: freeze L1User{2}
+		> batchL1 // batch4: forge L1User{2}
+
+		ForceExit(0) A: 10
+
+		> batchL1 // batch5: freeze L1User{1}
+		> batchL1 // batch6: forge L1User{1}
+
+		ForceExit(0) A: 0
+		> batchL1 // batch7: freeze L1User{1}
+		> batchL1 // batch8: forge L1User{1}
+		> block
+	`
+	blocks, err := tc.GenerateBlocks(set)
+	require.NoError(t, err)
+	err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
+	require.NoError(t, err)
+	err = tc.FillBlocksForgedL1UserTxs(blocks)
+	require.NoError(t, err)
+
+	// Sanity check
+	require.Equal(t, 2, len(blocks[0].Rollup.Batches[1].L1UserTxs))
+	require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
+	require.Equal(t, big.NewInt(10), blocks[0].Rollup.Batches[3].L1UserTxs[0].Amount)
+	require.Equal(t, big.NewInt(0), blocks[0].Rollup.Batches[3].L1UserTxs[1].Amount)
+
+	config := Config{
+		NLevels:  32,
+		MaxFeeTx: 64,
+		MaxTx:    512,
+		MaxL1Tx:  16,
+		ChainID:  chainID,
+	}
+	tp := NewTxProcessor(sdb, config)
+
+	// For this test, only the batches with transactions are processed:
+	// - Batch2, equivalent to Batches[1]
+	// - Batch4, equivalent to Batches[3]
+	// - Batch6, equivalent to Batches[5]
+	// - Batch8, equivalent to Batches[7]
+
+	// process Batch2:
+	_, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[1].L1UserTxs, nil, nil)
+	require.NoError(t, err)
+	// process Batch4:
+	ptOut, err := tp.ProcessTxs(nil, blocks[0].Rollup.Batches[3].L1UserTxs, nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
+	exitRootBatch4 := ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String()
+
+	// process Batch6:
+	ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[5].L1UserTxs, nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
+	// Expect the ExitRoot for Batch6 to be equal to the one for Batch4,
+	// as Batch4 & Batch6 have the same tx with Exit Amount=10, and
+	// Batch4 has a 2nd tx with Exit Amount=0.
+	assert.Equal(t, exitRootBatch4, ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
+
+	// For Batch8, as there is only 1 exit with Amount=0, the ExitRoot
+	// should be 0.
+	// process Batch8:
+	ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[7].L1UserTxs, nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
+}
+
+func TestUpdatedAccounts(t *testing.T) {
+	dir, err := ioutil.TempDir("", "tmpdb")
+	require.NoError(t, err)
+	defer assert.NoError(t, os.RemoveAll(dir))
+
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeSynchronizer, NLevels: 32})
+	assert.NoError(t, err)
+
+	set := `
+		Type: Blockchain
+		AddToken(1)
+		CreateAccountCoordinator(0) Coord // 256
+		CreateAccountCoordinator(1) Coord // 257
+		> batch // 1
+		CreateAccountDeposit(0) A: 50 // 258
+		CreateAccountDeposit(0) B: 60 // 259
+		CreateAccountDeposit(1) A: 70 // 260
+		CreateAccountDeposit(1) B: 80 // 261
+		> batchL1 // 2
+		> batchL1 // 3
+		Transfer(0) A-B: 5 (126)
+		> batch // 4
+		Exit(1) B: 5 (126)
+		> batch // 5
+		> block
+	`
+
+	chainID := uint16(0)
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	require.NoError(t, err)
+	tilCfgExtra := til.ConfigExtra{
+		BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
+		CoordUser:     "Coord",
+	}
+	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
+	require.NoError(t, err)
+	tc.FillBlocksL1UserTxsBatchNum(blocks)
+	err = tc.FillBlocksForgedL1UserTxs(blocks)
+	require.NoError(t, err)
+
+	require.Equal(t, 5, len(blocks[0].Rollup.Batches))
+
+	config := Config{
+		NLevels:  32,
+		MaxFeeTx: 64,
+		MaxTx:    512,
+		MaxL1Tx:  16,
+		ChainID:  chainID,
+	}
+	tp := NewTxProcessor(sdb, config)
+
+	sortedKeys := func(m map[common.Idx]*common.Account) []int {
+		keys := make([]int, 0)
+		for k := range m {
+			keys = append(keys, int(k))
+		}
+		sort.Ints(keys)
+		return keys
+	}
+
+	for _, batch := range blocks[0].Rollup.Batches {
+		l2Txs := common.L2TxsToPoolL2Txs(batch.L2Txs)
+		ptOut, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator, batch.L1UserTxs,
+			batch.L1CoordinatorTxs, l2Txs)
+		require.NoError(t, err)
+		switch batch.Batch.BatchNum {
+		case 1:
+			assert.Equal(t, 2, len(ptOut.UpdatedAccounts))
+			assert.Equal(t, []int{256, 257}, sortedKeys(ptOut.UpdatedAccounts))
+		case 2:
+			assert.Equal(t, 0, len(ptOut.UpdatedAccounts))
+			assert.Equal(t, []int{}, sortedKeys(ptOut.UpdatedAccounts))
+		case 3:
+			assert.Equal(t, 4, len(ptOut.UpdatedAccounts))
+			assert.Equal(t, []int{258, 259, 260, 261}, sortedKeys(ptOut.UpdatedAccounts))
+		case 4:
+			assert.Equal(t, 2+1, len(ptOut.UpdatedAccounts))
+			assert.Equal(t, []int{256, 258, 259}, sortedKeys(ptOut.UpdatedAccounts))
+		case 5:
+			assert.Equal(t, 1+1, len(ptOut.UpdatedAccounts))
+			assert.Equal(t, []int{257, 261}, sortedKeys(ptOut.UpdatedAccounts))
+		}
+		for idx, updAcc := range ptOut.UpdatedAccounts {
+			acc, err := sdb.GetAccount(idx)
+			require.NoError(t, err)
+			// If acc.Balance is 0, set it to 0 with big.NewInt so
+			// that the comparison succeeds. Without this, the
+			// comparison will not succeed because acc.Balance is
+			// set from a slice, and thus the internal big.Int
+			// buffer is not nil (big.Int.abs)
+			if acc.Balance.BitLen() == 0 {
+				acc.Balance = big.NewInt(0)
+			}
+			assert.Equal(t, acc, updAcc)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
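The `big.NewInt(0)` normalization in the loop above is easy to miss: two `big.Int`s can be numerically equal while their internal buffers differ, and `assert.Equal` compares structurally. A minimal standalone sketch of the gotcha (illustration only, not code from this repo):

```
package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	a := big.NewInt(0)                    // internal abs buffer is nil
	b := new(big.Int).SetBytes([]byte{0}) // internal abs buffer is empty but non-nil

	fmt.Println(a.Cmp(b) == 0)           // true: numerically equal
	fmt.Println(reflect.DeepEqual(a, b)) // false: internal representation differs
}
```

testify's `assert.Equal` performs this kind of deep comparison, which is why the test resets a zero balance with `big.NewInt(0)` before comparing accounts.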
File diff suppressed because one or more lines are too long
53  txselector/metrics.go  Normal file
@@ -0,0 +1,53 @@
package txselector

import "github.com/prometheus/client_golang/prometheus"

var (
	metricGetL2TxSelection = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "txsel_get_l2_txselecton_total",
			Help: "",
		},
	)
	metricGetL1L2TxSelection = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "txsel_get_l1_l2_txselecton_total",
			Help: "",
		},
	)

	metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "txsel_selected_l1_coordinator_txs",
			Help: "",
		},
	)
	metricSelectedL1UserTxs = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "txsel_selected_l1_user_txs",
			Help: "",
		},
	)
	metricSelectedL2Txs = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "txsel_selected_l2_txs",
			Help: "",
		},
	)
	metricDiscardedL2Txs = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "txsel_discarded_l2_txs",
			Help: "",
		},
	)
)

func init() {
	prometheus.MustRegister(metricGetL2TxSelection)
	prometheus.MustRegister(metricGetL1L2TxSelection)

	prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
	prometheus.MustRegister(metricSelectedL1UserTxs)
	prometheus.MustRegister(metricSelectedL2Txs)
	prometheus.MustRegister(metricDiscardedL2Txs)
}
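These counters and gauges land in the Prometheus default registry via `prometheus.MustRegister`, so scraping them only requires exposing that registry over HTTP. A minimal sketch of the wiring (an illustration, not this repo's server code; the `/metrics` path and port are arbitrary choices):

```
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler serves the default registry, which is where
	// prometheus.MustRegister placed the txsel_* metrics above.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```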
@@ -3,13 +3,13 @@ package txselector
 // current: very simple version of TxSelector
 
 import (
-	"bytes"
 	"fmt"
 	"math/big"
 	"sort"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/db/kvdb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/log"
@@ -18,19 +18,6 @@ import (
 	"github.com/iden3/go-iden3-crypto/babyjub"
 )
 
-// txs implements the interface Sort for an array of Tx
-type txs []common.PoolL2Tx
-
-func (t txs) Len() int {
-	return len(t)
-}
-func (t txs) Swap(i, j int) {
-	t[i], t[j] = t[j], t[i]
-}
-func (t txs) Less(i, j int) bool {
-	return t[i].AbsoluteFee > t[j].AbsoluteFee
-}
-
 // CoordAccount contains the data of the Coordinator account, that will be used
 // to create new transactions of CreateAccountDeposit type to add new TokenID
 // accounts for the Coordinator to receive the fees.
@@ -62,8 +49,14 @@ type TxSelector struct {
 // NewTxSelector returns a *TxSelector
 func NewTxSelector(coordAccount *CoordAccount, dbpath string,
 	synchronizerStateDB *statedb.StateDB, l2 *l2db.L2DB) (*TxSelector, error) {
-	localAccountsDB, err := statedb.NewLocalStateDB(dbpath, 128,
-		synchronizerStateDB, statedb.TypeTxSelector, 0) // without merkletree
+	localAccountsDB, err := statedb.NewLocalStateDB(
+		statedb.Config{
+			Path:    dbpath,
+			Keep:    kvdb.DefaultKeep,
+			Type:    statedb.TypeTxSelector,
+			NLevels: 0,
+		},
+		synchronizerStateDB) // without merkletree
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -82,12 +75,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
 
 // Reset tells the TxSelector to get its internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
-	err := txsel.localAccountsDB.Reset(batchNum, true)
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	return nil
+func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
+	return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
 }
 
 func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
@@ -144,9 +133,11 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 // included in the next batch.
 func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]common.Idx,
 	[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
-	coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, discardedL2Txs, err :=
-		txsel.GetL1L2TxSelection(selectionConfig, []common.L1Tx{})
-	return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err)
+	metricGetL2TxSelection.Inc()
+	coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
+		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
+	return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
+		discardedL2Txs, tracerr.Wrap(err)
 }
 
 // GetL1L2TxSelection returns the selection of L1 + L2 txs.
@@ -158,6 +149,16 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]c
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
 func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
+	l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
+	[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
+	metricGetL1L2TxSelection.Inc()
+	coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
+		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
+	return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
+		discardedL2Txs, tracerr.Wrap(err)
+}
+
+func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
 	l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
 	[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
 	// WIP.0: the TxSelector is not optimized and will need a redesign. The
@@ -188,14 +189,16 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 	}
 
 	// discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch
-	var discardedL2Txs []common.PoolL2Tx
 	var l1CoordinatorTxs []common.L1Tx
 	positionL1 := len(l1UserTxs)
 	var accAuths [][]byte
 
 	// sort l2TxsRaw (cropping at MaxTx at this point)
-	l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
+	l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
+	for i := range discardedL2Txs {
+		discardedL2Txs[i].Info = "Tx not selected due to low absolute fee"
+	}
 
 	noncesMap := make(map[common.Idx]common.Nonce)
 	var l2Txs []common.PoolL2Tx
@@ -229,7 +232,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 		if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
 			// discard L2Tx, and update Info parameter of
 			// the tx, and add it to the discardedTxs array
-			l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
+			l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
+				"L1CoordinatorTx and there is not enough space for L1Coordinator"
 			discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
 			continue
 		}
@@ -254,7 +258,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 			// not valid Amount with current Balance. Discard L2Tx,
 			// and update Info parameter of the tx, and add it to
 			// the discardedTxs array
-			l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
+			l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
+				"Current sender account Balance: %s, Amount+Fee: %s",
+				balance.String(), feeAndAmount.String())
 			discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 			continue
 		}
@@ -266,7 +272,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 			// not valid Nonce at tx. Discard L2Tx, and update Info
 			// parameter of the tx, and add it to the discardedTxs
 			// array
-			l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
+			l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
+				"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
 			discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 			continue
 		}
@@ -284,18 +291,31 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 				txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
 					len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
 			if err != nil {
-				log.Debug(err)
+				log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
 				// Discard L2Tx, and update Info parameter of
 				// the tx, and add it to the discardedTxs array
-				l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
+				l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
+					err.Error())
 				discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 				continue
 			}
-			if accAuth != nil && l1CoordinatorTx != nil {
+			if l1CoordinatorTx != nil {
+				// If ToEthAddr == 0xff.. this means that we
+				// are handling a TransferToBJJ, which doesn't
+				// require an authorization because it doesn't
+				// contain a valid ethereum address.
+				// Otherwise only create the account if we have
+				// the corresponding authorization
+				if validL2Tx.ToEthAddr == common.FFAddr {
+					accAuths = append(accAuths, common.EmptyEthSignature)
+					l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
+					positionL1++
+				} else if accAuth != nil {
 					accAuths = append(accAuths, accAuth.Signature)
 					l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
 					positionL1++
+				}
 			}
 			if validL2Tx != nil {
 				validTxs = append(validTxs, *validL2Tx)
 			}
@@ -307,8 +327,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 					"ToIdx", l2Txs[i].ToIdx)
 				// Discard L2Tx, and update Info parameter of
 				// the tx, and add it to the discardedTxs array
-				l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
-					l2Txs[i].ToIdx)
+				l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+
+					"ToIdx: %d", l2Txs[i].ToIdx)
 				discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 				continue
 			}
@@ -320,7 +340,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 					// Discard L2Tx, and update Info
 					// parameter of the tx, and add it to
 					// the discardedTxs array
-					l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
+					l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
+						"does not correspond to the Account.EthAddr. "+
+						"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
 						l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
 					discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 					continue
@@ -334,7 +356,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 					// Discard L2Tx, and update Info
 					// parameter of the tx, and add it to
 					// the discardedTxs array
-					l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
+					l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
+						"does not correspond to the Account.BJJ. "+
+						"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
 						l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
 					discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 					continue
@@ -408,7 +432,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 			log.Error(err)
 			// Discard L2Tx, and update Info parameter of the tx,
 			// and add it to the discardedTxs array
-			selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
+			selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
 			discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
 			continue
 		}
@@ -440,6 +464,11 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 		return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
 	}
 
+	metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
+	metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+	metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
+	metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+
 	return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
 }
 
@@ -464,8 +493,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 
 	var l1CoordinatorTx *common.L1Tx
 	var accAuth *common.AccountCreationAuth
-	if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
-		!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
+	if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
 		// case: ToEthAddr != 0x00 neither 0xff
 		if l2Tx.ToBJJ != common.EmptyBJJComp {
 			// case: ToBJJ!=0:
@@ -521,8 +549,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 			DepositAmount: big.NewInt(0),
 			Type:          common.TxTypeCreateAccountDeposit,
 		}
-	} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
-		l2Tx.ToBJJ != common.EmptyBJJComp {
+	} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
 		// if idx exist for EthAddr&BJJ use it
 		_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
 			l2Tx.TokenID)
@@ -548,7 +575,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 	}
 	if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
 		// L2Tx discarded
-		return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
+		return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
+			"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
 	}
 
 	return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -557,7 +585,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
 	addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
 	for i := 0; i < len(l1CoordinatorTxs); i++ {
-		if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
+		if l1CoordinatorTxs[i].FromEthAddr == addr &&
 			l1CoordinatorTxs[i].TokenID == tokenID &&
 			l1CoordinatorTxs[i].FromBJJ == bjj {
 			return true
@@ -567,21 +595,33 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
 }
 
 // getL2Profitable returns the profitable selection of L2Txs sorted by Nonce
-func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
-	sort.Sort(txs(l2Txs))
-	if len(l2Txs) < int(max) {
-		return l2Txs
-	}
+func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
+	[]common.PoolL2Tx) {
+	// First sort by nonce so that txs from the same account are sorted so
+	// that they could be applied in succession.
+	sort.Slice(l2Txs, func(i, j int) bool {
+		return l2Txs[i].Nonce < l2Txs[j].Nonce
+	})
+	// Sort by absolute fee with SliceStable, so that txs with same
+	// AbsoluteFee are not rearranged and nonce order is kept in such case
+	sort.SliceStable(l2Txs, func(i, j int) bool {
+		return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
+	})
+
+	discardedL2Txs := []common.PoolL2Tx{}
+	if len(l2Txs) > int(max) {
+		discardedL2Txs = l2Txs[max:]
 		l2Txs = l2Txs[:max]
+	}
+
 	// sort l2Txs by Nonce. This can be done in many different ways, what
 	// is needed is to output the l2Txs where the Nonce of l2Txs for each
 	// Account is sorted, but the l2Txs can not be grouped by sender Account
 	// neither by Fee. This is because later on the Nonces will need to be
 	// sequential for the zkproof generation.
-	sort.SliceStable(l2Txs, func(i, j int) bool {
+	sort.Slice(l2Txs, func(i, j int) bool {
 		return l2Txs[i].Nonce < l2Txs[j].Nonce
 	})
 
-	return l2Txs
+	return l2Txs, discardedL2Txs
 }
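The reworked `getL2Profitable` relies on a two-pass sort: an initial sort by `Nonce`, then a stable sort by `AbsoluteFee` descending, so that among equal-fee txs the nonce order survives and the txs can be applied in succession. A standalone sketch of the idea with a hypothetical `tx` type (not the repo's `common.PoolL2Tx`):

```
package main

import (
	"fmt"
	"sort"
)

type tx struct {
	Nonce uint64
	Fee   float64
}

func main() {
	txs := []tx{{2, 1.0}, {0, 1.0}, {1, 5.0}, {1, 1.0}, {0, 5.0}}

	// Pass 1: order by nonce.
	sort.Slice(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })
	// Pass 2: stable order by fee, descending; equal-fee txs keep the
	// nonce order established by pass 1.
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].Fee > txs[j].Fee })

	fmt.Println(txs) // [{0 5} {1 5} {0 1} {1 1} {2 1}]
}
```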
@@ -29,12 +29,13 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)
-	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
+	l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
 
 	dir, err := ioutil.TempDir("", "tmpdb")
 	require.NoError(t, err)
 	defer assert.NoError(t, os.RemoveAll(dir))
-	syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
+	syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
+		Type: statedb.TypeTxSelector, NLevels: 0})
 	require.NoError(t, err)
 
 	txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -47,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 		BJJ:                 coordUser.BJJ.Public().Compress(),
 		AccountCreationAuth: nil,
 	}
-	fmt.Printf("%v", coordAccount)
+	// fmt.Printf("%v\n", coordAccount)
 	auth := common.AccountCreationAuth{
 		EthAddr: coordUser.Addr,
 		BJJ:     coordUser.BJJ.Public().Compress(),
@@ -105,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
 		})
 	}
 
-	hdb := historydb.NewHistoryDB(db, nil)
+	hdb := historydb.NewHistoryDB(db, db, nil)
 	assert.NoError(t, hdb.AddBlock(&common.Block{
 		Num: 1,
 	}))
@@ -423,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
 	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
 	require.NoError(t, err)
 
-	expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
-	expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
-	expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
+	expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
+	expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
+	expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
 
 	// batch2
 	// prepare the PoolL2Txs
@@ -496,3 +497,215 @@
 	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
 	require.NoError(t, err)
 }
+
+func TestTransferToBjj(t *testing.T) {
+	set := `
+		Type: Blockchain
+		AddToken(1)
+
+		CreateAccountDeposit(0) Coord: 0
+		CreateAccountDeposit(0) A: 1000
+		CreateAccountDeposit(0) B: 1000
+		CreateAccountDeposit(1) B: 1000
+
+		> batchL1 // freeze L1User{1}
+		> batchL1 // forge L1User{1}
+		> block
+	`
+
+	chainID := uint16(0)
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	assert.NoError(t, err)
+
+	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+	txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+
+	// restart nonces of TilContext, as will be set by generating directly
+	// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
+	tc.RestartNonces()
+
+	addTokens(t, tc, txsel.l2db.DB())
+
+	tpc := txprocessor.Config{
+		NLevels:  16,
+		MaxFeeTx: 10,
+		MaxTx:    20,
+		MaxL1Tx:  10,
+		ChainID:  chainID,
+	}
+	selectionConfig := &SelectionConfig{
+		MaxL1UserTxs:      5,
+		TxProcessorConfig: tpc,
+	}
+	// batch1 to freeze L1UserTxs that will create some accounts with
+	// positive balance
+	l1UserTxs := []common.L1Tx{}
+	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+
+	// The transfer is ToBJJ to a BJJ-only account that doesn't exist,
+	// and the coordinator will create it via an L1CoordTx.
+
+	batchPoolL2 := `
+		Type: PoolL2
+		PoolTransferToBJJ(0) A-B: 50 (126)
+	`
+	poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+
+	// add the PoolL2Txs to the l2DB
+	addL2Txs(t, txsel, poolL2Txs)
+
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 4, len(oL1UserTxs))
+	// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
+	require.Equal(t, 1, len(oL1CoordTxs))
+	assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
+	assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
+	// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
+	assert.Equal(t, 1, len(oL2Txs))
+	assert.Equal(t, 0, len(discardedL2Txs))
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+
+	// Now the BJJ-only account for B is already created, so the transfer
+	// happens without an L1CoordTx that creates the user account.
+
+	batchPoolL2 = `
+		Type: PoolL2
+		PoolTransferToBJJ(0) A-B: 50 (126)
+	`
+	poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	addL2Txs(t, txsel, poolL2Txs)
+
+	l1UserTxs = []common.L1Tx{}
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
+	assert.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 1, len(oL2Txs))
+	assert.Equal(t, 0, len(discardedL2Txs))
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+
+	// The transfer now is ToBJJ to a BJJ-only account that doesn't exist,
+	// and the coordinator will create it via an L1CoordTx. Since it's a
+	// transfer of a token for which the coordinator doesn't have a fee
+	// account, another L1CoordTx will be created for the coordinator to
+	// receive the fees.
+
+	batchPoolL2 = `
+		Type: PoolL2
+		PoolTransferToBJJ(1) B-A: 50 (126)
+	`
+	poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	addL2Txs(t, txsel, poolL2Txs)
+
+	l1UserTxs = []common.L1Tx{}
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	// We expect the coordinator to add an L1CoordTx to create an account
+	// to receive the fees by the coordinator and another one for the
+	// recipient of the l2tx
+	assert.Equal(t, 2, len(oL1CoordTxs))
+	// [0] Coordinator account creation for token 1
+	assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
+	// [1] User A BJJ-only account creation for token 1
+	assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
+	assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
+	assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
+
+	assert.Equal(t, 1, len(oL2Txs))
+	assert.Equal(t, 0, len(discardedL2Txs))
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+}
+
+func TestTransferManyFromSameAccount(t *testing.T) {
+	set := `
+		Type: Blockchain
+
+		CreateAccountDeposit(0) Coord: 0
+		CreateAccountDeposit(0) A: 1000
+		CreateAccountDeposit(0) B: 1000
+
+		> batchL1 // freeze L1User{1}
+		> batchL1 // forge L1User{1}
+		> block
+	`
+
+	chainID := uint16(0)
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	assert.NoError(t, err)
+
+	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+	txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+
+	// restart nonces of TilContext, as will be set by generating directly
+	// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
+	tc.RestartNonces()
+
+	tpc := txprocessor.Config{
+		NLevels:  16,
+		MaxFeeTx: 10,
+		MaxTx:    10,
+		MaxL1Tx:  10,
+		ChainID:  chainID,
+	}
+	selectionConfig := &SelectionConfig{
+		MaxL1UserTxs:      5,
+		TxProcessorConfig: tpc,
+	}
+	// batch1 to freeze L1UserTxs
+	l1UserTxs := []common.L1Tx{}
+	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+
+	// 11 transfers from the same account
+
+	batchPoolL2 := `
+		Type: PoolL2
+		PoolTransfer(0) A-B: 10 (126) // 1
+		PoolTransfer(0) A-B: 10 (126) // 2
+		PoolTransfer(0) A-B: 10 (126) // 3
+		PoolTransfer(0) A-B: 10 (126) // 4
+		PoolTransfer(0) A-B: 10 (126) // 5
+		PoolTransfer(0) A-B: 10 (126) // 6
+		PoolTransfer(0) A-B: 10 (126) // 7
+		PoolTransfer(0) A-B: 10 (126) // 8
+		PoolTransfer(0) A-B: 10 (126) // 9
+		PoolTransfer(0) A-B: 10 (126) // 10
+		PoolTransfer(0) A-B: 10 (126) // 11
+	`
+	poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	require.Equal(t, 11, len(poolL2Txs))
+
+	// reorder poolL2Txs so that nonces are not sorted
+	poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
+	poolL2Txs[1], poolL2Txs[10] = poolL2Txs[10], poolL2Txs[1]
+
+	// add the PoolL2Txs to the l2DB
+	addL2Txs(t, txsel, poolL2Txs)
+	// batch 2 to create some accounts with positive balance, and do the
+	// L2Tx transfers from account A
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 3, len(oL1UserTxs))
+	require.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 7, len(oL2Txs))
+	assert.Equal(t, 1, len(discardedL2Txs))
+
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+}
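`TestTransferManyFromSameAccount` above shuffles the pool so that nonces arrive out of order, then checks that only part of the account's txs is forged. The underlying invariant is that txs from one account are only selectable while their nonces continue the account nonce with no gap; a standalone sketch of that invariant with hypothetical types (not the repo's selection code):

```
package main

import (
	"fmt"
	"sort"
)

// selectSequential keeps, in nonce order, only the txs whose nonces
// continue accountNonce in strict succession, up to max slots.
func selectSequential(accountNonce uint64, txNonces []uint64, max int) []uint64 {
	sort.Slice(txNonces, func(i, j int) bool { return txNonces[i] < txNonces[j] })
	var selected []uint64
	next := accountNonce
	for _, n := range txNonces {
		if len(selected) == max {
			break // batch capacity reached
		}
		if n != next {
			break // nonce gap: later txs can't be applied yet
		}
		selected = append(selected, n)
		next++
	}
	return selected
}

func main() {
	nonces := []uint64{7, 10, 2, 3, 4, 5, 6, 0, 8, 9, 1}
	fmt.Println(selectSequential(0, nonces, 7)) // [0 1 2 3 4 5 6]
}
```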