Mirror of https://github.com/arnaucube/hermez-node.git
Synced 2026-02-07 03:16:45 +01:00

Compare commits: 37 commits (feature/fl ... feature/co)
SHA1: 9dcf166080, 9d08ec6978, ffda9fa1ef, 5a11aa5c27, 3e5e9bd633, c83047f527,
bcd576480c, 35ea597ac4, 8259aee884, 72862147f3, 3706ddb2fb, df0cc32eed,
67b2b7da4b, e23063380c, ed4d39fcd1, d6ec1910da, c829eb99dc, 6ecb8118bd,
4500820a03, b4e6104fd3, 28f026f628, 688d376ce0, 2547d5dce7, bb8d81c3aa,
af6f114667, e2376980f8, 264f01b572, a21793b2b0, 7b01f6a288, f2e5800ebd,
f0e79f3d55, 26fbeb5c68, 05104b0565, 53507edabb, 729966f854, 1c10a01cf7,
1d0abe438f
@@ -4,6 +4,10 @@ Go implementation of the Hermez node.

## Developing

### Go version

The `hermez-node` has been tested with go version 1.14

### Unit testing

Running the unit tests requires a connection to a PostgreSQL database. You can
@@ -4,10 +4,7 @@ import (
"net/http"

"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/tracerr"
)

func (a *API) getAccount(c *gin.Context) {
@@ -23,16 +20,6 @@ func (a *API) getAccount(c *gin.Context) {
return
}

// Get balance from stateDB
account, err := a.s.LastGetAccount(*idx)
if err != nil {
retSQLErr(err, c)
return
}

apiAccount.Balance = apitypes.NewBigIntStr(account.Balance)
apiAccount.Nonce = account.Nonce

c.JSON(http.StatusOK, apiAccount)
}

@@ -57,26 +44,6 @@ func (a *API) getAccounts(c *gin.Context) {
return
}

// Get balances from stateDB
if err := a.s.LastRead(func(sdb *statedb.Last) error {
for x, apiAccount := range apiAccounts {
idx, err := stringToIdx(string(apiAccount.Idx), "Account Idx")
if err != nil {
return tracerr.Wrap(err)
}
account, err := sdb.GetAccount(*idx)
if err != nil {
return tracerr.Wrap(err)
}
apiAccounts[x].Balance = apitypes.NewBigIntStr(account.Balance)
apiAccounts[x].Nonce = account.Nonce
}
return nil
}); err != nil {
retSQLErr(err, c)
return
}

// Build succesfull response
type accountResponse struct {
Accounts []historydb.AccountAPI `json:"accounts"`
@@ -9,7 +9,6 @@ import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/tracerr"
)

@@ -34,7 +33,6 @@ type Status struct {
type API struct {
h *historydb.HistoryDB
cg *configAPI
s *statedb.StateDB
l2 *l2db.L2DB
status Status
chainID uint16
@@ -46,7 +44,6 @@ func NewAPI(
coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine,
hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB,
config *Config,
) (*API, error) {
@@ -66,7 +63,6 @@ func NewAPI(
AuctionConstants: config.AuctionConstants,
WDelayerConstants: config.WDelayerConstants,
},
s: sdb,
l2: l2db,
status: Status{},
chainID: config.ChainID,
@@ -22,7 +22,6 @@ import (
|
||||
"github.com/hermeznetwork/hermez-node/db"
|
||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||
"github.com/hermeznetwork/hermez-node/db/statedb"
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/hermeznetwork/hermez-node/test"
|
||||
"github.com/hermeznetwork/hermez-node/test/til"
|
||||
@@ -216,12 +215,8 @@ func TestMain(m *testing.M) {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeTxSelector, NLevels: 0})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// L2DB
|
||||
l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
|
||||
l2DB := l2db.NewL2DB(database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
|
||||
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
|
||||
// Config (smart contract constants)
|
||||
chainID := uint16(0)
|
||||
@@ -239,7 +234,6 @@ func TestMain(m *testing.M) {
|
||||
true,
|
||||
apiGin,
|
||||
hdb,
|
||||
sdb,
|
||||
l2DB,
|
||||
&_config,
|
||||
)
|
||||
@@ -350,19 +344,6 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
}
|
||||
|
||||
// lastBlockNum2 := blocksData[len(blocksData)-1].Block.EthBlockNum
|
||||
|
||||
// Add accounts to StateDB
|
||||
for i := 0; i < len(commonAccounts); i++ {
|
||||
if _, err := api.s.CreateAccount(commonAccounts[i].Idx, &commonAccounts[i]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// Make a checkpoint to make the accounts available in Last
|
||||
if err := api.s.MakeCheckpoint(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Generate Coordinators and add them to HistoryDB
|
||||
const nCoords = 10
|
||||
commonCoords := test.GenCoordinators(nCoords, commonBlocks)
|
||||
@@ -529,13 +510,41 @@ func TestMain(m *testing.M) {
|
||||
testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks)
|
||||
testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs)
|
||||
poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts)
|
||||
// Add balance and nonce to historyDB
|
||||
accounts := genTestAccounts(commonAccounts, testTokens)
|
||||
accUpdates := []common.AccountUpdate{}
|
||||
for i := 0; i < len(accounts); i++ {
|
||||
balance := new(big.Int)
|
||||
balance.SetString(string(*accounts[i].Balance), 10)
|
||||
idx, err := stringToIdx(string(accounts[i].Idx), "foo")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
accUpdates = append(accUpdates, common.AccountUpdate{
|
||||
EthBlockNum: 0,
|
||||
BatchNum: 1,
|
||||
Idx: *idx,
|
||||
Nonce: 0,
|
||||
Balance: balance,
|
||||
})
|
||||
accUpdates = append(accUpdates, common.AccountUpdate{
|
||||
EthBlockNum: 0,
|
||||
BatchNum: 1,
|
||||
Idx: *idx,
|
||||
Nonce: accounts[i].Nonce,
|
||||
Balance: balance,
|
||||
})
|
||||
}
|
||||
if err := api.h.AddAccountUpdates(accUpdates); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tc = testCommon{
|
||||
blocks: commonBlocks,
|
||||
tokens: testTokens,
|
||||
batches: testBatches,
|
||||
fullBatches: testFullBatches,
|
||||
coordinators: testCoords,
|
||||
accounts: genTestAccounts(commonAccounts, testTokens),
|
||||
accounts: accounts,
|
||||
txs: testTxs,
|
||||
exits: testExits,
|
||||
poolTxsToSend: poolTxsToSend,
|
||||
@@ -585,7 +594,7 @@ func TestTimeout(t *testing.T) {
|
||||
hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
|
||||
require.NoError(t, err)
|
||||
// L2DB
|
||||
l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
|
||||
l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 0.0, 24*time.Hour, apiConnConTO)
|
||||
|
||||
// API
|
||||
apiGinTO := gin.Default()
|
||||
@@ -612,7 +621,6 @@ func TestTimeout(t *testing.T) {
|
||||
true,
|
||||
apiGinTO,
|
||||
hdbTO,
|
||||
nil,
|
||||
l2DBTO,
|
||||
&_config,
|
||||
)
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/lib/pq"
"github.com/russross/meddler"
)

const (
@@ -46,24 +47,33 @@ var (
func retSQLErr(err error, c *gin.Context) {
log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error()
retDupKey := func(errCode pq.ErrorCode) {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if errCode == "23505" {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
} else {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errMsg,
})
}
}
if errMsg == errCtxTimeout {
c.JSON(http.StatusServiceUnavailable, errorMsg{
Message: errSQLTimeout,
})
} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if sqlErr.Code == "23505" {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
}
retDupKey(sqlErr.Code)
} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok {
retDupKey(sqlErr.(*pq.Error).Code)
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
c.JSON(http.StatusNotFound, errorMsg{
Message: err.Error(),
Message: errMsg,
})
} else {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: err.Error(),
Message: errMsg,
})
}
}
@@ -2,6 +2,7 @@ package api

import (
"errors"
"fmt"
"math/big"
"net/http"

@@ -27,6 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
retBadReq(err, c)
return
}
writeTx.ClientIP = c.ClientIP()
// Insert to DB
if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c)
@@ -169,16 +171,21 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
if err != nil {
return tracerr.Wrap(err)
}
// Get public key
account, err := a.s.LastGetAccount(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate feeAmount
_, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee)
if err != nil {
return tracerr.Wrap(err)
}
// Get public key
account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
if err != nil {
return tracerr.Wrap(err)
}
// Validate TokenID
if poolTx.TokenID != account.TokenID {
return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
poolTx.TokenID, account.TokenID))
}
// Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature"))
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||
"github.com/iden3/go-iden3-crypto/babyjub"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testPoolTxReceive is a struct to be used to assert the response
|
||||
@@ -170,9 +171,9 @@ func TestPoolTxs(t *testing.T) {
|
||||
fetchedTxID := common.TxID{}
|
||||
for _, tx := range tc.poolTxsToSend {
|
||||
jsonTxBytes, err := json.Marshal(tx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
jsonTxReader := bytes.NewReader(jsonTxBytes)
|
||||
assert.NoError(
|
||||
require.NoError(
|
||||
t, doGoodReq(
|
||||
"POST",
|
||||
endpoint,
|
||||
@@ -187,42 +188,42 @@ func TestPoolTxs(t *testing.T) {
|
||||
badTx.Amount = "99950000000000000"
|
||||
badTx.Fee = 255
|
||||
jsonTxBytes, err := json.Marshal(badTx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
jsonTxReader := bytes.NewReader(jsonTxBytes)
|
||||
err = doBadReq("POST", endpoint, jsonTxReader, 400)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Wrong signature
|
||||
badTx = tc.poolTxsToSend[0]
|
||||
badTx.FromIdx = "hez:foo:1000"
|
||||
jsonTxBytes, err = json.Marshal(badTx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
jsonTxReader = bytes.NewReader(jsonTxBytes)
|
||||
err = doBadReq("POST", endpoint, jsonTxReader, 400)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Wrong to
|
||||
badTx = tc.poolTxsToSend[0]
|
||||
ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
|
||||
badTx.ToEthAddr = &ethAddr
|
||||
badTx.ToIdx = nil
|
||||
jsonTxBytes, err = json.Marshal(badTx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
jsonTxReader = bytes.NewReader(jsonTxBytes)
|
||||
err = doBadReq("POST", endpoint, jsonTxReader, 400)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Wrong rq
|
||||
badTx = tc.poolTxsToSend[0]
|
||||
rqFromIdx := "hez:foo:30"
|
||||
badTx.RqFromIdx = &rqFromIdx
|
||||
jsonTxBytes, err = json.Marshal(badTx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
jsonTxReader = bytes.NewReader(jsonTxBytes)
|
||||
err = doBadReq("POST", endpoint, jsonTxReader, 400)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// GET
|
||||
endpoint += "/"
|
||||
for _, tx := range tc.poolTxsToReceive {
|
||||
fetchedTx := testPoolTxReceive{}
|
||||
assert.NoError(
|
||||
require.NoError(
|
||||
t, doGoodReq(
|
||||
"GET",
|
||||
endpoint+tx.TxID.String(),
|
||||
@@ -233,10 +234,10 @@ func TestPoolTxs(t *testing.T) {
|
||||
}
|
||||
// 400, due invalid TxID
|
||||
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// 404, due inexistent TxID in DB
|
||||
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {
|
||||
|
||||
@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return bb.localStateDB.Reset(batchNum, fromSynchronizer)
return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
}

// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
@@ -64,7 +64,10 @@ func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBa
tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)

ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
return ptOut.ZKInputs, tracerr.Wrap(err)
if err != nil {
return nil, tracerr.Wrap(err)
}
return ptOut.ZKInputs, nil
}

// LocalStateDB returns the underlying LocalStateDB
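The `BuildBatch` change above replaces `return ptOut.ZKInputs, tracerr.Wrap(err)` with an explicit error check, so a caller never receives a partially built result (and, if `ProcessTxs` were to return a nil pointer together with an error, the result is no longer dereferenced). A minimal, self-contained sketch of the pattern; the names here are illustrative and not from the repository:

```go
package main

import "fmt"

type output struct{ zkInputs *string }

// process stands in for tp.ProcessTxs and may fail.
func process() (*output, error) { return nil, fmt.Errorf("boom") }

// build returns an explicit nil on error, mirroring the updated BuildBatch:
// the result is only touched when err == nil.
func build() (*string, error) {
	out, err := process()
	if err != nil {
		return nil, err
	}
	return out.zkInputs, nil
}

func main() {
	if _, err := build(); err != nil {
		fmt.Println("build failed:", err)
	}
}
```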
cli/node/.gitignore (vendored)
@@ -1,2 +1,3 @@
cfg.example.secret.toml
cfg.toml
node
@@ -2,6 +2,10 @@

This is the main cli for the node

## Go version

The `hermez-node` has been tested with go version 1.14

## Usage

```
@@ -65,29 +69,64 @@ when running the coordinator in sync mode
- The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section.

## Building

*All commands assume you are at the `cli/node` directory.*

Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```

Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.

Now build the node executable:
```
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
```

The executable is `node`.

## Usage Examples

The following commands assume you have built the node previously. You can also
run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.

Run the node in mode synchronizer:
```
go run . --mode sync --cfg cfg.buidler.toml run
./node --mode sync --cfg cfg.buidler.toml run
```

Run the node in mode coordinator:
```
go run . --mode coord --cfg cfg.buidler.toml run
./node --mode coord --cfg cfg.buidler.toml run
```

Import an ethereum private key into the keystore:
```
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```

Generate a new BabyJubJub key pair:
```
go run . --mode coord --cfg cfg.buidler.toml genbjj
./node --mode coord --cfg cfg.buidler.toml genbjj
```

Wipe the entier SQL database (this will destroy all synchronized and pool data):
Wipe the entier SQL database (this will destroy all synchronized and pool
data):
```
go run . --mode coord --cfg cfg.buidler.toml wipesql
./node --mode coord --cfg cfg.buidler.toml wipesql
```

Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
```
@@ -41,15 +41,21 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token"

[Coordinator]
# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.999
L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
PurgeByExtDelInterval = "1m"

[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -60,6 +66,7 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
[Coordinator.L2DB]
SafetyPeriod = 10
MaxTxs = 512
MinFeeUSD = 0.0
TTL = "24h"
PurgeBatchDelay = 10
InvalidateBatchDelay = 20
@@ -80,13 +87,13 @@ MaxTx = 512
NLevels = 32

[Coordinator.EthClient]
ReceiptTimeout = "60s"
ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms"
Attempts = 4
AttemptsDelay = "500ms"
CallGasLimit = 300000
GasPriceDiv = 100
TxResendTimeout = "2m"
NoReuseNonce = false
MaxGasPrice = "5000000000"
GasPriceIncPerc = 10

[Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore"
@@ -11,6 +11,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr"
@@ -23,6 +25,7 @@ const (
flagMode = "mode"
flagSK = "privatekey"
flagYes = "yes"
flagBlock = "block"
modeSync = "sync"
modeCoord = "coord"
)
@@ -139,6 +142,48 @@ func cmdRun(c *cli.Context) error {
return nil
}

func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)

db, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.Port,
cfg.PostgreSQL.Host,
cfg.PostgreSQL.User,
cfg.PostgreSQL.Password,
cfg.PostgreSQL.Name,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
historyDB := historydb.NewHistoryDB(db, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
db,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}

return nil
}

// Config is the configuration of the hermez node execution
type Config struct {
mode node.Mode
@@ -239,6 +284,18 @@ func main() {
Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun,
},
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
}

err := app.Run(os.Args)
@@ -263,3 +263,13 @@ type IdxNonce struct {
Idx Idx `db:"idx"`
Nonce Nonce `db:"nonce"`
}

// AccountUpdate represents an account balance and/or nonce update after a
// processed batch
type AccountUpdate struct {
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
Idx Idx `meddler:"idx"`
Nonce Nonce `meddler:"nonce"`
Balance *big.Int `meddler:"balance,bigint"`
}
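A minimal sketch of how such an update might be built after a batch is processed. The struct and values below only mirror the fields shown in the diff; they are illustrative and not the repository's actual types:

```go
package main

import (
	"fmt"
	"math/big"
)

// accountUpdate mirrors common.AccountUpdate from the diff above; the real
// type lives in the hermez-node common package.
type accountUpdate struct {
	EthBlockNum int64
	BatchNum    int64
	Idx         int64
	Nonce       int64
	Balance     *big.Int
}

func main() {
	// One update per touched account after a batch: the new balance and
	// nonce, keyed by batch number and account index (values made up).
	upd := accountUpdate{
		EthBlockNum: 0,
		BatchNum:    1,
		Idx:         256,
		Nonce:       3,
		Balance:     big.NewInt(1000000000000000000),
	}
	fmt.Printf("batch %d: idx %d -> nonce %d, balance %s\n",
		upd.BatchNum, upd.Idx, upd.Nonce, upd.Balance)
}
```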
@@ -27,6 +27,24 @@ type Batch struct {
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}

// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}

// BatchNum identifies a batch
type BatchNum int64

@@ -59,6 +77,7 @@ type BatchData struct {
L1CoordinatorTxs []L1Tx
L2Txs []L2Tx
CreatedAccounts []Account
UpdatedAccounts []AccountUpdate
ExitTree []ExitInfo
Batch Batch
}
@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}
return -1
// This result will be negative
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}

// SlotBlocks returns the first and the last block numbers included in that slot
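The change above makes SlotNum return a (negative) slot number for blocks before the genesis block instead of a constant -1. A small sketch of the arithmetic; the genesis block and blocks-per-slot values below are made up for illustration:

```go
package main

import "fmt"

// slotNum mirrors the formula in AuctionConstants.SlotNum above.
func slotNum(blockNum, genesisBlockNum, blocksPerSlot int64) int64 {
	return (blockNum - genesisBlockNum) / blocksPerSlot
}

func main() {
	const genesis, perSlot = 1000, 40
	fmt.Println(slotNum(1000, genesis, perSlot)) // 0: first block of slot 0
	fmt.Println(slotNum(1079, genesis, perSlot)) // 1: a block inside slot 1
	fmt.Println(slotNum(960, genesis, perSlot))  // -1: one full slot before genesis
}
```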
@@ -1,132 +0,0 @@
|
||||
// Package common Float16 provides methods to work with Hermez custom half float
|
||||
// precision, 16 bits, codification internally called Float16 has been adopted
|
||||
// to encode large integers. This is done in order to save bits when L2
|
||||
// transactions are published.
|
||||
//nolint:gomnd
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
|
||||
ErrRoundingLoss = errors.New("input value causes rounding loss")
|
||||
)
|
||||
|
||||
// Float16 represents a float in a 16 bit format
|
||||
type Float16 uint16
|
||||
|
||||
// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
|
||||
func (f16 Float16) Bytes() []byte {
|
||||
var b [2]byte
|
||||
binary.BigEndian.PutUint16(b[:], uint16(f16))
|
||||
return b[:]
|
||||
}
|
||||
|
||||
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
|
||||
func Float16FromBytes(b []byte) *Float16 {
|
||||
// WARNING b[:2] for a b where len(b)<2 can break
|
||||
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
|
||||
return &f16
|
||||
}
|
||||
|
||||
// BigInt converts the Float16 to a *big.Int integer
|
||||
func (f16 *Float16) BigInt() *big.Int {
|
||||
fl := int64(*f16)
|
||||
|
||||
m := big.NewInt(fl & 0x3FF)
|
||||
e := big.NewInt(fl >> 11)
|
||||
e5 := (fl >> 10) & 0x01
|
||||
|
||||
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
|
||||
res := m.Mul(m, exp)
|
||||
|
||||
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
|
||||
res.Add(res, exp.Div(exp, big.NewInt(2)))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// floorFix2Float converts a fix to a float, always rounding down
|
||||
func floorFix2Float(_f *big.Int) Float16 {
|
||||
zero := big.NewInt(0)
|
||||
ten := big.NewInt(10)
|
||||
e := int64(0)
|
||||
|
||||
m := big.NewInt(0)
|
||||
m.Set(_f)
|
||||
|
||||
if m.Cmp(zero) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
s := big.NewInt(0).Rsh(m, 10)
|
||||
|
||||
for s.Cmp(zero) != 0 {
|
||||
m.Div(m, ten)
|
||||
s.Rsh(m, 10)
|
||||
e++
|
||||
}
|
||||
|
||||
return Float16(m.Int64() | e<<11)
|
||||
}
|
||||
|
||||
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
|
||||
// case of loss during the encoding.
|
||||
func NewFloat16(f *big.Int) (Float16, error) {
|
||||
fl1 := floorFix2Float(f)
|
||||
fi1 := fl1.BigInt()
|
||||
fl2 := fl1 | 0x400
|
||||
fi2 := fl2.BigInt()
|
||||
|
||||
m3 := (fl1 & 0x3FF) + 1
|
||||
e3 := fl1 >> 11
|
||||
|
||||
if m3&0x400 == 0 {
|
||||
m3 = 0x66
|
||||
e3++
|
||||
}
|
||||
|
||||
fl3 := m3 + e3<<11
|
||||
fi3 := fl3.BigInt()
|
||||
|
||||
res := fl1
|
||||
|
||||
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
|
||||
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
|
||||
|
||||
if d.Cmp(d2) == 1 {
|
||||
res = fl2
|
||||
d = d2
|
||||
}
|
||||
|
||||
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
|
||||
|
||||
if d.Cmp(d3) == 1 {
|
||||
res = fl3
|
||||
}
|
||||
|
||||
// Do rounding check
|
||||
if res.BigInt().Cmp(f) == 0 {
|
||||
return res, nil
|
||||
}
|
||||
return res, tracerr.Wrap(ErrRoundingLoss)
|
||||
}
|
||||
|
||||
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
|
||||
// case of loss during the encoding.
|
||||
func NewFloat16Floor(f *big.Int) Float16 {
|
||||
fl1 := floorFix2Float(f)
|
||||
fl2 := fl1 | 0x400
|
||||
fi2 := fl2.BigInt()
|
||||
|
||||
if fi2.Cmp(f) < 1 {
|
||||
return fl2
|
||||
}
|
||||
return fl1
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConversionsFloat16(t *testing.T) {
|
||||
testVector := map[Float16]string{
|
||||
0x307B: "123000000",
|
||||
0x1DC6: "454500",
|
||||
0xFFFF: "10235000000000000000000000000000000",
|
||||
0x0000: "0",
|
||||
0x0400: "0",
|
||||
0x0001: "1",
|
||||
0x0401: "1",
|
||||
0x0800: "0",
|
||||
0x0c00: "5",
|
||||
0x0801: "10",
|
||||
0x0c01: "15",
|
||||
}
|
||||
|
||||
for test := range testVector {
|
||||
fix := test.BigInt()
|
||||
|
||||
assert.Equal(t, fix.String(), testVector[test])
|
||||
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(testVector[test], 10)
|
||||
|
||||
fl, err := NewFloat16(bi)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fx2 := fl.BigInt()
|
||||
assert.Equal(t, fx2.String(), testVector[test])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloorFix2FloatFloat16(t *testing.T) {
|
||||
testVector := map[string]Float16{
|
||||
"87999990000000000": 0x776f,
|
||||
"87950000000000001": 0x776f,
|
||||
"87950000000000000": 0x776f,
|
||||
"87949999999999999": 0x736f,
|
||||
}
|
||||
|
||||
for test := range testVector {
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(test, 10)
|
||||
|
||||
testFloat := NewFloat16Floor(bi)
|
||||
|
||||
assert.Equal(t, testFloat, testVector[test])
|
||||
}
|
||||
}
|
||||
|
||||
func TestConversionLossesFloat16(t *testing.T) {
|
||||
a := big.NewInt(1000)
|
||||
b, err := NewFloat16(a)
|
||||
assert.NoError(t, err)
|
||||
c := b.BigInt()
|
||||
assert.Equal(t, c, a)
|
||||
|
||||
a = big.NewInt(1024)
|
||||
b, err = NewFloat16(a)
|
||||
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
|
||||
c = b.BigInt()
|
||||
assert.NotEqual(t, c, a)
|
||||
|
||||
a = big.NewInt(32767)
|
||||
b, err = NewFloat16(a)
|
||||
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
|
||||
c = b.BigInt()
|
||||
assert.NotEqual(t, c, a)
|
||||
|
||||
a = big.NewInt(32768)
|
||||
b, err = NewFloat16(a)
|
||||
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
|
||||
c = b.BigInt()
|
||||
assert.NotEqual(t, c, a)
|
||||
|
||||
a = big.NewInt(65536000)
|
||||
b, err = NewFloat16(a)
|
||||
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
|
||||
c = b.BigInt()
|
||||
assert.NotEqual(t, c, a)
|
||||
}
|
||||
|
||||
func BenchmarkFloat16(b *testing.B) {
|
||||
newBigInt := func(s string) *big.Int {
|
||||
bigInt, ok := new(big.Int).SetString(s, 10)
|
||||
if !ok {
|
||||
panic("Bad big int")
|
||||
}
|
||||
return bigInt
|
||||
}
|
||||
type pair struct {
|
||||
Float16 Float16
|
||||
BigInt *big.Int
|
||||
}
|
||||
testVector := []pair{
|
||||
{0x307B, newBigInt("123000000")},
|
||||
{0x1DC6, newBigInt("454500")},
|
||||
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
|
||||
{0x0000, newBigInt("0")},
|
||||
{0x0400, newBigInt("0")},
|
||||
{0x0001, newBigInt("1")},
|
||||
{0x0401, newBigInt("1")},
|
||||
{0x0800, newBigInt("0")},
|
||||
{0x0c00, newBigInt("5")},
|
||||
{0x0801, newBigInt("10")},
|
||||
{0x0c01, newBigInt("15")},
|
||||
}
|
||||
b.Run("floorFix2Float()", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
|
||||
}
|
||||
})
|
||||
b.Run("NewFloat16()", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
|
||||
}
|
||||
})
|
||||
b.Run("Float16.BigInt()", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testVector[i%len(testVector)].Float16.BigInt()
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -6,7 +6,6 @@
package common

import (
"bytes"
"encoding/binary"
"errors"
"math/big"
@@ -24,8 +23,8 @@ const (
)

var (
// ErrFloat40Overflow is used when a given nonce overflows the maximum
// capacity of the Float40 (2**40-1)
// ErrFloat40Overflow is used when a given Float40 overflows the
// maximum capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
@@ -88,15 +87,14 @@ func NewFloat40(f *big.Int) (Float40, error) {
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
!bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
if m.Cmp(thres) >= 0 {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewL1UserTx(t *testing.T) {
|
||||
func TestNewL1UserTxID(t *testing.T) {
|
||||
toForge := int64(123456)
|
||||
l1Tx := &L1Tx{
|
||||
ToForgeL1TxsNum: &toForge,
|
||||
@@ -30,6 +30,38 @@ func TestNewL1UserTx(t *testing.T) {
|
||||
l1Tx, err := NewL1Tx(l1Tx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0x00a6cbae3b8661fb75b0919ca6605a02cfb04d9c6dd16870fa0fcdf01befa32768", l1Tx.TxID.String())
|
||||
|
||||
maxInt64 := 0xFFFF_FFFF_FFFF_FFFF >> 1
|
||||
|
||||
toForge = int64(maxInt64)
|
||||
l1Tx = &L1Tx{
|
||||
ToForgeL1TxsNum: &toForge,
|
||||
Position: maxInt64,
|
||||
UserOrigin: true,
|
||||
}
|
||||
l1Tx, err = NewL1Tx(l1Tx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0x001ff31eb325f324652bfe6b607a19e04789e082ee3b779eefe4a466062ea331d9", l1Tx.TxID.String())
|
||||
|
||||
toForge = int64(maxInt64 - 1)
|
||||
l1Tx = &L1Tx{
|
||||
ToForgeL1TxsNum: &toForge,
|
||||
Position: maxInt64 - 1,
|
||||
UserOrigin: true,
|
||||
}
|
||||
l1Tx, err = NewL1Tx(l1Tx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0x0003434eca58d35fd85795e3a6cce67c8801deb805ea1f7429cc270aa9f35ea403", l1Tx.TxID.String())
|
||||
|
||||
toForge = int64(0)
|
||||
l1Tx = &L1Tx{
|
||||
ToForgeL1TxsNum: &toForge,
|
||||
Position: 0,
|
||||
UserOrigin: true,
|
||||
}
|
||||
l1Tx, err = NewL1Tx(l1Tx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0x006bd2dd6bd408cbee33429358bf24fdc64612fbf8b1b4db604518f40ffd34b607", l1Tx.TxID.String())
|
||||
}
|
||||
|
||||
func TestNewL1CoordinatorTx(t *testing.T) {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewL2Tx(t *testing.T) {
|
||||
func TestNewL2TxID(t *testing.T) {
|
||||
l2Tx := &L2Tx{
|
||||
FromIdx: 87654,
|
||||
ToIdx: 300,
|
||||
|
||||
@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float40
Amount *big.Int `meddler:"amount,bigint"`
Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float40
RqAmount *big.Int `meddler:"rq_amount,bigintnull"`
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -126,7 +126,7 @@ func (tx *PoolL2Tx) SetID() error {
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte

@@ -179,7 +179,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 40 bits ] amountFloat40 // 5 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil {
tx.Amount = big.NewInt(0)
@@ -238,7 +238,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0)
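The comment corrections above only change the stated bit totals; the byte counts follow from rounding the bit totals up to whole bytes. A quick check of that arithmetic:

```go
package main

import "fmt"

// bytesFor rounds a bit count up to whole bytes.
func bytesFor(bits int) int { return (bits + 7) / 8 }

func main() {
	fmt.Println(bytesFor(225)) // 29, matching TxCompressedData's [29]byte buffer
	fmt.Println(bytesFor(217)) // 28, matching the corrected V2 totals
}
```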
@@ -62,3 +62,17 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
}
return siblings[:pos]
}

// TokensToUSD is a helper function to calculate the USD value of a certain
// amount of tokens considering the normalized token price (which is the price
// commonly reported by exhanges)
func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
amountF := new(big.Float).SetInt(amount)
// Divide by 10^decimals to normalize the amount
baseF := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
amountF.Mul(amountF, big.NewFloat(valueUSD))
amountF.Quo(amountF, baseF)
amountUSD, _ := amountF.Float64()
return amountUSD
}
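A small usage sketch of the helper added above: for a token with 18 decimals, 2.5 * 10^18 base units priced at 4 USD per token is worth 10 USD. The function below reproduces the same computation standalone so the example runs without the hermez-node packages:

```go
package main

import (
	"fmt"
	"math/big"
)

// tokensToUSD mirrors common.TokensToUSD from the diff above.
func tokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
	amountF := new(big.Float).SetInt(amount)
	baseF := new(big.Float).SetInt(new(big.Int).Exp(
		big.NewInt(10), big.NewInt(int64(decimals)), nil))
	amountF.Mul(amountF, big.NewFloat(valueUSD))
	amountF.Quo(amountF, baseF)
	usd, _ := amountF.Float64()
	return usd
}

func main() {
	amount, _ := new(big.Int).SetString("2500000000000000000", 10) // 2.5 tokens
	fmt.Println(tokensToUSD(amount, 18, 4.0))                      // 10
}
```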
@@ -103,7 +103,7 @@ type ZKInputs struct {
// ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"`
AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]

// OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -479,7 +479,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...)

// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
@@ -497,7 +497,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
}
b = append(b, l1TxsDataAvailability...)

// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
// [MAX_TX*(2*NLevels + 48) bits] L2TxsData
var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
@@ -3,6 +3,7 @@ package config
import (
"fmt"
"io/ioutil"
"math/big"
"time"

"github.com/BurntSushi/toml"
@@ -51,33 +52,85 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, eventhough at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, eventhough at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"`
// ForgeRetryInterval is the waiting interval between calls forge a
// batch after an error
ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"`
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval Duration `validate:"required"`
// L2DB is the DB that holds the pool of L2Txs
L2DB struct {
// SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the number of L2Txs that once reached triggers
// deletion of old L2Txs
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge outdated transactions
// PurgeBatchDelay is the delay between batches to purge
// outdated transactions. Oudated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark invalid transactions
// InvalidateBatchDelay is the delay between batches to mark
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge outdated transactions
// PurgeBlockDelay is the delay between blocks to purge
// outdated transactions. Oudated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// InvalidateBlockDelay is the delay between blocks to mark
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"`
TxSelector struct {
@@ -97,12 +150,13 @@ type Coordinator struct {
NLevels int64 `validate:"required"`
} `validate:"required"`
EthClient struct {
// CallGasLimit is the default gas limit set for ethereum
// calls, except for methods where a particular gas limit is
// harcoded because it's known to be a big value
CallGasLimit uint64 `validate:"required"`
// GasPriceDiv is the gas price division
GasPriceDiv uint64 `validate:"required"`
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int `validate:"required"`
// GasPriceIncPerc is the percentage increase of gas price set
// in an ethereum transaction from the suggested gas price by
// the ehtereum node
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"`
@@ -112,6 +166,13 @@ type Coordinator struct {
// AttemptsDelay is delay between attempts do do an eth client
// RPC call
AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept
Keystore struct {
// Path to the keystore
@@ -47,6 +47,8 @@ type Debug struct {
MineBlockNum int64
// SendBlockNum is the blockNum when the batch was sent to ethereum
SendBlockNum int64
// ResendNum is the number of times the tx has been resent
ResendNum int
// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
// was scheduled
LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
// StartToSendDelay is the delay between starting a batch and sending
// it to ethereum, in seconds
StartToSendDelay float64
// StartToMineDelay is the delay between starting a batch and having
// it mined in seconds
StartToMineDelay float64
// SendToMineDelay is the delay between sending a batch tx and having
// it mined in seconds
SendToMineDelay float64
}

// BatchInfo contans the Batch information
type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum
ServerProof prover.Client
ZKInputs *common.ZKInputs
@@ -82,9 +91,16 @@ type BatchInfo struct {
CoordIdxs []common.Idx
ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo
EthTx *types.Transaction
Receipt *types.Receipt
Debug Debug
EthTx *types.Transaction
EthTxErr error
// SendTimestamp the time of batch sent to ethereum
SendTimestamp time.Time
Receipt *types.Receipt
// Fail is true if:
// - The receipt status is failed
// - A previous parent batch is failed
Fail bool
Debug Debug
}

// DebugStore is a debug function to store the BatchInfo as a json text file in
@@ -3,8 +3,8 @@ package coordinator
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -23,7 +23,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
|
||||
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
|
||||
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
|
||||
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -42,25 +44,77 @@ type Config struct {
|
||||
// L1BatchTimeoutPerc is the portion of the range before the L1Batch
|
||||
// timeout that will trigger a schedule to forge an L1Batch
|
||||
L1BatchTimeoutPerc float64
|
||||
// StartSlotBlocksDelay is the number of blocks of delay to wait before
|
||||
// starting the pipeline when we reach a slot in which we can forge.
|
||||
StartSlotBlocksDelay int64
|
||||
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
|
||||
// the forger address is checked to be allowed to forge (apart from
|
||||
// checking the next block), used to decide when to stop scheduling new
|
||||
// batches (by stopping the pipeline).
|
||||
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
|
||||
// is 5, eventhough at block 11 we canForge, the pipeline will be
|
||||
// stopped if we can't forge at block 15.
|
||||
// This value should be the expected number of blocks it takes between
|
||||
// scheduling a batch and having it mined.
|
||||
ScheduleBatchBlocksAheadCheck int64
|
||||
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
|
||||
// which the coordinator is also checked to be allowed to forge, apart
|
||||
// from the next block; used to decide when to stop sending batches to
|
||||
// the smart contract.
|
||||
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
|
||||
// 5, eventhough at block 11 we canForge, the batch will be discarded
|
||||
// if we can't forge at block 15.
|
||||
// This value should be the expected number of blocks it takes between
|
||||
// sending a batch and having it mined.
|
||||
SendBatchBlocksMarginCheck int64
|
||||
// EthClientAttempts is the number of attempts to do an eth client RPC
|
||||
// call before giving up
|
||||
EthClientAttempts int
|
||||
// ForgeRetryInterval is the waiting interval between calls forge a
|
||||
// batch after an error
|
||||
ForgeRetryInterval time.Duration
|
||||
// ForgeDelay is the delay after which a batch is forged if the slot is
|
||||
// already committed. If set to 0s, the coordinator will continuously
|
||||
// forge at the maximum rate.
|
||||
ForgeDelay time.Duration
|
||||
// ForgeNoTxsDelay is the delay after which a batch is forged even if
|
||||
// there are no txs to forge if the slot is already committed. If set
|
||||
// to 0s, the coordinator will continuously forge even if the batches
|
||||
// are empty.
|
||||
ForgeNoTxsDelay time.Duration
|
||||
// SyncRetryInterval is the waiting interval between calls to the main
|
||||
// handler of a synced block after an error
|
||||
SyncRetryInterval time.Duration
|
||||
// PurgeByExtDelInterval is the waiting interval between calls
|
||||
// to the PurgeByExternalDelete function of the l2db which deletes
|
||||
// pending txs externally marked by the column `external_delete`
|
||||
PurgeByExtDelInterval time.Duration
|
||||
// EthClientAttemptsDelay is delay between attempts do do an eth client
|
||||
// RPC call
|
||||
EthClientAttemptsDelay time.Duration
|
||||
// EthTxResendTimeout is the timeout after which a non-mined ethereum
|
||||
// transaction will be resent (reusing the nonce) with a newly
|
||||
// calculated gas price
|
||||
EthTxResendTimeout time.Duration
|
||||
// EthNoReuseNonce disables reusing nonces of pending transactions for
|
||||
// new replacement transactions
|
||||
EthNoReuseNonce bool
|
||||
// MaxGasPrice is the maximum gas price allowed for ethereum
|
||||
// transactions
|
||||
MaxGasPrice *big.Int
|
||||
// GasPriceIncPerc is the percentage increase of gas price set in an
|
||||
// ethereum transaction from the suggested gas price by the ehtereum
|
||||
// node
|
||||
GasPriceIncPerc int64
|
||||
// TxManagerCheckInterval is the waiting interval between receipt
|
||||
// checks of ethereum transactions in the TxManager
|
||||
TxManagerCheckInterval time.Duration
|
||||
// DebugBatchPath if set, specifies the path where batchInfo is stored
|
||||
// in JSON in every step/update of the pipeline
|
||||
DebugBatchPath string
|
||||
Purger PurgerCfg
|
||||
DebugBatchPath string
|
||||
Purger PurgerCfg
|
||||
// VerifierIdx is the index of the verifier contract registered in the
|
||||
// smart contract
|
||||
VerifierIdx uint8
|
||||
TxProcessorConfig txprocessor.Config
|
||||
}
|
||||
@@ -74,15 +128,22 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
type fromBatch struct {
|
||||
BatchNum common.BatchNum
|
||||
ForgerAddr ethCommon.Address
|
||||
StateRoot *big.Int
|
||||
}
|
||||
|
||||
// Coordinator implements the Coordinator type
|
||||
type Coordinator struct {
|
||||
// State
|
||||
pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
|
||||
provers []prover.Client
|
||||
consts synchronizer.SCConsts
|
||||
vars synchronizer.SCVariables
|
||||
stats synchronizer.Stats
|
||||
started bool
|
||||
pipelineNum int // Pipeline sequential number. The first pipeline is 1
|
||||
pipelineFromBatch fromBatch // batch from which we started the pipeline
|
||||
provers []prover.Client
|
||||
consts synchronizer.SCConsts
|
||||
vars synchronizer.SCVariables
|
||||
stats synchronizer.Stats
|
||||
started bool
|
||||
|
||||
cfg Config
|
||||
|
||||
@@ -96,7 +157,17 @@ type Coordinator struct {
|
||||
wg sync.WaitGroup
|
||||
cancel context.CancelFunc
|
||||
|
||||
pipeline *Pipeline
|
||||
// mutexL2DBUpdateDelete protects updates to the L2DB so that
|
||||
// these two processes always happen exclusively:
|
||||
// - Pipeline taking pending txs, running through the TxProcessor and
|
||||
// marking selected txs as forging
|
||||
// - Coordinator deleting pending txs that have been marked with
|
||||
// `external_delete`.
|
||||
// Without this mutex, the coordinator could delete a pending txs that
|
||||
// has just been selected by the TxProcessor in the pipeline.
|
||||
mutexL2DBUpdateDelete sync.Mutex
|
||||
pipeline *Pipeline
|
||||
lastNonFailedBatchNum common.BatchNum
|
||||
|
||||
purger *Purger
|
||||
txManager *TxManager
|
||||
@@ -139,10 +210,15 @@ func NewCoordinator(cfg Config,
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
c := Coordinator{
|
||||
pipelineBatchNum: -1,
|
||||
provers: serverProofs,
|
||||
consts: *scConsts,
|
||||
vars: *initSCVars,
|
||||
pipelineNum: 0,
|
||||
pipelineFromBatch: fromBatch{
|
||||
BatchNum: 0,
|
||||
ForgerAddr: ethCommon.Address{},
|
||||
StateRoot: big.NewInt(0),
|
||||
},
|
||||
provers: serverProofs,
|
||||
consts: *scConsts,
|
||||
vars: *initSCVars,
|
||||
|
||||
cfg: cfg,
|
||||
|
||||
@@ -183,8 +259,10 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
|
||||
}
|
||||
|
||||
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
|
||||
return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
|
||||
c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
|
||||
c.pipelineNum++
|
||||
return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
|
||||
c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
|
||||
c.provers, &c.consts)
|
||||
}
|
||||
|
||||
// MsgSyncBlock indicates an update to the Synchronizer stats
|
||||
@@ -205,6 +283,9 @@ type MsgSyncReorg struct {
|
||||
// MsgStopPipeline indicates a signal to reset the pipeline
|
||||
type MsgStopPipeline struct {
|
||||
Reason string
|
||||
// FailedBatchNum indicates the first batchNum that failed in the
|
||||
// pipeline. If FailedBatchNum is 0, it should be ignored.
|
||||
FailedBatchNum common.BatchNum
|
||||
}
|
||||
|
||||
// SendMsg is a thread safe method to pass a message to the Coordinator
|
||||
@@ -215,27 +296,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
|
||||
if update.Rollup != nil {
|
||||
vars.Rollup = *update.Rollup
|
||||
}
|
||||
if update.Auction != nil {
|
||||
vars.Auction = *update.Auction
|
||||
}
|
||||
if update.WDelayer != nil {
|
||||
vars.WDelayer = *update.WDelayer
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
||||
if vars.Rollup != nil {
|
||||
c.vars.Rollup = *vars.Rollup
|
||||
}
|
||||
if vars.Auction != nil {
|
||||
c.vars.Auction = *vars.Auction
|
||||
}
|
||||
if vars.WDelayer != nil {
|
||||
c.vars.WDelayer = *vars.WDelayer
|
||||
}
|
||||
updateSCVars(&c.vars, vars)
|
||||
}
|
||||
|
||||
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
|
||||
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
|
||||
if blockNum < auctionConstants.GenesisBlockNum {
|
||||
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
|
||||
"genesis", auctionConstants.GenesisBlockNum)
|
||||
return false
|
||||
}
|
||||
var slot *common.Slot
|
||||
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
|
||||
slot = currentSlot
|
||||
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
|
||||
slot = nextSlot
|
||||
} else {
|
||||
log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
|
||||
log.Warnw("canForge: requested blockNum is outside current and next slot",
|
||||
"blockNum", blockNum, "currentSlot", currentSlot,
|
||||
"nextSlot", nextSlot,
|
||||
)
|
||||
@@ -244,16 +334,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
|
||||
anyoneForge := false
|
||||
if !slot.ForgerCommitment &&
|
||||
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
|
||||
log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
|
||||
log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
|
||||
"block", blockNum)
|
||||
anyoneForge = true
|
||||
}
|
||||
if slot.Forger == addr || anyoneForge {
|
||||
return true
|
||||
}
|
||||
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
|
||||
return false
|
||||
}
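As a rough restatement of the rule canForge implements above, using simplified local types rather than the real common package (field names and the relative-block formula here are illustrative assumptions, not the actual API): forging is allowed when the block falls inside the current or next slot and either the address is the slot's forger or the slot deadline has passed without a forger commitment.

type slotSketch struct {
	startBlock, endBlock int64
	forger               string
	forgerCommitment     bool
}

// canForgeSketch restates the decision rule with plain values.
func canForgeSketch(genesisBlock, blocksPerSlot, slotDeadline, blockNum int64,
	addr string, current, next slotSketch) bool {
	if blockNum < genesisBlock {
		return false
	}
	var slot slotSketch
	switch {
	case current.startBlock <= blockNum && blockNum <= current.endBlock:
		slot = current
	case next.startBlock <= blockNum && blockNum <= next.endBlock:
		slot = next
	default:
		return false
	}
	relativeBlock := (blockNum - genesisBlock) % blocksPerSlot // assumed RelativeBlock semantics
	anyoneForge := !slot.forgerCommitment && relativeBlock >= slotDeadline
	return slot.forger == addr || anyoneForge
}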
|
||||
|
||||
func (c *Coordinator) canForgeAt(blockNum int64) bool {
|
||||
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
|
||||
c.cfg.ForgerAddress, blockNum)
|
||||
}
|
||||
|
||||
func (c *Coordinator) canForge() bool {
|
||||
blockNum := c.stats.Eth.LastBlock.Num + 1
|
||||
return canForge(&c.consts.Auction, &c.vars.Auction,
|
||||
@@ -262,21 +359,39 @@ func (c *Coordinator) canForge() bool {
|
||||
}
|
||||
|
||||
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
|
||||
canForge := c.canForge()
|
||||
nextBlock := c.stats.Eth.LastBlock.Num + 1
|
||||
canForge := c.canForgeAt(nextBlock)
|
||||
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
|
||||
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
|
||||
}
|
||||
if c.pipeline == nil {
|
||||
if canForge {
|
||||
relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
|
||||
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
|
||||
log.Debugf("Coordinator: delaying pipeline start due to "+
|
||||
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
|
||||
relativeBlock, c.cfg.StartSlotBlocksDelay)
|
||||
} else if canForge {
|
||||
log.Infow("Coordinator: forging state begin", "block",
|
||||
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
|
||||
batchNum := common.BatchNum(stats.Sync.LastBatch)
|
||||
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
|
||||
fromBatch := fromBatch{
|
||||
BatchNum: stats.Sync.LastBatch.BatchNum,
|
||||
ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
|
||||
StateRoot: stats.Sync.LastBatch.StateRoot,
|
||||
}
|
||||
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
|
||||
fromBatch.BatchNum = c.lastNonFailedBatchNum
|
||||
fromBatch.ForgerAddr = c.cfg.ForgerAddress
|
||||
fromBatch.StateRoot = big.NewInt(0)
|
||||
}
|
||||
var err error
|
||||
if c.pipeline, err = c.newPipeline(ctx); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
|
||||
c.pipelineFromBatch = fromBatch
|
||||
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
|
||||
c.pipeline = nil
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
c.pipelineBatchNum = batchNum
|
||||
}
|
||||
} else {
|
||||
if !canForge {
|
||||
@@ -286,25 +401,12 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
|
||||
}
|
||||
}
|
||||
if c.pipeline == nil {
|
||||
// Mark invalid in Pool due to forged L2Txs
|
||||
// for _, batch := range batches {
|
||||
// if err := c.l2DB.InvalidateOldNonces(
|
||||
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
|
||||
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
|
||||
stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
|
||||
if err != nil {
|
||||
if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
|
||||
stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
|
||||
if err != nil {
|
||||
if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
|
||||
int64(stats.Sync.LastBatch.BatchNum)); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
}
|
||||
@@ -331,33 +433,43 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
|
||||
if c.pipeline != nil {
|
||||
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
|
||||
}
|
||||
if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
|
||||
// There's been a reorg and the batch from which the pipeline
|
||||
// was started was in a block that was discarded. The batch
|
||||
// may not be in the main chain, so we stop the pipeline as a
|
||||
// precaution (it will be started again once the node is in
|
||||
// sync).
|
||||
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
|
||||
"sync.LastBatch", c.stats.Sync.LastBatch,
|
||||
"c.pipelineBatchNum", c.pipelineBatchNum)
|
||||
if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
|
||||
if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
|
||||
(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil ||
|
||||
c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) {
|
||||
// There's been a reorg and the batch state root from which the
|
||||
// pipeline was started has changed (probably because it was in
|
||||
// a block that was discarded), and it was sent by a different
|
||||
// coordinator than us. That batch may never be in the main
|
||||
// chain, so we stop the pipeline (it will be started again
|
||||
// once the node is in sync).
|
||||
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
|
||||
"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
|
||||
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
|
||||
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
|
||||
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
|
||||
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
|
||||
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
|
||||
// the next pipeline will start from the last state of the synchronizer,
|
||||
// otherwise, it will start from failedBatchNum-1.
|
||||
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
|
||||
batchNum := c.stats.Sync.LastBatch.BatchNum
|
||||
if failedBatchNum != 0 {
|
||||
batchNum = failedBatchNum - 1
|
||||
}
|
||||
if c.pipeline != nil {
|
||||
c.pipeline.Stop(c.ctx)
|
||||
c.pipeline = nil
|
||||
}
|
||||
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
|
||||
if err := c.l2DB.Reorg(batchNum); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
|
||||
// TODO: Check that we are in a slot in which we can't forge
|
||||
}
|
||||
c.lastNonFailedBatchNum = batchNum
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -373,7 +485,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
|
||||
}
|
||||
case MsgStopPipeline:
|
||||
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
|
||||
if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
|
||||
if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
|
||||
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
|
||||
}
|
||||
default:
|
||||
@@ -396,7 +508,7 @@ func (c *Coordinator) Start() {
|
||||
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
waitDuration := longWaitDuration
|
||||
waitCh := time.After(longWaitDuration)
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
@@ -408,23 +520,42 @@ func (c *Coordinator) Start() {
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Errorw("Coordinator.handleMsg", "err", err)
|
||||
waitDuration = c.cfg.SyncRetryInterval
|
||||
waitCh = time.After(c.cfg.SyncRetryInterval)
|
||||
continue
|
||||
}
|
||||
waitDuration = longWaitDuration
|
||||
case <-time.After(waitDuration):
|
||||
waitCh = time.After(longWaitDuration)
|
||||
case <-waitCh:
|
||||
if !c.stats.Synced() {
|
||||
waitDuration = longWaitDuration
|
||||
waitCh = time.After(longWaitDuration)
|
||||
continue
|
||||
}
|
||||
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
|
||||
waitCh = time.After(longWaitDuration)
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Errorw("Coordinator.syncStats", "err", err)
|
||||
waitDuration = c.cfg.SyncRetryInterval
|
||||
waitCh = time.After(c.cfg.SyncRetryInterval)
|
||||
continue
|
||||
}
|
||||
waitDuration = longWaitDuration
|
||||
waitCh = time.After(longWaitDuration)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
|
||||
c.wg.Done()
|
||||
return
|
||||
case <-time.After(c.cfg.PurgeByExtDelInterval):
|
||||
c.mutexL2DBUpdateDelete.Lock()
|
||||
if err := c.l2DB.PurgeByExternalDelete(); err != nil {
|
||||
log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
|
||||
}
|
||||
c.mutexL2DBUpdateDelete.Unlock()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -105,7 +105,7 @@ func newTestModules(t *testing.T) modules {
|
||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||
require.NoError(t, err)
|
||||
test.WipeDB(db)
|
||||
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
|
||||
l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil)
|
||||
historyDB := historydb.NewHistoryDB(db, nil)
|
||||
|
||||
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
|
||||
@@ -261,8 +261,8 @@ func TestCoordinatorFlow(t *testing.T) {
|
||||
var stats synchronizer.Stats
|
||||
stats.Eth.LastBlock = *ethClient.CtlLastBlock()
|
||||
stats.Sync.LastBlock = stats.Eth.LastBlock
|
||||
stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
|
||||
stats.Sync.LastBatch = stats.Eth.LastBatch
|
||||
stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
|
||||
stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
|
||||
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
|
||||
require.NoError(t, err)
|
||||
var slot common.Slot
|
||||
@@ -279,7 +279,7 @@ func TestCoordinatorFlow(t *testing.T) {
|
||||
// Copy stateDB to synchronizer if there was a new batch
|
||||
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
|
||||
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
|
||||
if stats.Sync.LastBatch != 0 {
|
||||
if stats.Sync.LastBatch.BatchNum != 0 {
|
||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||
log.Infow("Making pebble checkpoint for sync",
|
||||
"source", source, "dest", dest)
|
||||
|
||||
@@ -2,6 +2,7 @@ package coordinator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
@@ -24,25 +25,36 @@ type statsVars struct {
|
||||
Vars synchronizer.SCVariablesPtr
|
||||
}
|
||||
|
||||
type state struct {
|
||||
batchNum common.BatchNum
|
||||
lastScheduledL1BatchBlockNum int64
|
||||
lastForgeL1TxsNum int64
|
||||
lastSlotForged int64
|
||||
}
|
||||
|
||||
// Pipeline manages the forging of batches with parallel server proofs
|
||||
type Pipeline struct {
|
||||
num int
|
||||
cfg Config
|
||||
consts synchronizer.SCConsts
|
||||
|
||||
// state
|
||||
batchNum common.BatchNum
|
||||
lastScheduledL1BatchBlockNum int64
|
||||
lastForgeL1TxsNum int64
|
||||
started bool
|
||||
state state
|
||||
started bool
|
||||
rw sync.RWMutex
|
||||
errAtBatchNum common.BatchNum
|
||||
lastForgeTime time.Time
|
||||
|
||||
proversPool *ProversPool
|
||||
provers []prover.Client
|
||||
txManager *TxManager
|
||||
historyDB *historydb.HistoryDB
|
||||
l2DB *l2db.L2DB
|
||||
txSelector *txselector.TxSelector
|
||||
batchBuilder *batchbuilder.BatchBuilder
|
||||
purger *Purger
|
||||
proversPool *ProversPool
|
||||
provers []prover.Client
|
||||
coord *Coordinator
|
||||
txManager *TxManager
|
||||
historyDB *historydb.HistoryDB
|
||||
l2DB *l2db.L2DB
|
||||
txSelector *txselector.TxSelector
|
||||
batchBuilder *batchbuilder.BatchBuilder
|
||||
mutexL2DBUpdateDelete *sync.Mutex
|
||||
purger *Purger
|
||||
|
||||
stats synchronizer.Stats
|
||||
vars synchronizer.SCVariables
|
||||
@@ -53,14 +65,29 @@ type Pipeline struct {
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
|
||||
p.rw.Lock()
|
||||
defer p.rw.Unlock()
|
||||
p.errAtBatchNum = batchNum
|
||||
}
|
||||
|
||||
func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
|
||||
p.rw.RLock()
|
||||
defer p.rw.RUnlock()
|
||||
return p.errAtBatchNum
|
||||
}
|
||||
|
||||
// NewPipeline creates a new Pipeline
|
||||
func NewPipeline(ctx context.Context,
|
||||
cfg Config,
|
||||
num int, // Pipeline sequential number
|
||||
historyDB *historydb.HistoryDB,
|
||||
l2DB *l2db.L2DB,
|
||||
txSelector *txselector.TxSelector,
|
||||
batchBuilder *batchbuilder.BatchBuilder,
|
||||
mutexL2DBUpdateDelete *sync.Mutex,
|
||||
purger *Purger,
|
||||
coord *Coordinator,
|
||||
txManager *TxManager,
|
||||
provers []prover.Client,
|
||||
scConsts *synchronizer.SCConsts,
|
||||
@@ -79,17 +106,20 @@ func NewPipeline(ctx context.Context,
|
||||
return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
|
||||
}
|
||||
return &Pipeline{
|
||||
cfg: cfg,
|
||||
historyDB: historyDB,
|
||||
l2DB: l2DB,
|
||||
txSelector: txSelector,
|
||||
batchBuilder: batchBuilder,
|
||||
provers: provers,
|
||||
proversPool: proversPool,
|
||||
purger: purger,
|
||||
txManager: txManager,
|
||||
consts: *scConsts,
|
||||
statsVarsCh: make(chan statsVars, queueLen),
|
||||
num: num,
|
||||
cfg: cfg,
|
||||
historyDB: historyDB,
|
||||
l2DB: l2DB,
|
||||
txSelector: txSelector,
|
||||
batchBuilder: batchBuilder,
|
||||
provers: provers,
|
||||
proversPool: proversPool,
|
||||
mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
|
||||
purger: purger,
|
||||
coord: coord,
|
||||
txManager: txManager,
|
||||
consts: *scConsts,
|
||||
statsVarsCh: make(chan statsVars, queueLen),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -104,47 +134,87 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
|
||||
// reset pipeline state
|
||||
func (p *Pipeline) reset(batchNum common.BatchNum,
|
||||
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
|
||||
p.batchNum = batchNum
|
||||
p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
|
||||
p.state = state{
|
||||
batchNum: batchNum,
|
||||
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
|
||||
lastScheduledL1BatchBlockNum: 0,
|
||||
lastSlotForged: -1,
|
||||
}
|
||||
p.stats = *stats
|
||||
p.vars = *vars
|
||||
p.lastScheduledL1BatchBlockNum = 0
|
||||
|
||||
err := p.txSelector.Reset(p.batchNum)
|
||||
// Reset the StateDB in TxSelector and BatchBuilder from the
|
||||
// synchronizer only if the checkpoint we reset from either:
|
||||
// a. Doesn't exist in the TxSelector/BatchBuilder
|
||||
// b. The batch has already been synced by the synchronizer and has a
|
||||
// different MTRoot than the BatchBuilder
|
||||
// Otherwise, reset from the local checkpoint.
|
||||
|
||||
// First attempt to reset from local checkpoint if such checkpoint exists
|
||||
existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
err = p.batchBuilder.Reset(p.batchNum, true)
|
||||
fromSynchronizerTxSelector := !existsTxSelector
|
||||
if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
fromSynchronizerBatchBuilder := !existsBatchBuilder
|
||||
if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// After reset, check that if the batch exists in the historyDB, the
|
||||
// stateRoot matches with the local one, if not, force a reset from
|
||||
// synchronizer
|
||||
batch, err := p.historyDB.GetBatch(p.state.batchNum)
|
||||
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||
// nothing to do
|
||||
} else if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
} else {
|
||||
localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
|
||||
if batch.StateRoot.Cmp(localStateRoot) != 0 {
|
||||
log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
|
||||
"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
|
||||
// StateRoot from synchronizer doesn't match StateRoot
|
||||
// from batchBuilder, force a reset from synchronizer
|
||||
if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
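The reset logic above reduces to a small decision: keep the local checkpoint when it exists and agrees with the synchronized state, otherwise reset from the synchronizer. A condensed, hypothetical restatement (the real code calls TxSelector.Reset and BatchBuilder.Reset with a fromSynchronizer flag):

// resetFromSynchronizer is illustrative only; assumes math/big is imported.
func resetFromSynchronizer(checkpointExists bool, historyRoot, localRoot *big.Int) bool {
	if !checkpointExists {
		return true // no local checkpoint for this batch: copy state from the synchronizer
	}
	if historyRoot != nil && localRoot != nil && historyRoot.Cmp(localRoot) != 0 {
		return true // local state diverged from the synced batch: discard it
	}
	return false // safe to keep forging on top of the local checkpoint
}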
|
||||
|
||||
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
||||
if vars.Rollup != nil {
|
||||
p.vars.Rollup = *vars.Rollup
|
||||
}
|
||||
if vars.Auction != nil {
|
||||
p.vars.Auction = *vars.Auction
|
||||
}
|
||||
if vars.WDelayer != nil {
|
||||
p.vars.WDelayer = *vars.WDelayer
|
||||
}
|
||||
updateSCVars(&p.vars, vars)
|
||||
}
|
||||
|
||||
// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
|
||||
// and then waits for an available proof server and sends the zkInputs to it so
|
||||
// that the proof computation begins.
|
||||
func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
|
||||
p.mutexL2DBUpdateDelete.Lock()
|
||||
batchInfo, err := p.forgeBatch(batchNum)
|
||||
p.mutexL2DBUpdateDelete.Unlock()
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
} else if err != nil {
|
||||
if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
|
||||
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
|
||||
"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
|
||||
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
|
||||
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
|
||||
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
|
||||
tracerr.Unwrap(err) == errForgeBeforeDelay {
|
||||
// no log
|
||||
} else {
|
||||
log.Errorw("forgeBatch", "err", err)
|
||||
}
|
||||
@@ -188,7 +258,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
|
||||
|
||||
p.wg.Add(1)
|
||||
go func() {
|
||||
waitDuration := zeroDuration
|
||||
waitCh := time.After(zeroDuration)
|
||||
for {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
@@ -198,20 +268,42 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
|
||||
case statsVars := <-p.statsVarsCh:
|
||||
p.stats = statsVars.Stats
|
||||
p.syncSCVars(statsVars.Vars)
|
||||
case <-time.After(waitDuration):
|
||||
batchNum = p.batchNum + 1
|
||||
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
|
||||
if p.ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
waitDuration = p.cfg.SyncRetryInterval
|
||||
case <-waitCh:
|
||||
// Once errAtBatchNum != 0, we stop forging
|
||||
// batches because there's been an error and we
|
||||
// wait for the pipeline to be stopped.
|
||||
if p.getErrAtBatchNum() != 0 {
|
||||
waitCh = time.After(p.cfg.ForgeRetryInterval)
|
||||
continue
|
||||
}
|
||||
p.batchNum = batchNum
|
||||
batchNum = p.state.batchNum + 1
|
||||
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
|
||||
if p.ctx.Err() != nil {
|
||||
waitCh = time.After(p.cfg.ForgeRetryInterval)
|
||||
continue
|
||||
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
|
||||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
|
||||
tracerr.Unwrap(err) == errForgeBeforeDelay {
|
||||
waitCh = time.After(p.cfg.ForgeRetryInterval)
|
||||
continue
|
||||
} else if err != nil {
|
||||
p.setErrAtBatchNum(batchNum)
|
||||
p.coord.SendMsg(p.ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf(
|
||||
"Pipeline.handleForgBatch: %v", err),
|
||||
FailedBatchNum: batchNum,
|
||||
})
|
||||
waitCh = time.After(p.cfg.ForgeRetryInterval)
|
||||
continue
|
||||
}
|
||||
p.lastForgeTime = time.Now()
|
||||
|
||||
p.state.batchNum = batchNum
|
||||
select {
|
||||
case batchChSentServerProof <- batchInfo:
|
||||
case <-p.ctx.Done():
|
||||
}
|
||||
waitCh = time.After(zeroDuration)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -225,16 +317,28 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
|
||||
p.wg.Done()
|
||||
return
|
||||
case batchInfo := <-batchChSentServerProof:
|
||||
// Once errAtBatchNum != 0, we stop forging
|
||||
// batches because there's been an error and we
|
||||
// wait for the pipeline to be stopped.
|
||||
if p.getErrAtBatchNum() != 0 {
|
||||
continue
|
||||
}
|
||||
err := p.waitServerProof(p.ctx, batchInfo)
|
||||
// We are done with this serverProof, add it back to the pool
|
||||
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
|
||||
batchInfo.ServerProof = nil
|
||||
if p.ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Errorw("waitServerProof", "err", err)
|
||||
p.setErrAtBatchNum(batchInfo.BatchNum)
|
||||
p.coord.SendMsg(p.ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf(
|
||||
"Pipeline.waitServerProof: %v", err),
|
||||
FailedBatchNum: batchInfo.BatchNum,
|
||||
})
|
||||
continue
|
||||
}
|
||||
// We are done with this serverProof, add it back to the pool
|
||||
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
|
||||
// batchInfo.ServerProof = nil
|
||||
p.txManager.AddBatch(p.ctx, batchInfo)
|
||||
}
|
||||
}
|
||||
@@ -284,9 +388,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
|
||||
batchInfo.Debug.StartTimestamp = time.Now()
|
||||
// Structure to accumulate data and metadata of the batch
|
||||
now := time.Now()
|
||||
batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
|
||||
batchInfo.Debug.StartTimestamp = now
|
||||
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
|
||||
|
||||
selectionCfg := &txselector.SelectionConfig{
|
||||
@@ -300,22 +405,26 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
||||
var auths [][]byte
|
||||
var coordIdxs []common.Idx
|
||||
|
||||
// Check if the slot is not yet fulfilled
|
||||
slotCommitted := false
|
||||
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
|
||||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
|
||||
slotCommitted = true
|
||||
}
|
||||
|
||||
// If we haven't reached the ForgeDelay, skip forging the batch
|
||||
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
|
||||
return nil, errForgeBeforeDelay
|
||||
}
|
||||
|
||||
// 1. Decide if we forge L2Tx or L1+L2Tx
|
||||
if p.shouldL1L2Batch(batchInfo) {
|
||||
batchInfo.L1Batch = true
|
||||
defer func() {
|
||||
// If there's no error, update the parameters related
|
||||
// to the last L1Batch forged
|
||||
if err == nil {
|
||||
p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
|
||||
p.lastForgeL1TxsNum++
|
||||
}
|
||||
}()
|
||||
if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
|
||||
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
|
||||
return nil, tracerr.Wrap(errLastL1BatchNotSynced)
|
||||
}
|
||||
// 2a: L1+L2 txs
|
||||
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
|
||||
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
@@ -334,6 +443,43 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
||||
l1UserTxsExtra = nil
|
||||
}
|
||||
|
||||
// If there are no txs to forge, no l1UserTxs in the open queue to
|
||||
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
|
||||
// batch.
|
||||
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
|
||||
noTxs := false
|
||||
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
|
||||
if batchInfo.L1Batch {
|
||||
// Query the L1UserTxs in the queue following
|
||||
// the one we are trying to forge.
|
||||
nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
|
||||
p.state.lastForgeL1TxsNum + 1)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
// If there are future L1UserTxs, we forge a
|
||||
// batch to advance the queues and forge the
|
||||
// L1UserTxs in the future. Otherwise, skip.
|
||||
if len(nextL1UserTxs) == 0 {
|
||||
noTxs = true
|
||||
}
|
||||
} else {
|
||||
noTxs = true
|
||||
}
|
||||
}
|
||||
if noTxs {
|
||||
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
return nil, errForgeNoTxsBeforeDelay
|
||||
}
|
||||
}
|
||||
|
||||
if batchInfo.L1Batch {
|
||||
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
|
||||
p.state.lastForgeL1TxsNum++
|
||||
}
|
||||
|
||||
// 3. Save metadata from TxSelector output for BatchNum
|
||||
batchInfo.L1UserTxsExtra = l1UserTxsExtra
|
||||
batchInfo.L1CoordTxs = l1CoordTxs
|
||||
@@ -378,6 +524,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
|
||||
p.cfg.debugBatchStore(batchInfo)
|
||||
log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
|
||||
|
||||
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
|
||||
|
||||
return batchInfo, nil
|
||||
}
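To summarize the skip conditions introduced in forgeBatch: when the current slot is already committed (or we already forged in it), a batch is forged early only after ForgeDelay has elapsed, and an empty batch only after ForgeNoTxsDelay, unless forging it would advance a queue with pending L1UserTxs. A hedged sketch of that predicate (names and exact grouping are illustrative, assuming the time package is imported):

// shouldSkipForging is a simplified restatement of the delay checks above.
func shouldSkipForging(slotCommitted bool, sinceLastForge, forgeDelay, forgeNoTxsDelay time.Duration,
	numTxs, numNextQueueL1Txs int, isL1Batch bool) bool {
	if !slotCommitted {
		return false // slot not fulfilled yet: forge as soon as possible
	}
	if sinceLastForge < forgeDelay {
		return true // committed slot and ForgeDelay not reached yet
	}
	if numTxs == 0 && sinceLastForge < forgeNoTxsDelay {
		// an empty batch is only worth forging if it freezes a queue that
		// lets future L1UserTxs be forged
		return !(isL1Batch && numNextQueueL1Txs > 0)
	}
	return false
}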
|
||||
|
||||
@@ -399,12 +547,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
|
||||
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
|
||||
// Take the lastL1BatchBlockNum as the biggest between the last
|
||||
// scheduled one, and the synchronized one.
|
||||
lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
|
||||
lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
|
||||
if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
|
||||
lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
|
||||
}
|
||||
// Set Debug information
|
||||
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
|
||||
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
|
||||
batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
|
||||
batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
|
||||
batchInfo.Debug.L1BatchBlockScheduleDeadline =
|
||||
|
||||
@@ -25,6 +25,14 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newBigInt(s string) *big.Int {
|
||||
v, ok := new(big.Int).SetString(s, 10)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("Can't set big.Int from %s", s))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func TestPipelineShouldL1L2Batch(t *testing.T) {
|
||||
ethClientSetup := test.NewClientSetupExample()
|
||||
ethClientSetup.ChainID = big.NewInt(int64(chainID))
|
||||
@@ -77,7 +85,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
|
||||
//
|
||||
// Scheduled L1Batch
|
||||
//
|
||||
pipeline.lastScheduledL1BatchBlockNum = startBlock
|
||||
pipeline.state.lastScheduledL1BatchBlockNum = startBlock
|
||||
stats.Sync.LastL1BatchBlock = startBlock - 10
|
||||
|
||||
// We are one block before the timeout range * 0.5
|
||||
@@ -128,6 +136,11 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
|
||||
blocks, err := tc.GenerateBlocksFromInstructions(set)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, blocks)
|
||||
// Set StateRoots for batches manually (til doesn't set it)
|
||||
blocks[0].Rollup.Batches[0].Batch.StateRoot =
|
||||
newBigInt("0")
|
||||
blocks[0].Rollup.Batches[1].Batch.StateRoot =
|
||||
newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
|
||||
|
||||
ethAddTokens(blocks, ethClient)
|
||||
err = ethClient.CtlAddBlocks(blocks)
|
||||
@@ -172,7 +185,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
|
||||
// users with positive balances
|
||||
tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
|
||||
syncStats := sync.Stats()
|
||||
batchNum := common.BatchNum(syncStats.Sync.LastBatch)
|
||||
batchNum := syncStats.Sync.LastBatch.BatchNum
|
||||
syncSCVars := sync.SCVars()
|
||||
|
||||
pipeline, err := coord.newPipeline(ctx)
|
||||
|
||||
@@ -13,13 +13,23 @@ import (
|
||||
|
||||
// PurgerCfg is the purger configuration
type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated transactions
// PurgeBatchDelay is the delay between batches to purge outdated
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBatchDelay int64
// InvalidateBatchDelay is the delay between batches to mark invalid transactions
// InvalidateBatchDelay is the delay between batches to mark invalid
// transactions due to nonce lower than the account nonce.
InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated transactions
// PurgeBlockDelay is the delay between blocks to purge outdated
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBlockDelay int64
// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// InvalidateBlockDelay is the delay between blocks to mark invalid
// transactions due to nonce lower than the account nonce.
InvalidateBlockDelay int64
}
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
|
||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||
require.NoError(t, err)
|
||||
test.WipeDB(db)
|
||||
return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
|
||||
return l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil)
|
||||
}
|
||||
|
||||
func newStateDB(t *testing.T) *statedb.LocalStateDB {
|
||||
|
||||
@@ -4,11 +4,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/hermeznetwork/hermez-node/common"
|
||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||
@@ -35,12 +37,20 @@ type TxManager struct {
|
||||
vars synchronizer.SCVariables
|
||||
statsVarsCh chan statsVars
|
||||
|
||||
queue []*BatchInfo
|
||||
discardPipelineCh chan int // int refers to the pipelineNum
|
||||
|
||||
minPipelineNum int
|
||||
queue Queue
|
||||
// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
|
||||
lastSuccessBatch common.BatchNum
|
||||
lastPendingBatch common.BatchNum
|
||||
lastSuccessNonce uint64
|
||||
lastPendingNonce uint64
|
||||
// lastPendingBatch common.BatchNum
|
||||
// accNonce is the account nonce in the last mined block (due to mined txs)
|
||||
accNonce uint64
|
||||
// accNextNonce is the nonce that we should use to send the next tx.
|
||||
// In some cases this will be a reused nonce of an already pending tx.
|
||||
accNextNonce uint64
|
||||
|
||||
lastSentL1BatchBlockNum int64
|
||||
}
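A brief, hypothetical illustration of how accNonce and accNextNonce are intended to be used by the send/resend logic below: a fresh transaction takes accNextNonce (which then advances), a resend reuses the nonce of the pending transaction it replaces, and mined receipts move accNonce forward.

// pickNonce is an illustrative helper, not part of the TxManager API.
func pickNonce(accNextNonce uint64, resend bool, pendingTxNonce uint64) uint64 {
	if resend {
		// replacing a pending tx: reuse its nonce so the new tx supersedes it
		return pendingTxNonce
	}
	return accNextNonce
}

// After sending a fresh (non-resend) transaction with nonce n: accNextNonce = n + 1.
// After a mined receipt for a transaction with nonce n: accNonce = max(accNonce, n+1).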
|
||||
|
||||
// NewTxManager creates a new TxManager
|
||||
@@ -54,26 +64,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
|
||||
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lastSuccessNonce != lastPendingNonce {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
|
||||
lastSuccessNonce, lastPendingNonce))
|
||||
}
|
||||
log.Infow("TxManager started", "nonce", lastSuccessNonce)
|
||||
log.Infow("TxManager started", "nonce", accNonce)
|
||||
return &TxManager{
|
||||
cfg: *cfg,
|
||||
ethClient: ethClient,
|
||||
l2DB: l2DB,
|
||||
coord: coord,
|
||||
batchCh: make(chan *BatchInfo, queueLen),
|
||||
statsVarsCh: make(chan statsVars, queueLen),
|
||||
cfg: *cfg,
|
||||
ethClient: ethClient,
|
||||
l2DB: l2DB,
|
||||
coord: coord,
|
||||
batchCh: make(chan *BatchInfo, queueLen),
|
||||
statsVarsCh: make(chan statsVars, queueLen),
|
||||
discardPipelineCh: make(chan int, queueLen),
|
||||
account: accounts.Account{
|
||||
Address: *address,
|
||||
},
|
||||
@@ -82,8 +85,10 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
|
||||
|
||||
vars: *initSCVars,
|
||||
|
||||
lastSuccessNonce: lastSuccessNonce,
|
||||
lastPendingNonce: lastPendingNonce,
|
||||
minPipelineNum: 0,
|
||||
queue: NewQueue(),
|
||||
accNonce: accNonce,
|
||||
accNextNonce: accNonce,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -104,16 +109,17 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
|
||||
}
|
||||
}
|
||||
|
||||
// DiscardPipeline is a thread safe method to notify about a discarded pipeline
|
||||
// due to a reorg
|
||||
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
|
||||
select {
|
||||
case t.discardPipelineCh <- pipelineNum:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
|
||||
if vars.Rollup != nil {
|
||||
t.vars.Rollup = *vars.Rollup
|
||||
}
|
||||
if vars.Auction != nil {
|
||||
t.vars.Auction = *vars.Auction
|
||||
}
|
||||
if vars.WDelayer != nil {
|
||||
t.vars.WDelayer = *vars.WDelayer
|
||||
}
|
||||
updateSCVars(&t.vars, vars)
|
||||
}
|
||||
|
||||
// NewAuth generates a new auth object for an ethereum transaction
|
||||
@@ -122,10 +128,14 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
inc := new(big.Int).Set(gasPrice)
|
||||
const gasPriceDiv = 100
|
||||
inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
|
||||
gasPrice.Add(gasPrice, inc)
|
||||
if t.cfg.GasPriceIncPerc != 0 {
|
||||
inc := new(big.Int).Set(gasPrice)
|
||||
inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc))
|
||||
// nolint reason: to calculate percentages we use 100
|
||||
inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
|
||||
gasPrice.Add(gasPrice, inc)
|
||||
}
|
||||
|
||||
// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
|
||||
|
||||
auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
|
||||
@@ -134,6 +144,13 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
|
||||
}
|
||||
auth.Value = big.NewInt(0) // in wei
|
||||
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
|
||||
// This requires a function that estimates the gas usage of the
|
||||
// forgeBatch call based on the contents of the ForgeBatch args:
|
||||
// - length of l2txs
|
||||
// - length of l1Usertxs
|
||||
// - length of l1CoordTxs with authorization signature
|
||||
// - length of l1CoordTxs without authorization signature
|
||||
// - etc.
|
||||
auth.GasLimit = 1000000
|
||||
auth.GasPrice = gasPrice
|
||||
auth.Nonce = nil
|
||||
@@ -141,34 +158,83 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
|
||||
// TODO: Check if we can forge in the next blockNum, abort if we can't
|
||||
batchInfo.Debug.Status = StatusSent
|
||||
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
|
||||
batchInfo.Debug.SendTimestamp = time.Now()
|
||||
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
|
||||
batchInfo.Debug.StartTimestamp).Seconds()
|
||||
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
|
||||
nextBlock := t.stats.Eth.LastBlock.Num + 1
|
||||
if !t.canForgeAt(nextBlock) {
|
||||
return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
|
||||
}
|
||||
if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
|
||||
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
|
||||
}
|
||||
margin := t.cfg.SendBatchBlocksMarginCheck
|
||||
if margin != 0 {
|
||||
if !t.canForgeAt(nextBlock + margin) {
|
||||
return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
|
||||
margin, nextBlock))
|
||||
}
|
||||
if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
|
||||
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
|
||||
margin, nextBlock))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addPerc(v *big.Int, p int64) *big.Int {
|
||||
r := new(big.Int).Set(v)
|
||||
r.Mul(r, big.NewInt(p))
|
||||
// nolint reason: to calculate percentages we divide by 100
|
||||
r.Div(r, big.NewInt(100)) //nolint:gomnd
|
||||
return r.Add(v, r)
|
||||
}
|
||||
|
||||
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
|
||||
var ethTx *types.Transaction
|
||||
var err error
|
||||
auth, err := t.NewAuth(ctx)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
|
||||
t.lastPendingNonce++
|
||||
auth.Nonce = big.NewInt(int64(t.accNextNonce))
|
||||
if resend {
|
||||
auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
|
||||
}
|
||||
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
|
||||
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
|
||||
return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
|
||||
auth.GasPrice, t.cfg.MaxGasPrice))
|
||||
}
|
||||
// RollupForgeBatch() calls ethclient.SendTransaction()
|
||||
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
|
||||
if err != nil {
|
||||
// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
|
||||
// log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
|
||||
// "block", t.stats.Eth.LastBlock.Num+1)
|
||||
// return tracerr.Wrap(err)
|
||||
// }
|
||||
// We check the errors via strings because we match the
|
||||
// definition of the error from geth, with the string returned
|
||||
// via RPC obtained by the client.
|
||||
if err == nil {
|
||||
break
|
||||
} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) {
|
||||
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
|
||||
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
|
||||
auth.Nonce.Add(auth.Nonce, big.NewInt(1))
|
||||
attempt--
|
||||
} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) {
|
||||
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
|
||||
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
|
||||
auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
|
||||
attempt--
|
||||
} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) {
|
||||
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
|
||||
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
|
||||
auth.GasPrice = addPerc(auth.GasPrice, 10)
|
||||
attempt--
|
||||
} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) {
|
||||
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
|
||||
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
|
||||
auth.GasPrice = addPerc(auth.GasPrice, 10)
|
||||
attempt--
|
||||
} else {
|
||||
log.Errorw("TxManager ethClient.RollupForgeBatch",
|
||||
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
|
||||
"batchNum", batchInfo.BatchNum)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -179,10 +245,29 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
|
||||
if err != nil {
|
||||
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
|
||||
}
|
||||
if !resend {
|
||||
t.accNextNonce = auth.Nonce.Uint64() + 1
|
||||
}
|
||||
batchInfo.EthTx = ethTx
|
||||
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
|
||||
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
|
||||
now := time.Now()
|
||||
batchInfo.SendTimestamp = now
|
||||
|
||||
if resend {
|
||||
batchInfo.Debug.ResendNum++
|
||||
}
|
||||
batchInfo.Debug.Status = StatusSent
|
||||
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
|
||||
batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
|
||||
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
|
||||
batchInfo.Debug.StartTimestamp).Seconds()
|
||||
t.cfg.debugBatchStore(batchInfo)
|
||||
t.lastPendingBatch = batchInfo.BatchNum
|
||||
|
||||
if !resend {
|
||||
if batchInfo.L1Batch {
|
||||
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
|
||||
}
|
||||
}
|
||||
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -225,13 +310,20 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
|
||||
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
|
||||
receipt := batchInfo.Receipt
|
||||
if receipt != nil {
|
||||
if batchInfo.EthTx.Nonce()+1 > t.accNonce {
|
||||
t.accNonce = batchInfo.EthTx.Nonce() + 1
|
||||
}
|
||||
if receipt.Status == types.ReceiptStatusFailed {
|
||||
batchInfo.Debug.Status = StatusFailed
|
||||
t.cfg.debugBatchStore(batchInfo)
|
||||
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
|
||||
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
|
||||
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
|
||||
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
|
||||
"err", err)
|
||||
batchInfo.EthTxErr = err
|
||||
if batchInfo.BatchNum <= t.lastSuccessBatch {
|
||||
t.lastSuccessBatch = batchInfo.BatchNum - 1
|
||||
}
|
||||
t.cfg.debugBatchStore(batchInfo)
|
||||
return nil, tracerr.Wrap(fmt.Errorf(
|
||||
"ethereum transaction receipt status is failed: %w", err))
|
||||
} else if receipt.Status == types.ReceiptStatusSuccessful {
|
||||
@@ -239,6 +331,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
|
||||
batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
|
||||
batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
|
||||
batchInfo.Debug.StartBlockNum
|
||||
if batchInfo.Debug.StartToMineDelay == 0 {
|
||||
if block, err := t.ethClient.EthBlockByNumber(ctx,
|
||||
receipt.BlockNumber.Int64()); err != nil {
|
||||
log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
|
||||
} else {
|
||||
batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
|
||||
batchInfo.Debug.SendTimestamp).Seconds()
|
||||
batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
|
||||
batchInfo.Debug.StartTimestamp).Seconds()
|
||||
}
|
||||
}
|
||||
t.cfg.debugBatchStore(batchInfo)
|
||||
if batchInfo.BatchNum > t.lastSuccessBatch {
|
||||
t.lastSuccessBatch = batchInfo.BatchNum
|
||||
@@ -250,10 +353,73 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
|
||||
|
||||
// Queue of BatchInfos
|
||||
type Queue struct {
|
||||
list []*BatchInfo
|
||||
// nonceByBatchNum map[common.BatchNum]uint64
|
||||
next int
|
||||
}
|
||||
|
||||
// NewQueue returns a new queue
|
||||
func NewQueue() Queue {
|
||||
return Queue{
|
||||
list: make([]*BatchInfo, 0),
|
||||
// nonceByBatchNum: make(map[common.BatchNum]uint64),
|
||||
next: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Len is the length of the queue
|
||||
func (q *Queue) Len() int {
|
||||
return len(q.list)
|
||||
}
|
||||
|
||||
// At returns the BatchInfo at position (or nil if position is out of bounds)
|
||||
func (q *Queue) At(position int) *BatchInfo {
|
||||
if position >= len(q.list) {
|
||||
return nil
|
||||
}
|
||||
return q.list[position]
|
||||
}
|
||||
|
||||
// Next returns the next BatchInfo (or nil if queue is empty)
|
||||
func (q *Queue) Next() (int, *BatchInfo) {
|
||||
if len(q.list) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
defer func() { q.next = (q.next + 1) % len(q.list) }()
|
||||
return q.next, q.list[q.next]
|
||||
}
|
||||
|
||||
// Remove removes the BatchInfo at position
|
||||
func (q *Queue) Remove(position int) {
|
||||
// batchInfo := q.list[position]
|
||||
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
|
||||
q.list = append(q.list[:position], q.list[position+1:]...)
|
||||
if len(q.list) == 0 {
|
||||
q.next = 0
|
||||
} else {
|
||||
q.next = position % len(q.list)
|
||||
}
|
||||
}
|
||||
|
||||
// Push adds a new BatchInfo
|
||||
func (q *Queue) Push(batchInfo *BatchInfo) {
|
||||
q.list = append(q.list, batchInfo)
|
||||
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
|
||||
}
|
||||
|
||||
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
|
||||
// nonce, ok := q.nonceByBatchNum[batchNum]
|
||||
// return nonce, ok
|
||||
// }
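A small usage sketch of the Queue defined above: Next cycles round-robin over the queued BatchInfos and Remove keeps the cursor consistent. It relies only on the methods shown; the BatchNum values are arbitrary.

func exampleQueueUsage() {
	q := NewQueue()
	q.Push(&BatchInfo{BatchNum: 10})
	q.Push(&BatchInfo{BatchNum: 11})
	q.Push(&BatchInfo{BatchNum: 12})

	pos, bi := q.Next() // pos == 0, bi.BatchNum == 10; cursor advances to 1
	_ = bi

	q.Remove(pos) // queue is now [11, 12] and the cursor is reset to position 0

	_, bi = q.Next() // returns BatchNum 11
	_ = bi
}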
|
||||
|
||||
// Run the TxManager
|
||||
func (t *TxManager) Run(ctx context.Context) {
|
||||
next := 0
|
||||
waitDuration := longWaitDuration
|
||||
waitCh := time.After(longWaitDuration)
|
||||
|
||||
var statsVars statsVars
|
||||
select {
|
||||
@@ -263,7 +429,7 @@ func (t *TxManager) Run(ctx context.Context) {
|
||||
t.stats = statsVars.Stats
|
||||
t.syncSCVars(statsVars.Vars)
|
||||
log.Infow("TxManager: received initial statsVars",
|
||||
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
|
||||
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -273,8 +439,27 @@ func (t *TxManager) Run(ctx context.Context) {
|
||||
case statsVars := <-t.statsVarsCh:
|
||||
t.stats = statsVars.Stats
|
||||
t.syncSCVars(statsVars.Vars)
|
||||
case pipelineNum := <-t.discardPipelineCh:
|
||||
t.minPipelineNum = pipelineNum + 1
|
||||
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
|
||||
continue
|
||||
}
|
||||
case batchInfo := <-t.batchCh:
|
||||
if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
|
||||
if batchInfo.PipelineNum < t.minPipelineNum {
|
||||
log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
|
||||
"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
|
||||
}
|
||||
if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
|
||||
log.Warnw("TxManager: shouldSend", "err", err,
|
||||
"batch", batchInfo.BatchNum)
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
|
||||
continue
|
||||
}
|
||||
if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
// If we reach here it's because our ethNode has
|
||||
@@ -282,19 +467,20 @@ func (t *TxManager) Run(ctx context.Context) {
|
||||
// ethereum. This could be due to the ethNode
|
||||
// failure, or an invalid transaction (that
|
||||
// can't be mined)
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
|
||||
log.Warnw("TxManager: forgeBatch send failed", "err", err,
|
||||
"batch", batchInfo.BatchNum)
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf("forgeBatch send: %v", err)})
|
||||
continue
|
||||
}
|
||||
t.queue = append(t.queue, batchInfo)
|
||||
waitDuration = t.cfg.TxManagerCheckInterval
|
||||
case <-time.After(waitDuration):
|
||||
if len(t.queue) == 0 {
|
||||
waitDuration = longWaitDuration
|
||||
t.queue.Push(batchInfo)
|
||||
waitCh = time.After(t.cfg.TxManagerCheckInterval)
|
||||
case <-waitCh:
|
||||
queuePosition, batchInfo := t.queue.Next()
|
||||
if batchInfo == nil {
|
||||
waitCh = time.After(longWaitDuration)
|
||||
continue
|
||||
}
|
||||
current := next
|
||||
next = (current + 1) % len(t.queue)
|
||||
batchInfo := t.queue[current]
|
||||
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil { //nolint:staticcheck
|
||||
@@ -304,7 +490,8 @@ func (t *TxManager) Run(ctx context.Context) {
|
||||
// if it was not mined, mined and successful or
|
||||
// mined and failed. This could be due to the
|
||||
// ethNode failure.
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
|
||||
}
|
||||
|
||||
confirm, err := t.handleReceipt(ctx, batchInfo)
|
||||
@@ -312,32 +499,106 @@ func (t *TxManager) Run(ctx context.Context) {
|
||||
continue
|
||||
} else if err != nil { //nolint:staticcheck
|
||||
// Transaction was rejected
|
||||
t.queue = append(t.queue[:current], t.queue[current+1:]...)
|
||||
if len(t.queue) == 0 {
|
||||
next = 0
|
||||
} else {
|
||||
next = current % len(t.queue)
|
||||
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
|
||||
continue
|
||||
}
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
|
||||
continue
|
||||
}
|
||||
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
|
||||
log.Debugw("TxManager tx for RollupForgeBatch confirmed",
|
||||
"batch", batchInfo.BatchNum)
|
||||
t.queue = append(t.queue[:current], t.queue[current+1:]...)
|
||||
if len(t.queue) == 0 {
|
||||
next = 0
|
||||
} else {
|
||||
next = current % len(t.queue)
|
||||
now := time.Now()
|
||||
if !t.cfg.EthNoReuseNonce && confirm == nil &&
|
||||
now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
|
||||
log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
|
||||
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
|
||||
if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
// If we reach here it's because our ethNode has
|
||||
// been unable to send the transaction to
|
||||
// ethereum. This could be due to the ethNode
|
||||
// failure, or an invalid transaction (that
|
||||
// can't be mined)
|
||||
log.Warnw("TxManager: forgeBatch resend failed", "err", err,
|
||||
"batch", batchInfo.BatchNum)
|
||||
t.coord.SendMsg(ctx, MsgStopPipeline{
|
||||
Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
|
||||
log.Debugw("TxManager: forgeBatch tx confirmed",
|
||||
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
|
||||
t.queue.Remove(queuePosition)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// nolint reason: this function will be used in the future
|
||||
//nolint:unused
|
||||
func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
|
||||
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
|
||||
next := 0
|
||||
for {
|
||||
batchInfo := t.queue.At(next)
|
||||
if batchInfo == nil {
|
||||
break
|
||||
}
|
||||
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
// Our ethNode is giving an error different
|
||||
// than "not found" when getting the receipt
|
||||
// for the transaction, so we can't figure out
|
||||
// if it was not mined, mined and successful or
|
||||
// mined and failed. This could be due to the
|
||||
// ethNode failure.
|
||||
next++
|
||||
continue
|
||||
}
|
||||
confirm, err := t.handleReceipt(ctx, batchInfo)
|
||||
if ctx.Err() != nil {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
// Transaction was rejected
|
||||
if t.minPipelineNum <= batchInfo.PipelineNum {
|
||||
t.minPipelineNum = batchInfo.PipelineNum + 1
|
||||
}
|
||||
t.queue.Remove(next)
|
||||
continue
|
||||
}
|
||||
// If tx is pending but is from a cancelled pipeline, remove it
|
||||
// from the queue
|
||||
if confirm == nil {
|
||||
if batchInfo.PipelineNum < t.minPipelineNum {
|
||||
t.queue.Remove(next)
|
||||
continue
|
||||
}
|
||||
}
|
||||
next++
|
||||
}
|
||||
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !t.cfg.EthNoReuseNonce {
|
||||
t.accNextNonce = accNonce
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TxManager) canForgeAt(blockNum int64) bool {
|
||||
return canForge(&t.consts.Auction, &t.vars.Auction,
|
||||
&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
|
||||
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
|
||||
t.cfg.ForgerAddress, blockNum)
|
||||
}
|
||||
|
||||
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
|
||||
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
|
||||
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
|
||||
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
|
||||
}
|
||||
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
|
||||
}
|
||||
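The mustL1L2Batch rule above forces an L1L2 batch once enough blocks have passed since the last L1 batch. A standalone restatement with made-up numbers (a hedged sketch, not part of the change):

// mustL1L2BatchExample restates the comparison used by TxManager.mustL1L2Batch.
func mustL1L2BatchExample(blockNum, lastL1BatchBlockNum, forgeL1L2BatchTimeout int64) bool {
	return blockNum-lastL1BatchBlockNum >= forgeL1L2BatchTimeout-1
}

// With lastL1BatchBlockNum = 103 and a timeout of 10 blocks (illustrative values),
// block 111 does not force an L1L2 batch (8 < 9) but block 112 does (9 >= 9).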
|
||||
coordinator/txmanager_test.go (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAddPerc(t *testing.T) {
|
||||
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
|
||||
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
|
||||
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
|
||||
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
|
||||
}
|
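The new test above pins down addPerc's behaviour. Below is a minimal implementation consistent with those assertions; the real helper lives in the coordinator package, so this version is only an assumption inferred from the asserted values (imports: math/big):

// addPercSketch returns v increased by p percent, using integer division as
// the test values suggest (100 + 10% = 110, 10 + 20% = 12, ...).
func addPercSketch(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Mul(v, big.NewInt(p)) // v * p
	r.Div(r, big.NewInt(100))               // (v * p) / 100
	return r.Add(r, v)                      // v + v*p/100
}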
||||
@@ -833,10 +833,18 @@ func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
|
||||
defer hdb.apiConnCon.Release()
|
||||
account := &AccountAPI{}
|
||||
err = meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
|
||||
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
|
||||
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
|
||||
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
|
||||
FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
|
||||
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
|
||||
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
|
||||
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd,
|
||||
token.usd_update, account_update.nonce, account_update.balance
|
||||
FROM account inner JOIN (
|
||||
SELECT idx, nonce, balance
|
||||
FROM account_update
|
||||
WHERE idx = $1
|
||||
ORDER BY item_id DESC LIMIT 1
|
||||
) AS account_update ON account_update.idx = account.idx
|
||||
INNER JOIN token ON account.token_id = token.token_id
|
||||
WHERE account.idx = $1;`, idx)
|
||||
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
@@ -864,8 +872,13 @@ func (hdb *HistoryDB) GetAccountsAPI(
|
||||
queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
|
||||
account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
|
||||
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
|
||||
COUNT(*) OVER() AS total_items
|
||||
FROM account INNER JOIN token ON account.token_id = token.token_id `
|
||||
account_update.nonce, account_update.balance, COUNT(*) OVER() AS total_items
|
||||
FROM account inner JOIN (
|
||||
SELECT DISTINCT idx,
|
||||
first_value(nonce) over(partition by idx ORDER BY item_id DESC) as nonce,
|
||||
first_value(balance) over(partition by idx ORDER BY item_id DESC) as balance
|
||||
FROM account_update
|
||||
) AS account_update ON account_update.idx = account.idx INNER JOIN token ON account.token_id = token.token_id `
|
||||
// Apply filters
|
||||
nextIsAnd := false
|
||||
// ethAddr filter
|
||||
@@ -1024,3 +1037,18 @@ func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
|
||||
|
||||
return avgTransactionFee, nil
|
||||
}
|
||||
|
||||
// GetCommonAccountAPI returns the account associated to an account idx
|
||||
func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
|
||||
cancel, err := hdb.apiConnCon.Acquire()
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
defer hdb.apiConnCon.Release()
|
||||
account := &common.Account{}
|
||||
err = meddler.QueryRow(
|
||||
hdb.db, account, `SELECT * FROM account WHERE idx = $1;`, idx,
|
||||
)
|
||||
return account, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
|
||||
timestamp,
|
||||
hash
|
||||
) VALUES %s;`,
|
||||
blocks[:],
|
||||
blocks,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -164,6 +164,19 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBatch returns the batch with the given batchNum
|
||||
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
|
||||
var batch common.Batch
|
||||
err := meddler.QueryRow(
|
||||
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
|
||||
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
|
||||
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
|
||||
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
|
||||
batchNum,
|
||||
)
|
||||
return &batch, err
|
||||
}
|
||||
|
||||
// GetAllBatches retrieve all batches from the DB
|
||||
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
|
||||
var batches []*common.Batch
|
||||
@@ -208,6 +221,18 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
|
||||
return batchNum, tracerr.Wrap(row.Scan(&batchNum))
|
||||
}
|
||||
|
||||
// GetLastBatch returns the last forged batch
|
||||
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
|
||||
var batch common.Batch
|
||||
err := meddler.QueryRow(
|
||||
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
|
||||
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
|
||||
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
|
||||
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
|
||||
)
|
||||
return &batch, err
|
||||
}
|
||||
|
||||
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
|
||||
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
|
||||
row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
|
||||
@@ -248,7 +273,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
|
||||
return tracerr.Wrap(db.BulkInsert(
|
||||
d,
|
||||
"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
|
||||
bids[:],
|
||||
bids,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -299,7 +324,7 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
|
||||
return tracerr.Wrap(db.BulkInsert(
|
||||
d,
|
||||
"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
|
||||
coordinators[:],
|
||||
coordinators,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -315,7 +340,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
|
||||
d,
|
||||
"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
|
||||
"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
|
||||
exitTree[:],
|
||||
exitTree,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -418,11 +443,12 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
|
||||
symbol,
|
||||
decimals
|
||||
) VALUES %s;`,
|
||||
tokens[:],
|
||||
tokens,
|
||||
))
|
||||
}
|
||||
|
||||
// UpdateTokenValue updates the USD value of a token
|
||||
// UpdateTokenValue updates the USD value of a token. Value is the price in
|
||||
// USD of a normalized token (1 token = 10^decimals units)
|
||||
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
|
||||
// Sanitize symbol
|
||||
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
|
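The updated doc comment above fixes the unit convention: the stored USD value prices one whole token, i.e. 10^decimals base units. A hedged sketch of the conversion this implies, mirroring the common.TokensToUSD helper used by the tests later in this change (this standalone version is only illustrative; imports: math, math/big):

// tokensToUSDSketch converts a raw amount in base units into USD, given the
// token decimals and the normalized USD price stored by UpdateTokenValue.
func tokensToUSDSketch(amount *big.Int, decimals uint64, priceUSD float64) float64 {
	f, _ := new(big.Float).SetInt(amount).Float64()
	return f / math.Pow(10, float64(decimals)) * priceUSD
}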
||||
@@ -489,7 +515,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
|
||||
bjj,
|
||||
eth_addr
|
||||
) VALUES %s;`,
|
||||
accounts[:],
|
||||
accounts,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -503,6 +529,37 @@ func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
|
||||
return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// AddAccountUpdates inserts accUpdates into the DB
|
||||
func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
|
||||
return tracerr.Wrap(hdb.addAccountUpdates(hdb.db, accUpdates))
|
||||
}
|
||||
func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
|
||||
if len(accUpdates) == 0 {
|
||||
return nil
|
||||
}
|
||||
return tracerr.Wrap(db.BulkInsert(
|
||||
d,
|
||||
`INSERT INTO account_update (
|
||||
eth_block_num,
|
||||
batch_num,
|
||||
idx,
|
||||
nonce,
|
||||
balance
|
||||
) VALUES %s;`,
|
||||
accUpdates,
|
||||
))
|
||||
}
|
||||
|
||||
// GetAllAccountUpdates returns all the AccountUpdate from the DB
|
||||
func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
|
||||
var accUpdates []*common.AccountUpdate
|
||||
err := meddler.QueryAll(
|
||||
hdb.db, &accUpdates,
|
||||
"SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
|
||||
)
|
||||
return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
|
||||
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
|
||||
// BatchNum should be null, and the value will be setted by a trigger when a batch forges the tx.
|
||||
@@ -621,7 +678,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
|
||||
fee,
|
||||
nonce
|
||||
) VALUES %s;`,
|
||||
txs[:],
|
||||
txs,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -756,7 +813,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
|
||||
block_stamp,
|
||||
withdrawals
|
||||
) VALUES %s;`,
|
||||
bucketUpdates[:],
|
||||
bucketUpdates,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -788,7 +845,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
|
||||
eth_addr,
|
||||
value_usd
|
||||
) VALUES %s;`,
|
||||
tokenExchanges[:],
|
||||
tokenExchanges,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -816,7 +873,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
|
||||
token_addr,
|
||||
amount
|
||||
) VALUES %s;`,
|
||||
escapeHatchWithdrawals[:],
|
||||
escapeHatchWithdrawals,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -993,6 +1050,11 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// Add account updates, if any
|
||||
if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// Set the EffectiveAmount and EffectiveDepositAmount of all the
|
||||
// L1UserTxs that have been forged in this batch
|
||||
if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {
|
||||
|
||||
@@ -203,6 +203,10 @@ func TestBatches(t *testing.T) {
|
||||
fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
|
||||
// Test GetLastBatch
|
||||
fetchedLastBatch, err := historyDB.GetLastBatch()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
|
||||
// Test GetLastL1TxsNum
|
||||
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
|
||||
assert.NoError(t, err)
|
||||
@@ -211,6 +215,12 @@ func TestBatches(t *testing.T) {
|
||||
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
|
||||
// Test GetBatch
|
||||
fetchedBatch, err := historyDB.GetBatch(1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &batches[0], fetchedBatch)
|
||||
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
|
||||
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
|
||||
}
|
||||
|
||||
func TestBids(t *testing.T) {
|
||||
@@ -367,6 +377,22 @@ func TestAccounts(t *testing.T) {
|
||||
accs[i].Balance = nil
|
||||
assert.Equal(t, accs[i], acc)
|
||||
}
|
||||
// Test AccountBalances
|
||||
accUpdates := make([]common.AccountUpdate, len(accs))
|
||||
for i, acc := range accs {
|
||||
accUpdates[i] = common.AccountUpdate{
|
||||
EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
|
||||
BatchNum: acc.BatchNum,
|
||||
Idx: acc.Idx,
|
||||
Nonce: common.Nonce(i),
|
||||
Balance: big.NewInt(int64(i)),
|
||||
}
|
||||
}
|
||||
err = historyDB.AddAccountUpdates(accUpdates)
|
||||
require.NoError(t, err)
|
||||
fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, accUpdates, fetchedAccBalances)
|
||||
}
|
||||
|
||||
func TestTxs(t *testing.T) {
|
||||
@@ -1185,7 +1211,8 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
||||
set = append(set, til.Instruction{Typ: til.TypeNewBlock})
|
||||
|
||||
// Transfers
|
||||
for x := 0; x < 6000; x++ {
|
||||
const numBlocks int = 30
|
||||
for x := 0; x < numBlocks; x++ {
|
||||
set = append(set, til.Instruction{
|
||||
Typ: common.TxTypeTransfer,
|
||||
TokenID: common.TokenID(0),
|
||||
@@ -1209,19 +1236,20 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
||||
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
|
||||
require.NoError(t, err)
|
||||
|
||||
const numBatches int = 6002
|
||||
const numTx int = 6003
|
||||
const blockNum = 6005 - 1
|
||||
const numBatches int = 2 + numBlocks
|
||||
const blockNum = 4 + numBlocks
|
||||
|
||||
// Sanity check
|
||||
require.Equal(t, blockNum, len(blocks))
|
||||
|
||||
// Adding one batch per block
|
||||
// batch frequency can be chosen
|
||||
const frequency int = 15
|
||||
const blockTime time.Duration = 3600 * time.Second
|
||||
now := time.Now()
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := range blocks {
|
||||
blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
|
||||
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime)
|
||||
err = historyDB.AddBlockSCData(&blocks[i])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -1229,16 +1257,10 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
|
||||
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
|
||||
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
|
||||
|
||||
// Frequency is not exactly the desired one, some decimals may appear
|
||||
assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
|
||||
assert.Less(t, res.BatchFrequency, float64(frequency+1))
|
||||
// Truncate frequency into an int to do an exact check
|
||||
assert.Equal(t, frequency, int(res.BatchFrequency))
|
||||
// This may also be different in some decimals
|
||||
// Truncate it to the third decimal to compare
|
||||
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
|
||||
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
|
||||
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
|
||||
assert.Equal(t, int64(3), res.TotalAccounts)
|
||||
assert.Equal(t, int64(3), res.TotalBJJs)
|
||||
// Til does not set fees
|
||||
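The rewritten assertions follow directly from the new test layout: one batch per block, roughly one transfer per batch, and one block per hour. Restated as a hedged back-of-the-envelope check (illustrative arithmetic only):

// blockTime = 3600s, ~1 tx per batch, 1 batch per block, so:
//   TransactionsPerBatch  ≈ 1.0
//   BatchFrequency        ≈ 3600 s
//   TransactionsPerSecond ≈ 1/3600 ≈ 0.000278
// which is what the InEpsilon(..., 0.1) checks above assert.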
|
||||
@@ -239,8 +239,8 @@ type AccountAPI struct {
|
||||
BatchNum common.BatchNum `meddler:"batch_num"`
|
||||
PublicKey apitypes.HezBJJ `meddler:"bjj"`
|
||||
EthAddr apitypes.HezEthAddr `meddler:"eth_addr"`
|
||||
Nonce common.Nonce `meddler:"-"` // max of 40 bits used
|
||||
Balance *apitypes.BigIntStr `meddler:"-"` // max of 192 bits used
|
||||
Nonce common.Nonce `meddler:"nonce"` // max of 40 bits used
|
||||
Balance *apitypes.BigIntStr `meddler:"balance"` // max of 192 bits used
|
||||
TotalItems uint64 `meddler:"total_items"`
|
||||
FirstItem uint64 `meddler:"first_item"`
|
||||
LastItem uint64 `meddler:"last_item"`
|
||||
|
||||
@@ -425,12 +425,13 @@ func (k *KVDB) MakeCheckpoint() error {
|
||||
}
|
||||
|
||||
// if a checkpoint for this BatchNum already exists on disk, delete it
|
||||
if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
|
||||
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
|
||||
} else if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
} else {
|
||||
if err := os.RemoveAll(checkpointPath); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
} else if err != nil && !os.IsNotExist(err) {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// execute Checkpoint
|
||||
@@ -451,12 +452,25 @@ func (k *KVDB) MakeCheckpoint() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckpointExists returns true if the checkpoint exists
|
||||
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
|
||||
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
|
||||
if _, err := os.Stat(source); os.IsNotExist(err) {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// DeleteCheckpoint removes the checkpoint of the given batchNum, if it exists
|
||||
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
|
||||
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
|
||||
|
||||
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
|
||||
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
|
||||
} else if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
return os.RemoveAll(checkpointPath)
|
||||
@@ -520,6 +534,8 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
|
||||
if _, err := os.Stat(source); os.IsNotExist(err) {
|
||||
// if kvdb does not have checkpoint at batchNum, return err
|
||||
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
|
||||
} else if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
// By locking we allow calling MakeCheckpointFromTo from multiple
|
||||
// places at the same time for the same stateDB. This allows the
|
||||
@@ -533,12 +549,13 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
|
||||
|
||||
func pebbleMakeCheckpoint(source, dest string) error {
|
||||
// Remove dest folder (if it exists) before doing the checkpoint
|
||||
if _, err := os.Stat(dest); !os.IsNotExist(err) {
|
||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||
} else if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
} else {
|
||||
if err := os.RemoveAll(dest); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
} else if err != nil && !os.IsNotExist(err) {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
sto, err := pebble.NewPebbleStorage(source, false)
|
||||
|
||||
@@ -1,12 +1,18 @@
|
||||
package l2db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/hermeznetwork/hermez-node/common"
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
"github.com/russross/meddler"
|
||||
)
|
||||
|
||||
var (
|
||||
errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
|
||||
)
|
||||
|
||||
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB
|
||||
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
|
||||
cancel, err := l2db.apiConnCon.Acquire()
|
||||
@@ -42,20 +48,54 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
defer l2db.apiConnCon.Release()
|
||||
row := l2db.db.QueryRow(
|
||||
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
|
||||
common.PoolL2TxStatePending,
|
||||
)
|
||||
var totalTxs uint32
|
||||
if err := row.Scan(&totalTxs); err != nil {
|
||||
|
||||
row := l2db.db.QueryRow(`SELECT
|
||||
($1::NUMERIC * token.usd * fee_percentage($2::NUMERIC)) /
|
||||
(10.0 ^ token.decimals::NUMERIC)
|
||||
FROM token WHERE token.token_id = $3;`,
|
||||
tx.AmountFloat, tx.Fee, tx.TokenID)
|
||||
var feeUSD float64
|
||||
if err := row.Scan(&feeUSD); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if totalTxs >= l2db.maxTxs {
|
||||
return tracerr.New(
|
||||
"The pool is at full capacity. More transactions are not accepted currently",
|
||||
)
|
||||
if feeUSD < l2db.minFeeUSD {
|
||||
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
|
||||
feeUSD, l2db.minFeeUSD))
|
||||
}
|
||||
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
|
||||
|
||||
// Prepare insert SQL query argument parameters
|
||||
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
values, err := meddler.Default.Values(tx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q := fmt.Sprintf(
|
||||
`INSERT INTO tx_pool (%s)
|
||||
SELECT %s
|
||||
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
|
||||
namesPart, valuesPart,
|
||||
len(values)+1, len(values)+2) //nolint:gomnd
|
||||
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
|
||||
res, err := l2db.db.Exec(q, values...)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
rowsAffected, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if rowsAffected == 0 {
|
||||
return tracerr.Wrap(errPoolFull)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
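The INSERT ... SELECT ... WHERE form used by the new AddTxAPI makes the pool-capacity check and the insert a single statement, so two concurrent calls cannot both slip past the maxTxs limit. A hedged sketch of the rendered query with a reduced column set (the real statement interpolates all tx_pool columns from meddler):

const cappedInsertSketch = `
INSERT INTO tx_pool (tx_id, state)
SELECT $1, $2
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $3) < $4;`

// args: txID, pending, pending, maxTxs. RowsAffected() == 0 then means the
// pool was full and errPoolFull is returned, as in AddTxAPI above.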
||||
// selectPoolTxAPI select part of queries to get PoolL2TxRead
|
||||
|
||||
@@ -25,6 +25,7 @@ type L2DB struct {
|
||||
safetyPeriod common.BatchNum
|
||||
ttl time.Duration
|
||||
maxTxs uint32 // limit of txs that are accepted in the pool
|
||||
minFeeUSD float64
|
||||
apiConnCon *db.APIConnectionController
|
||||
}
|
||||
|
||||
@@ -35,6 +36,7 @@ func NewL2DB(
|
||||
db *sqlx.DB,
|
||||
safetyPeriod common.BatchNum,
|
||||
maxTxs uint32,
|
||||
minFeeUSD float64,
|
||||
TTL time.Duration,
|
||||
apiConnCon *db.APIConnectionController,
|
||||
) *L2DB {
|
||||
@@ -43,6 +45,7 @@ func NewL2DB(
|
||||
safetyPeriod: safetyPeriod,
|
||||
ttl: TTL,
|
||||
maxTxs: maxTxs,
|
||||
minFeeUSD: minFeeUSD,
|
||||
apiConnCon: apiConnCon,
|
||||
}
|
||||
}
|
||||
@@ -73,24 +76,6 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
|
||||
))
|
||||
}
|
||||
|
||||
// AddTx inserts a tx to the pool
|
||||
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
|
||||
row := l2db.db.QueryRow(
|
||||
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
|
||||
common.PoolL2TxStatePending,
|
||||
)
|
||||
var totalTxs uint32
|
||||
if err := row.Scan(&totalTxs); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if totalTxs >= l2db.maxTxs {
|
||||
return tracerr.New(
|
||||
"The pool is at full capacity. More transactions are not accepted currently",
|
||||
)
|
||||
}
|
||||
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
|
||||
}
|
||||
|
||||
// UpdateTxsInfo updates the parameter Info of the pool transactions
|
||||
func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
|
||||
if len(txs) == 0 {
|
||||
@@ -122,9 +107,8 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
|
||||
// but in production txs will only be inserted through the API
|
||||
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
|
||||
// NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx
|
||||
func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
|
||||
// transform tx from *common.PoolL2Tx to PoolL2TxWrite
|
||||
insertTx := &PoolL2TxWrite{
|
||||
TxID: tx.TxID,
|
||||
@@ -166,6 +150,13 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
|
||||
f := new(big.Float).SetInt(tx.Amount)
|
||||
amountF, _ := f.Float64()
|
||||
insertTx.AmountFloat = amountF
|
||||
return insertTx
|
||||
}
|
||||
|
||||
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
|
||||
// but in production txs will only be inserted through the API
|
||||
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
|
||||
insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
|
||||
// insert tx
|
||||
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
|
||||
}
|
||||
@@ -176,7 +167,8 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
|
||||
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
|
||||
rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
|
||||
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
|
||||
fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update
|
||||
(fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) /
|
||||
(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
|
||||
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
|
||||
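The corrected fee_usd expression divides by 10^decimals so the fee is priced on whole tokens rather than base units. A hedged worked example using values from the tests below (decimals = 3, token price ≈ 26.95 USD, fee selector 126 ≈ 10% per the test comment):

// amount_f = 6000 base units = 6.0 tokens
// fee_usd ≈ 0.10 * 26.95 * 6000 / 10^3 ≈ 16.17 USD
// (without the 10^decimals division the old expression would have reported
// roughly 16,170 USD for the same transfer)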
|
||||
// GetTx return the specified Tx in common.PoolL2Tx format
|
||||
@@ -354,3 +346,14 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
|
||||
)
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// PurgeByExternalDelete deletes all pending transactions marked with true in
|
||||
// the `external_delete` column. An external process can set this column to
|
||||
// true to instruct the coordinator to delete the tx when possible.
|
||||
func (l2db *L2DB) PurgeByExternalDelete() error {
|
||||
_, err := l2db.db.Exec(
|
||||
`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
|
||||
common.PoolL2TxStatePending,
|
||||
)
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
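The external_delete flag is meant to be flipped from outside the node, for example by an operator running SQL directly; PurgeByExternalDelete then drops any such tx that is still pending. A hedged usage sketch (the tx id is a placeholder):

// UPDATE tx_pool SET external_delete = true WHERE tx_id = $1;
// followed, on the coordinator side, by:
//   if err := l2db.PurgeByExternalDelete(); err != nil { /* handle */ }
// TestPurgeByExternalDelete later in this change exercises exactly this flow.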
|
||||
@@ -1,8 +1,8 @@
|
||||
package l2db
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -20,12 +20,14 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var decimals = uint64(3)
|
||||
var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
|
||||
var l2DB *L2DB
|
||||
var l2DBWithACC *L2DB
|
||||
var historyDB *historydb.HistoryDB
|
||||
var tc *til.Context
|
||||
var tokens map[common.TokenID]historydb.TokenWithUSD
|
||||
var tokensValue map[common.TokenID]float64
|
||||
|
||||
var accs map[common.Idx]common.Account
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -35,9 +37,9 @@ func TestMain(m *testing.M) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
|
||||
l2DB = NewL2DB(db, 10, 1000, 0.0, 24*time.Hour, nil)
|
||||
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
|
||||
l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
|
||||
l2DBWithACC = NewL2DB(db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
|
||||
test.WipeDB(l2DB.DB())
|
||||
historyDB = historydb.NewHistoryDB(db, nil)
|
||||
// Run tests
|
||||
@@ -58,10 +60,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
|
||||
|
||||
AddToken(1)
|
||||
AddToken(2)
|
||||
CreateAccountDeposit(1) A: 2000
|
||||
CreateAccountDeposit(2) A: 2000
|
||||
CreateAccountDeposit(1) B: 1000
|
||||
CreateAccountDeposit(2) B: 1000
|
||||
CreateAccountDeposit(1) A: 20000
|
||||
CreateAccountDeposit(2) A: 20000
|
||||
CreateAccountDeposit(1) B: 10000
|
||||
CreateAccountDeposit(2) B: 10000
|
||||
> batchL1
|
||||
> batchL1
|
||||
> block
|
||||
@@ -82,15 +84,23 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
for i := range blocks {
|
||||
block := &blocks[i]
|
||||
for j := range block.Rollup.AddedTokens {
|
||||
token := &block.Rollup.AddedTokens[j]
|
||||
token.Name = fmt.Sprintf("Token %d", token.TokenID)
|
||||
token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
|
||||
token.Decimals = decimals
|
||||
}
|
||||
}
|
||||
|
||||
tokens = make(map[common.TokenID]historydb.TokenWithUSD)
|
||||
tokensValue = make(map[common.TokenID]float64)
|
||||
// tokensValue = make(map[common.TokenID]float64)
|
||||
accs = make(map[common.Idx]common.Account)
|
||||
value := 5 * 5.389329
|
||||
now := time.Now().UTC()
|
||||
// Add all blocks except for the last one
|
||||
for i := range blocks[:len(blocks)-1] {
|
||||
err = historyDB.AddBlockSCData(&blocks[i])
|
||||
if err != nil {
|
||||
if err := historyDB.AddBlockSCData(&blocks[i]); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
for _, batch := range blocks[i].Rollup.Batches {
|
||||
@@ -106,39 +116,38 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
|
||||
Name: token.Name,
|
||||
Symbol: token.Symbol,
|
||||
Decimals: token.Decimals,
|
||||
USD: &tokenValue,
|
||||
USDUpdate: &now,
|
||||
}
|
||||
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
|
||||
readToken.USDUpdate = &now
|
||||
readToken.USD = &value
|
||||
tokens[token.TokenID] = readToken
|
||||
}
|
||||
// Set value to the tokens (tokens have no symbol)
|
||||
tokenSymbol := ""
|
||||
err := historyDB.UpdateTokenValue(tokenSymbol, value)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
// Set value to the tokens
|
||||
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
|
||||
// Fee = 126 corresponds to ~10%
|
||||
setPool := `
|
||||
Type: PoolL2
|
||||
PoolTransfer(1) A-B: 6 (4)
|
||||
PoolTransfer(2) A-B: 3 (1)
|
||||
PoolTransfer(1) B-A: 5 (2)
|
||||
PoolTransfer(2) B-A: 10 (3)
|
||||
PoolTransfer(1) A-B: 7 (2)
|
||||
PoolTransfer(2) A-B: 2 (1)
|
||||
PoolTransfer(1) B-A: 8 (2)
|
||||
PoolTransfer(2) B-A: 1 (1)
|
||||
PoolTransfer(1) A-B: 3 (1)
|
||||
PoolTransferToEthAddr(2) B-A: 5 (2)
|
||||
PoolTransferToBJJ(2) B-A: 5 (2)
|
||||
PoolTransfer(1) A-B: 6000 (126)
|
||||
PoolTransfer(2) A-B: 3000 (126)
|
||||
PoolTransfer(1) B-A: 5000 (126)
|
||||
PoolTransfer(2) B-A: 10000 (126)
|
||||
PoolTransfer(1) A-B: 7000 (126)
|
||||
PoolTransfer(2) A-B: 2000 (126)
|
||||
PoolTransfer(1) B-A: 8000 (126)
|
||||
PoolTransfer(2) B-A: 1000 (126)
|
||||
PoolTransfer(1) A-B: 3000 (126)
|
||||
PoolTransferToEthAddr(2) B-A: 5000 (126)
|
||||
PoolTransferToBJJ(2) B-A: 5000 (126)
|
||||
|
||||
PoolExit(1) A: 5 (2)
|
||||
PoolExit(2) B: 3 (1)
|
||||
PoolExit(1) A: 5000 (126)
|
||||
PoolExit(2) B: 3000 (126)
|
||||
`
|
||||
poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
|
||||
if err != nil {
|
||||
@@ -153,25 +162,74 @@ func TestAddTxTest(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assertTx(t, &poolL2Txs[i], fetchedTx)
|
||||
nameZone, offset := fetchedTx.Timestamp.Zone()
|
||||
assert.Equal(t, "UTC", nameZone)
|
||||
assert.Equal(t, 0, offset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddTxAPI(t *testing.T) {
|
||||
err := prepareHistoryDB(historyDB)
|
||||
if err != nil {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
|
||||
oldMaxTxs := l2DBWithACC.maxTxs
|
||||
// set max number of pending txs that can be kept in the pool to 5
|
||||
l2DBWithACC.maxTxs = 5
|
||||
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
txs := make([]*PoolL2TxWrite, len(poolL2Txs))
|
||||
for i := range poolL2Txs {
|
||||
txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.GreaterOrEqual(t, len(poolL2Txs), 8)
|
||||
for i := range txs[:5] {
|
||||
err := l2DBWithACC.AddTxAPI(txs[i])
|
||||
require.NoError(t, err)
|
||||
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
|
||||
require.NoError(t, err)
|
||||
assertTx(t, &poolL2Txs[i], fetchedTx)
|
||||
nameZone, offset := fetchedTx.Timestamp.Zone()
|
||||
assert.Equal(t, "UTC", nameZone)
|
||||
assert.Equal(t, 0, offset)
|
||||
}
|
||||
err = l2DBWithACC.AddTxAPI(txs[5])
|
||||
assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
|
||||
// reset maxTxs to original value
|
||||
l2DBWithACC.maxTxs = oldMaxTxs
|
||||
|
||||
// set minFeeUSD to a higher value than the tx feeUSD to test the error
|
||||
// of inserting a tx with lower than min fee
|
||||
oldMinFeeUSD := l2DBWithACC.minFeeUSD
|
||||
tx := txs[5]
|
||||
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
|
||||
require.NoError(t, err)
|
||||
feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
|
||||
// set minFeeUSD higher than the tx fee to trigger the error
|
||||
l2DBWithACC.minFeeUSD = feeAmountUSD + 1
|
||||
err = l2DBWithACC.AddTxAPI(tx)
|
||||
require.Error(t, err)
|
||||
assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
|
||||
// reset minFeeUSD to original value
|
||||
l2DBWithACC.minFeeUSD = oldMinFeeUSD
|
||||
}
|
||||
|
||||
func TestUpdateTxsInfo(t *testing.T) {
|
||||
err := prepareHistoryDB(historyDB)
|
||||
if err != nil {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
require.NoError(t, err)
|
||||
@@ -185,7 +243,7 @@ func TestUpdateTxsInfo(t *testing.T) {
|
||||
|
||||
for i := range poolL2Txs {
|
||||
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "test", fetchedTx.Info)
|
||||
}
|
||||
}
|
||||
@@ -203,9 +261,8 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
|
||||
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
|
||||
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
|
||||
// Set expected fee
|
||||
f := new(big.Float).SetInt(expected.Amount)
|
||||
amountF, _ := f.Float64()
|
||||
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
|
||||
amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD)
|
||||
expected.AbsoluteFee = amountUSD * expected.Fee.Percentage()
|
||||
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
@@ -230,19 +287,28 @@ func TestGetPending(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
var pendingTxs []*common.PoolL2Tx
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
pendingTxs = append(pendingTxs, &poolL2Txs[i])
|
||||
}
|
||||
fetchedTxs, err := l2DB.GetPendingTxs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
|
||||
for i := range fetchedTxs {
|
||||
assertTx(t, pendingTxs[i], &fetchedTxs[i])
|
||||
}
|
||||
// Check AbsoluteFee amount
|
||||
for i := range fetchedTxs {
|
||||
tx := &fetchedTxs[i]
|
||||
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
|
||||
require.NoError(t, err)
|
||||
feeAmountUSD := common.TokensToUSD(feeAmount,
|
||||
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
|
||||
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartForging(t *testing.T) {
|
||||
@@ -253,13 +319,13 @@ func TestStartForging(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
var startForgingTxIDs []common.TxID
|
||||
randomizer := 0
|
||||
// Add txs to DB
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
|
||||
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
|
||||
}
|
||||
@@ -267,11 +333,11 @@ func TestStartForging(t *testing.T) {
|
||||
}
|
||||
// Start forging txs
|
||||
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Fetch txs and check that they've been updated correctly
|
||||
for _, id := range startForgingTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
|
||||
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
|
||||
}
|
||||
@@ -285,13 +351,13 @@ func TestDoneForging(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
var startForgingTxIDs []common.TxID
|
||||
randomizer := 0
|
||||
// Add txs to DB
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
|
||||
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
|
||||
}
|
||||
@@ -299,7 +365,7 @@ func TestDoneForging(t *testing.T) {
|
||||
}
|
||||
// Start forging txs
|
||||
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var doneForgingTxIDs []common.TxID
|
||||
randomizer = 0
|
||||
@@ -311,12 +377,12 @@ func TestDoneForging(t *testing.T) {
|
||||
}
|
||||
// Done forging txs
|
||||
err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fetch txs and check that they've been updated correctly
|
||||
for _, id := range doneForgingTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
|
||||
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
|
||||
}
|
||||
@@ -330,13 +396,13 @@ func TestInvalidate(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
var invalidTxIDs []common.TxID
|
||||
randomizer := 0
|
||||
// Add txs to DB
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
|
||||
randomizer++
|
||||
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
|
||||
@@ -344,11 +410,11 @@ func TestInvalidate(t *testing.T) {
|
||||
}
|
||||
// Invalidate txs
|
||||
err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Fetch txs and check that they've been updated correctly
|
||||
for _, id := range invalidTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
|
||||
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
|
||||
}
|
||||
@@ -362,7 +428,7 @@ func TestInvalidateOldNonces(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Update Accounts currentNonce
|
||||
var updateAccounts []common.IdxNonce
|
||||
var currentNonce = common.Nonce(1)
|
||||
@@ -379,13 +445,13 @@ func TestInvalidateOldNonces(t *testing.T) {
|
||||
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
|
||||
}
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// sanity check
|
||||
require.Greater(t, len(invalidTxIDs), 0)
|
||||
|
||||
err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Fetch txs and check that they've been updated correctly
|
||||
for _, id := range invalidTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
@@ -407,7 +473,7 @@ func TestReorg(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
reorgedTxIDs := []common.TxID{}
|
||||
nonReorgedTxIDs := []common.TxID{}
|
||||
@@ -418,7 +484,7 @@ func TestReorg(t *testing.T) {
|
||||
// Add txs to DB
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
|
||||
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
|
||||
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
|
||||
@@ -430,7 +496,7 @@ func TestReorg(t *testing.T) {
|
||||
}
|
||||
// Start forging txs
|
||||
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var doneForgingTxIDs []common.TxID
|
||||
randomizer = 0
|
||||
@@ -455,22 +521,22 @@ func TestReorg(t *testing.T) {
|
||||
|
||||
// Invalidate txs BEFORE reorgBatch --> nonReorg
|
||||
err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Done forging txs in reorgBatch --> Reorg
|
||||
err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = l2DB.Reorg(lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
for _, id := range reorgedTxIDs {
|
||||
tx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, tx.BatchNum)
|
||||
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
|
||||
}
|
||||
for _, id := range nonReorgedTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
|
||||
}
|
||||
}
|
||||
@@ -487,7 +553,7 @@ func TestReorg2(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
reorgedTxIDs := []common.TxID{}
|
||||
nonReorgedTxIDs := []common.TxID{}
|
||||
@@ -498,7 +564,7 @@ func TestReorg2(t *testing.T) {
|
||||
// Add txs to DB
|
||||
for i := range poolL2Txs {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
|
||||
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
|
||||
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
|
||||
@@ -510,7 +576,7 @@ func TestReorg2(t *testing.T) {
|
||||
}
|
||||
// Start forging txs
|
||||
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var doneForgingTxIDs []common.TxID
|
||||
randomizer = 0
|
||||
@@ -532,22 +598,22 @@ func TestReorg2(t *testing.T) {
|
||||
}
|
||||
// Done forging txs BEFORE reorgBatch --> nonReorg
|
||||
err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Invalidate txs in reorgBatch --> Reorg
|
||||
err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = l2DB.Reorg(lastValidBatch)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
for _, id := range reorgedTxIDs {
|
||||
tx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, tx.BatchNum)
|
||||
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
|
||||
}
|
||||
for _, id := range nonReorgedTxIDs {
|
||||
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
|
||||
}
|
||||
}
|
||||
@@ -563,7 +629,7 @@ func TestPurge(t *testing.T) {
|
||||
var poolL2Tx []common.PoolL2Tx
|
||||
for i := 0; i < generateTx; i++ {
|
||||
poolL2TxAux, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
poolL2Tx = append(poolL2Tx, poolL2TxAux...)
|
||||
}
|
||||
|
||||
@@ -590,7 +656,7 @@ func TestPurge(t *testing.T) {
|
||||
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
|
||||
}
|
||||
err := l2DB.AddTxTest(&tx)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Set batchNum of the kept txs
|
||||
for i := range keepedIDs {
|
||||
@@ -598,17 +664,17 @@ func TestPurge(t *testing.T) {
|
||||
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
|
||||
safeBatchNum, keepedIDs[i],
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Start forging txs and set batchNum
|
||||
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Done forging txs and set batchNum
|
||||
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Invalidate txs and set batchNum
|
||||
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Update timestamp of afterTTL txs
|
||||
deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
|
||||
for _, id := range afterTTLIDs {
|
||||
@@ -617,12 +683,12 @@ func TestPurge(t *testing.T) {
|
||||
"UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
|
||||
deleteTimestamp, common.PoolL2TxStatePending, id,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Purge txs
|
||||
err = l2DB.Purge(safeBatchNum)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Check results
|
||||
for _, id := range deletedIDs {
|
||||
_, err := l2DB.GetTx(id)
|
||||
@@ -630,7 +696,7 @@ func TestPurge(t *testing.T) {
|
||||
}
|
||||
for _, id := range keepedIDs {
|
||||
_, err := l2DB.GetTx(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -644,10 +710,10 @@ func TestAuth(t *testing.T) {
|
||||
for i := 0; i < len(auths); i++ {
|
||||
// Add to the DB
|
||||
err := l2DB.AddAccountCreationAuth(auths[i])
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Fetch from DB
|
||||
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
// Check fetched vs generated
|
||||
assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
|
||||
assert.Equal(t, auths[i].BJJ, auth.BJJ)
|
||||
@@ -665,7 +731,7 @@ func TestAddGet(t *testing.T) {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We will work with only 3 txs
|
||||
require.GreaterOrEqual(t, len(poolL2Txs), 3)
|
||||
@@ -701,3 +767,56 @@ func TestAddGet(t *testing.T) {
|
||||
assert.Equal(t, txs[i], *dbTx)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPurgeByExternalDelete(t *testing.T) {
|
||||
err := prepareHistoryDB(historyDB)
|
||||
if err != nil {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
txs, err := generatePoolL2Txs()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We will work with 8 txs
|
||||
require.GreaterOrEqual(t, len(txs), 8)
|
||||
txs = txs[:8]
|
||||
for i := range txs {
|
||||
require.NoError(t, l2DB.AddTxTest(&txs[i]))
|
||||
}
|
||||
|
||||
// We will recreate this scenario:
|
||||
// tx index, status , external_delete
|
||||
// 0 , pending, false
|
||||
// 1 , pending, false
|
||||
// 2 , pending, true // will be deleted
|
||||
// 3 , pending, true // will be deleted
|
||||
// 4 , fging , false
|
||||
// 5 , fging , false
|
||||
// 6 , fging , true
|
||||
// 7 , fging , true
|
||||
|
||||
require.NoError(t, l2DB.StartForging(
|
||||
[]common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
|
||||
1))
|
||||
_, err = l2DB.db.Exec(
|
||||
`UPDATE tx_pool SET external_delete = true WHERE
|
||||
tx_id IN ($1, $2, $3, $4)
|
||||
;`,
|
||||
txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, l2DB.PurgeByExternalDelete())
|
||||
|
||||
// Query txs that have not been deleted
|
||||
for _, i := range []int{0, 1, 4, 5, 6, 7} {
|
||||
txID := txs[i].TxID
|
||||
_, err := l2DB.GetTx(txID)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Query txs that have been deleted
|
||||
for _, i := range []int{2, 3} {
|
||||
txID := txs[i].TxID
|
||||
_, err := l2DB.GetTx(txID)
|
||||
require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ type PoolL2TxWrite struct {
|
||||
RqFee *common.FeeSelector `meddler:"rq_fee"`
|
||||
RqNonce *common.Nonce `meddler:"rq_nonce"`
|
||||
Type common.TxType `meddler:"tx_type"`
|
||||
ClientIP string `meddler:"client_ip"`
|
||||
}
|
||||
|
||||
// PoolTxAPI represents a L2 Tx pool with extra metadata used by the API
|
||||
|
||||
@@ -47,7 +47,7 @@ CREATE TABLE token (
|
||||
name VARCHAR(20) NOT NULL,
|
||||
symbol VARCHAR(10) NOT NULL,
|
||||
decimals INT NOT NULL,
|
||||
usd NUMERIC,
|
||||
usd NUMERIC, -- value of a normalized token (1 token = 10^decimals units)
|
||||
usd_update TIMESTAMP WITHOUT TIME ZONE
|
||||
);
|
||||
|
||||
@@ -100,6 +100,15 @@ CREATE TABLE account (
|
||||
eth_addr BYTEA NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE account_update (
|
||||
item_id SERIAL,
|
||||
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
|
||||
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
|
||||
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
|
||||
nonce BIGINT NOT NULL,
|
||||
balance BYTEA NOT NULL
|
||||
);
|
||||
|
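account_update is append-only, so the current balance and nonce of an account are simply its most recent row. A hedged sketch of that lookup, matching the subquery used by GetAccountAPI earlier in this change:

const lastAccountUpdateSketch = `
SELECT idx, nonce, balance
FROM account_update
WHERE idx = $1
ORDER BY item_id DESC
LIMIT 1;`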
||||
CREATE TABLE exit_tree (
|
||||
item_id SERIAL PRIMARY KEY,
|
||||
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
|
||||
@@ -618,7 +627,9 @@ CREATE TABLE tx_pool (
|
||||
rq_amount BYTEA,
|
||||
rq_fee SMALLINT,
|
||||
rq_nonce BIGINT,
|
||||
tx_type VARCHAR(40) NOT NULL
|
||||
tx_type VARCHAR(40) NOT NULL,
|
||||
client_ip VARCHAR,
|
||||
external_delete BOOLEAN NOT NULL DEFAULT false
|
||||
);
|
||||
|
||||
-- +migrate StatementBegin
|
||||
@@ -651,34 +662,35 @@ CREATE TABLE account_creation_auth (
|
||||
);
|
||||
|
||||
-- +migrate Down
|
||||
-- drop triggers
|
||||
DROP TRIGGER trigger_token_usd_update ON token;
|
||||
DROP TRIGGER trigger_set_tx ON tx;
|
||||
DROP TRIGGER trigger_forge_l1_txs ON batch;
|
||||
DROP TRIGGER trigger_set_pool_tx ON tx_pool;
|
||||
-- drop functions
|
||||
DROP FUNCTION hez_idx;
|
||||
DROP FUNCTION set_token_usd_update;
|
||||
DROP FUNCTION fee_percentage;
|
||||
DROP FUNCTION set_tx;
|
||||
DROP FUNCTION forge_l1_user_txs;
|
||||
DROP FUNCTION set_pool_tx;
|
||||
-- drop tables
|
||||
DROP TABLE account_creation_auth;
|
||||
DROP TABLE tx_pool;
|
||||
DROP TABLE auction_vars;
|
||||
DROP TABLE rollup_vars;
|
||||
DROP TABLE escape_hatch_withdrawal;
|
||||
DROP TABLE bucket_update;
|
||||
DROP TABLE token_exchange;
|
||||
DROP TABLE wdelayer_vars;
|
||||
DROP TABLE tx;
|
||||
DROP TABLE exit_tree;
|
||||
DROP TABLE account;
|
||||
DROP TABLE token;
|
||||
DROP TABLE bid;
|
||||
DROP TABLE batch;
|
||||
DROP TABLE coordinator;
|
||||
DROP TABLE block;
|
||||
-- drop sequences
|
||||
DROP SEQUENCE tx_item_id;
|
||||
-- triggers
|
||||
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
|
||||
DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
|
||||
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
|
||||
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
|
||||
-- functions
|
||||
DROP FUNCTION IF EXISTS hez_idx;
|
||||
DROP FUNCTION IF EXISTS set_token_usd_update;
|
||||
DROP FUNCTION IF EXISTS fee_percentage;
|
||||
DROP FUNCTION IF EXISTS set_tx;
|
||||
DROP FUNCTION IF EXISTS forge_l1_user_txs;
|
||||
DROP FUNCTION IF EXISTS set_pool_tx;
|
||||
-- drop tables IF EXISTS
|
||||
DROP TABLE IF EXISTS account_creation_auth;
|
||||
DROP TABLE IF EXISTS tx_pool;
|
||||
DROP TABLE IF EXISTS auction_vars;
|
||||
DROP TABLE IF EXISTS rollup_vars;
|
||||
DROP TABLE IF EXISTS escape_hatch_withdrawal;
|
||||
DROP TABLE IF EXISTS bucket_update;
|
||||
DROP TABLE IF EXISTS token_exchange;
|
||||
DROP TABLE IF EXISTS wdelayer_vars;
|
||||
DROP TABLE IF EXISTS tx;
|
||||
DROP TABLE IF EXISTS exit_tree;
|
||||
DROP TABLE IF EXISTS account_update;
|
||||
DROP TABLE IF EXISTS account;
|
||||
DROP TABLE IF EXISTS token;
|
||||
DROP TABLE IF EXISTS bid;
|
||||
DROP TABLE IF EXISTS batch;
|
||||
DROP TABLE IF EXISTS coordinator;
|
||||
DROP TABLE IF EXISTS block;
|
||||
-- sequences
|
||||
DROP SEQUENCE IF EXISTS tx_item_id;
|
||||
|
||||
@@ -498,11 +498,17 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CheckpointExists returns true if the checkpoint exists
|
||||
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
|
||||
return l.db.CheckpointExists(batchNum)
|
||||
}
|
||||
|
||||
// Reset performs a reset in the LocaStateDB. If fromSynchronizer is true, it
|
||||
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
|
||||
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
|
||||
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
|
||||
if fromSynchronizer {
|
||||
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
|
||||
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
	"github.com/marusama/semaphore/v2"
	migrate "github.com/rubenv/sql-migrate"
	"github.com/russross/meddler"
	"golang.org/x/sync/semaphore"
)

var migrations *migrate.PackrMigrationSource

@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {

// APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct {
	smphr   semaphore.Semaphore
	smphr   *semaphore.Weighted
	timeout time.Duration
}

// NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
	return &APIConnectionController{
		smphr:   semaphore.New(maxConnections),
		smphr:   semaphore.NewWeighted(int64(maxConnections)),
		timeout: timeout,
	}
}
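Editor's note: the hunk above replaces the marusama semaphore with golang.org/x/sync's weighted semaphore. Below is a minimal, self-contained sketch of how a weighted semaphore plus a timeout can cap concurrent API database queries; the limiter type and its acquire/release helpers are illustrative assumptions, not the repository's actual API.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

// apiConnLimiter caps concurrent SQL queries issued by the API. The name and
// methods are hypothetical; only the semaphore.Weighted usage mirrors the diff.
type apiConnLimiter struct {
	smphr   *semaphore.Weighted
	timeout time.Duration
}

func newAPIConnLimiter(maxConnections int, timeout time.Duration) *apiConnLimiter {
	return &apiConnLimiter{
		smphr:   semaphore.NewWeighted(int64(maxConnections)),
		timeout: timeout,
	}
}

// acquire blocks until a slot is free or the timeout expires.
func (l *apiConnLimiter) acquire(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, l.timeout)
	defer cancel()
	return l.smphr.Acquire(ctx, 1) // returns a context error if the timeout fires first
}

// release frees the slot taken by acquire.
func (l *apiConnLimiter) release() { l.smphr.Release(1) }

func main() {
	lim := newAPIConnLimiter(2, 500*time.Millisecond)
	if err := lim.acquire(context.Background()); err != nil {
		fmt.Println("could not get a DB slot:", err)
		return
	}
	defer lim.release()
	fmt.Println("got a DB slot, run the query here")
}
```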
@@ -324,5 +324,6 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
		Value: tx.Value(),
		Data:  tx.Data(),
	}
	return c.client.CallContract(ctx, msg, blockNum)
	result, err := c.client.CallContract(ctx, msg, blockNum)
	return result, tracerr.Wrap(err)
}
@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
	}
	consts, err := c.RollupConstants()
	if err != nil {
		return nil, tracerr.Wrap(err)
		return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err))
	}
	c.consts = consts
	return c, nil
@@ -327,7 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
	if auth == nil {
		auth, err = c.client.NewAuth()
		if err != nil {
			return nil, err
			return nil, tracerr.Wrap(err)
		}
		auth.GasLimit = 1000000
	}
@@ -393,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
		l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
		args.ProofA, args.ProofB, args.ProofC)
	if err != nil {
		return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err))
		return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err))
	}
	return tx, nil
}
2	go.mod
@@ -17,7 +17,6 @@ require (
	github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
	github.com/joho/godotenv v1.3.0
	github.com/lib/pq v1.8.0
	github.com/marusama/semaphore/v2 v2.4.1
	github.com/mattn/go-sqlite3 v2.0.3+incompatible
	github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
	github.com/mitchellh/copystructure v1.0.0
@@ -29,5 +28,6 @@ require (
	go.uber.org/zap v1.16.0
	golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
	golang.org/x/net v0.0.0-20200822124328-c89045814202
	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
	gopkg.in/go-playground/validator.v9 v9.29.1
)
13	go.sum
@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
|
||||
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
@@ -84,6 +86,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
|
||||
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
|
||||
@@ -169,6 +173,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
|
||||
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
@@ -415,9 +421,6 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
|
||||
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
|
||||
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
|
||||
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
|
||||
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
@@ -599,6 +602,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
@@ -617,6 +622,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
|
||||
|
||||
@@ -67,6 +67,11 @@ func Init(levelStr string, outputs []string) {

func sprintStackTrace(st []tracerr.Frame) string {
	builder := strings.Builder{}
	// Skip deepest frame because it belongs to the go runtime and we don't
	// care about it.
	if len(st) > 0 {
		st = st[:len(st)-1]
	}
	for _, f := range st {
		builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
	}
31	node/node.go
@@ -2,6 +2,7 @@ package node

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"sync"
@@ -102,8 +103,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
	var keyStore *ethKeystore.KeyStore
	if mode == ModeCoordinator {
		ethCfg = eth.EthereumConfig{
			CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
			GasPriceDiv:  cfg.Coordinator.EthClient.GasPriceDiv,
			CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit,
			GasPriceDiv:  0, // cfg.Coordinator.EthClient.GasPriceDiv,
		}

		scryptN := ethKeystore.StandardScryptN
@@ -203,6 +204,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		db,
		cfg.Coordinator.L2DB.SafetyPeriod,
		cfg.Coordinator.L2DB.MaxTxs,
		cfg.Coordinator.L2DB.MinFeeUSD,
		cfg.Coordinator.L2DB.TTL.Duration,
		apiConnCon,
	)
@@ -244,9 +246,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs))
	for i, serverProofCfg := range cfg.Coordinator.ServerProofs {
		serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
@@ -298,9 +297,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		ConfirmBlocks:          cfg.Coordinator.ConfirmBlocks,
		L1BatchTimeoutPerc:     cfg.Coordinator.L1BatchTimeoutPerc,
		ForgeRetryInterval:     cfg.Coordinator.ForgeRetryInterval.Duration,
		ForgeDelay:             cfg.Coordinator.ForgeDelay.Duration,
		ForgeNoTxsDelay:        cfg.Coordinator.ForgeNoTxsDelay.Duration,
		SyncRetryInterval:      cfg.Coordinator.SyncRetryInterval.Duration,
		PurgeByExtDelInterval:  cfg.Coordinator.PurgeByExtDelInterval.Duration,
		EthClientAttempts:      cfg.Coordinator.EthClient.Attempts,
		EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
		EthNoReuseNonce:        cfg.Coordinator.EthClient.NoReuseNonce,
		EthTxResendTimeout:     cfg.Coordinator.EthClient.TxResendTimeout.Duration,
		MaxGasPrice:            cfg.Coordinator.EthClient.MaxGasPrice,
		GasPriceIncPerc:        cfg.Coordinator.EthClient.GasPriceIncPerc,
		TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
		DebugBatchPath:         cfg.Coordinator.Debug.BatchPath,
		Purger: coordinator.PurgerCfg{
@@ -421,7 +427,6 @@ func NewNodeAPI(
		coordinatorEndpoints, explorerEndpoints,
		engine,
		hdb,
		sdb,
		l2db,
		config,
	)
@@ -487,11 +492,15 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
	if stats.Synced() {
		if err := n.nodeAPI.api.UpdateNetworkInfo(
			stats.Eth.LastBlock, stats.Sync.LastBlock,
			common.BatchNum(stats.Eth.LastBatch),
			common.BatchNum(stats.Eth.LastBatchNum),
			stats.Sync.Auction.CurrentSlot.SlotNum,
		); err != nil {
			log.Errorw("API.UpdateNetworkInfo", "err", err)
		}
	} else {
		n.nodeAPI.api.UpdateNetworkInfoBlock(
			stats.Eth.LastBlock, stats.Sync.LastBlock,
		)
	}
	}
}
@@ -573,7 +582,13 @@ func (n *Node) StartSynchronizer() {
	if n.ctx.Err() != nil {
		continue
	}
	log.Errorw("Synchronizer.Sync", "err", err)
	if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
		log.Warnw("Synchronizer.Sync", "err", err)
	} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
		log.Warnw("Synchronizer.Sync", "err", err)
	} else {
		log.Errorw("Synchronizer.Sync", "err", err)
	}
	}
	}
}

@@ -18,6 +18,19 @@ import (
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
)
|
||||
|
||||
const (
|
||||
// errStrUnknownBlock is the string returned by geth when querying an
|
||||
// unknown block
|
||||
errStrUnknownBlock = "unknown block"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUnknownBlock is the error returned by the Synchronizer when a
|
||||
// block is queried by hash but the ethereum node doesn't find it due
|
||||
// to it being discarded from a reorg.
|
||||
ErrUnknownBlock = fmt.Errorf("unknown block")
|
||||
)
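Editor's note: a minimal, self-contained sketch of the detection pattern these declarations enable. The raw geth error string is mapped onto the sentinel once, and callers classify it with errors.Is instead of comparing strings. Only the two names come from the diff; everything else here is an illustrative assumption, not the repository's code.

```go
package main

import (
	"errors"
	"fmt"
)

// errStrUnknownBlock mimics the string geth returns for a block dropped in a
// reorg; ErrUnknownBlock is the sentinel the rest of the code matches against.
const errStrUnknownBlock = "unknown block"

var ErrUnknownBlock = errors.New("unknown block")

// classify maps the raw client error string onto the sentinel, wrapping it so
// callers still see where it came from.
func classify(err error) error {
	if err != nil && err.Error() == errStrUnknownBlock {
		return fmt.Errorf("RollupEventsByBlock: %w", ErrUnknownBlock)
	}
	return err
}

func main() {
	raw := errors.New("unknown block") // what the eth client might return after a reorg
	if err := classify(raw); errors.Is(err, ErrUnknownBlock) {
		fmt.Println("warn: block not found (likely a reorg), will retry")
	} else if err != nil {
		fmt.Println("error:", err)
	}
}
```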
|
||||
|
||||
// Stats of the synchronizer
|
||||
type Stats struct {
|
||||
Eth struct {
|
||||
@@ -25,12 +38,12 @@ type Stats struct {
|
||||
Updated time.Time
|
||||
FirstBlockNum int64
|
||||
LastBlock common.Block
|
||||
LastBatch int64
|
||||
LastBatchNum int64
|
||||
}
|
||||
Sync struct {
|
||||
Updated time.Time
|
||||
LastBlock common.Block
|
||||
LastBatch int64
|
||||
LastBatch common.Batch
|
||||
// LastL1BatchBlock is the last ethereum block in which an
|
||||
// l1Batch was forged
|
||||
LastL1BatchBlock int64
|
||||
@@ -77,13 +90,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
|
||||
}
|
||||
|
||||
// UpdateSync updates the synchronizer stats
|
||||
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
|
||||
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
|
||||
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
|
||||
now := time.Now()
|
||||
s.rw.Lock()
|
||||
s.Sync.LastBlock = *lastBlock
|
||||
if lastBatch != nil {
|
||||
s.Sync.LastBatch = int64(*lastBatch)
|
||||
s.Sync.LastBatch = *lastBatch
|
||||
}
|
||||
if lastL1BatchBlock != nil {
|
||||
s.Sync.LastL1BatchBlock = *lastL1BatchBlock
|
||||
@@ -105,16 +118,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
|
||||
|
||||
lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
|
||||
}
|
||||
lastBatch, err := ethClient.RollupLastForgedBatch()
|
||||
lastBatchNum, err := ethClient.RollupLastForgedBatch()
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
|
||||
}
|
||||
s.rw.Lock()
|
||||
s.Eth.Updated = now
|
||||
s.Eth.LastBlock = *lastBlock
|
||||
s.Eth.LastBatch = lastBatch
|
||||
s.Eth.LastBatchNum = lastBatchNum
|
||||
s.rw.Unlock()
|
||||
return nil
|
||||
}
|
||||
@@ -139,6 +152,10 @@ func (s *StatsHolder) CopyStats() *Stats {
|
||||
sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
|
||||
common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
|
||||
}
|
||||
if s.Sync.LastBatch.StateRoot != nil {
|
||||
sCopy.Sync.LastBatch.StateRoot =
|
||||
common.CopyBigInt(s.Sync.LastBatch.StateRoot)
|
||||
}
|
||||
s.rw.RUnlock()
|
||||
return &sCopy
|
||||
}
|
||||
@@ -152,9 +169,9 @@ func (s *StatsHolder) blocksPerc() float64 {
|
||||
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
|
||||
}
|
||||
|
||||
func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
|
||||
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
|
||||
return float64(batchNum) * 100.0 /
|
||||
float64(s.Eth.LastBatch)
|
||||
float64(s.Eth.LastBatchNum)
|
||||
}
|
||||
|
||||
// StartBlockNums sets the first block used to start tracking the smart
|
||||
@@ -329,23 +346,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// firstBatchBlockNum is the blockNum of first batch in that block, if any
|
||||
func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
|
||||
slot := common.Slot{
|
||||
SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
|
||||
ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
|
||||
}
|
||||
// updateCurrentSlot updates the slot with information of the current slot.
|
||||
// The information about which coordinator is allowed to forge is only updated
|
||||
// when we are Synced.
|
||||
// hasBatch is true when the last synced block contained at least one batch.
|
||||
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
|
||||
// We want the next block because the current one is already mined
|
||||
blockNum := s.stats.Sync.LastBlock.Num + 1
|
||||
slotNum := s.consts.Auction.SlotNum(blockNum)
|
||||
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
|
||||
if reset {
|
||||
// Using this query only to know if there
|
||||
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
|
||||
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
|
||||
return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
|
||||
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||
firstBatchBlockNum = nil
|
||||
hasBatch = false
|
||||
} else {
|
||||
firstBatchBlockNum = &dbFirstBatchBlockNum
|
||||
hasBatch = true
|
||||
firstBatchBlockNum = dbFirstBatchBlockNum
|
||||
}
|
||||
slot.ForgerCommitment = false
|
||||
} else if slotNum > slot.SlotNum {
|
||||
@@ -356,11 +375,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
|
||||
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
||||
// If Synced, update the current coordinator
|
||||
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
||||
if err := s.setSlotCoordinator(&slot); err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
if err := s.setSlotCoordinator(slot); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
if firstBatchBlockNum != nil &&
|
||||
s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
|
||||
if hasBatch &&
|
||||
s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
|
||||
int64(s.vars.Auction.SlotDeadline) {
|
||||
slot.ForgerCommitment = true
|
||||
}
|
||||
@@ -369,57 +388,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
|
||||
// BEGIN SANITY CHECK
|
||||
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
|
||||
}
|
||||
if !canForge {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
||||
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
||||
"differs from smart contract: %+v", slot))
|
||||
}
|
||||
// END SANITY CHECK
|
||||
}
|
||||
return &slot, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
|
||||
// updateNextSlot updates the slot with information of the next slot.
|
||||
// The information about which coordinator is allowed to forge is only updated
|
||||
// when we are Synced.
|
||||
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
|
||||
// We want the next block because the current one is already mined
|
||||
blockNum := s.stats.Sync.LastBlock.Num + 1
|
||||
slotNum := s.consts.Auction.SlotNum(blockNum) + 1
|
||||
slot := common.Slot{
|
||||
SlotNum: slotNum,
|
||||
ForgerCommitment: false,
|
||||
}
|
||||
slot.SlotNum = slotNum
|
||||
slot.ForgerCommitment = false
|
||||
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
|
||||
// If Synced, update the current coordinator
|
||||
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
|
||||
if err := s.setSlotCoordinator(&slot); err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
if err := s.setSlotCoordinator(slot); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// TODO: Remove this SANITY CHECK once this code is tested enough
|
||||
// BEGIN SANITY CHECK
|
||||
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
|
||||
}
|
||||
if !canForge {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
||||
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
|
||||
"differs from smart contract: %+v", slot))
|
||||
}
|
||||
// END SANITY CHECK
|
||||
}
|
||||
return &slot, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
|
||||
current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
|
||||
if err != nil {
|
||||
// updateCurrentNextSlotIfSync updates the current and next slot. Information
|
||||
// about forger address that is allowed to forge is only updated if we are
|
||||
// Synced.
|
||||
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
|
||||
current := s.stats.Sync.Auction.CurrentSlot
|
||||
next := s.stats.Sync.Auction.NextSlot
|
||||
if err := s.updateCurrentSlot(¤t, reset, hasBatch); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
next, err := s.getNextSlot()
|
||||
if err != nil {
|
||||
if err := s.updateNextSlot(&next); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
s.stats.UpdateCurrentNextSlot(current, next)
|
||||
s.stats.UpdateCurrentNextSlot(¤t, &next)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -458,9 +481,9 @@ func (s *Synchronizer) init() error {
|
||||
"ethLastBlock", s.stats.Eth.LastBlock,
|
||||
)
|
||||
log.Infow("Sync init batch",
|
||||
"syncLastBatch", s.stats.Sync.LastBatch,
|
||||
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
|
||||
"ethLastBatch", s.stats.Eth.LastBatch,
|
||||
"syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
|
||||
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
|
||||
"ethLastBatch", s.stats.Eth.LastBatchNum,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
@@ -521,7 +544,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
|
||||
if tracerr.Unwrap(err) == ethereum.NotFound {
|
||||
return nil, nil, nil
|
||||
} else if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
|
||||
}
|
||||
log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
|
||||
ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
|
||||
@@ -627,29 +650,29 @@ func (s *Synchronizer) Sync2(ctx context.Context,
|
||||
}
|
||||
}
|
||||
s.stats.UpdateSync(ethBlock,
|
||||
&rollupData.Batches[batchesLen-1].Batch.BatchNum,
|
||||
&rollupData.Batches[batchesLen-1].Batch,
|
||||
lastL1BatchBlock, lastForgeL1TxsNum)
|
||||
}
|
||||
var firstBatchBlockNum *int64
|
||||
hasBatch := false
|
||||
if len(rollupData.Batches) > 0 {
|
||||
firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
|
||||
hasBatch = true
|
||||
}
|
||||
if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
|
||||
if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
for _, batchData := range rollupData.Batches {
|
||||
log.Debugw("Synced batch",
|
||||
"syncLastBatch", batchData.Batch.BatchNum,
|
||||
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
|
||||
"ethLastBatch", s.stats.Eth.LastBatchNum,
|
||||
)
|
||||
}
|
||||
log.Debugw("Synced block",
|
||||
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
|
||||
"syncBlocksPerc", s.stats.blocksPerc(),
|
||||
"ethLastBlockNum", s.stats.Eth.LastBlock.Num,
|
||||
)
|
||||
for _, batchData := range rollupData.Batches {
|
||||
log.Debugw("Synced batch",
|
||||
"syncLastBatch", batchData.Batch.BatchNum,
|
||||
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
|
||||
"ethLastBatch", s.stats.Eth.LastBatch,
|
||||
)
|
||||
}
|
||||
|
||||
return blockData, nil, nil
|
||||
}
|
||||
@@ -700,15 +723,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
|
||||
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
|
||||
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
|
||||
}
|
||||
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
|
||||
}
|
||||
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
|
||||
}
|
||||
rollupVars := rollupInit.RollupVariables()
|
||||
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
|
||||
@@ -753,15 +776,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
|
||||
s.vars.WDelayer = *wDelayer
|
||||
}
|
||||
|
||||
batchNum, err := s.historyDB.GetLastBatchNum()
|
||||
batch, err := s.historyDB.GetLastBatch()
|
||||
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
||||
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
|
||||
}
|
||||
if tracerr.Unwrap(err) == sql.ErrNoRows {
|
||||
batchNum = 0
|
||||
batch = &common.Batch{}
|
||||
}
|
||||
|
||||
err = s.stateDB.Reset(batchNum)
|
||||
err = s.stateDB.Reset(batch.BatchNum)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
|
||||
}
|
||||
@@ -783,9 +806,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
|
||||
lastForgeL1TxsNum = &n
|
||||
}
|
||||
|
||||
s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
|
||||
s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
|
||||
|
||||
if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
|
||||
if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
@@ -801,8 +824,10 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
|
||||
// Get rollup events in the block, and make sure the block hash matches
|
||||
// the expected one.
|
||||
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, ðBlock.Hash)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
if err != nil && err.Error() == errStrUnknownBlock {
|
||||
return nil, tracerr.Wrap(ErrUnknownBlock)
|
||||
} else if err != nil {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
|
||||
}
|
||||
// No events in this block
|
||||
if rollupEvents == nil {
|
||||
@@ -919,9 +944,15 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
if s.stateDB.CurrentBatch() != batchNum {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
|
||||
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
|
||||
"evtForgeBatch.BatchNum = (%v)",
|
||||
s.stateDB.CurrentBatch(), batchNum))
|
||||
}
|
||||
if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
|
||||
"forgeBatchArgs.NewStRoot (%v)",
|
||||
s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
|
||||
}
|
||||
|
||||
// Transform processed PoolL2 txs to L2 and store in BatchData
|
||||
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
|
||||
@@ -962,6 +993,19 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
|
||||
}
|
||||
batchData.CreatedAccounts = processTxsOut.CreatedAccounts
|
||||
|
||||
batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
|
||||
len(processTxsOut.UpdatedAccounts))
|
||||
for _, acc := range processTxsOut.UpdatedAccounts {
|
||||
batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
|
||||
common.AccountUpdate{
|
||||
EthBlockNum: blockNum,
|
||||
BatchNum: batchNum,
|
||||
Idx: acc.Idx,
|
||||
Nonce: acc.Nonce,
|
||||
Balance: acc.Balance,
|
||||
})
|
||||
}
|
||||
|
||||
slotNum := int64(0)
|
||||
if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
|
||||
slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
|
||||
@@ -1105,8 +1149,10 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
|
||||
|
||||
// Get auction events in the block
|
||||
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, ðBlock.Hash)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
if err != nil && err.Error() == errStrUnknownBlock {
|
||||
return nil, tracerr.Wrap(ErrUnknownBlock)
|
||||
} else if err != nil {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
|
||||
}
|
||||
// No events in this block
|
||||
if auctionEvents == nil {
|
||||
@@ -1202,8 +1248,10 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
|
||||
|
||||
// Get wDelayer events in the block
|
||||
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, ðBlock.Hash)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
if err != nil && err.Error() == errStrUnknownBlock {
|
||||
return nil, tracerr.Wrap(ErrUnknownBlock)
|
||||
} else if err != nil {
|
||||
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
|
||||
}
|
||||
// No events in this block
|
||||
if wDelayerEvents == nil {
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||
"github.com/hermeznetwork/hermez-node/db/statedb"
|
||||
"github.com/hermeznetwork/hermez-node/eth"
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/hermeznetwork/hermez-node/test"
|
||||
"github.com/hermeznetwork/hermez-node/test/til"
|
||||
"github.com/jinzhu/copier"
|
||||
@@ -172,6 +171,8 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
|
||||
*exit = syncBatch.ExitTree[j]
|
||||
}
|
||||
assert.Equal(t, batch.Batch, syncBatch.Batch)
|
||||
// Ignore updated accounts
|
||||
syncBatch.UpdatedAccounts = nil
|
||||
assert.Equal(t, batch, syncBatch)
|
||||
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
|
||||
|
||||
@@ -321,6 +322,14 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
|
||||
return stateDB, historyDB
|
||||
}
|
||||
|
||||
func newBigInt(s string) *big.Int {
|
||||
v, ok := new(big.Int).SetString(s, 10)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("Can't set big.Int from %s", s))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func TestSyncGeneral(t *testing.T) {
|
||||
//
|
||||
// Setup
|
||||
@@ -339,7 +348,6 @@ func TestSyncGeneral(t *testing.T) {
|
||||
s, err := NewSynchronizer(client, historyDB, stateDB, Config{
|
||||
StatsRefreshPeriod: 0 * time.Second,
|
||||
})
|
||||
log.Error(err)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -434,12 +442,22 @@ func TestSyncGeneral(t *testing.T) {
|
||||
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
|
||||
require.Equal(t, 2, len(blocks[i].Rollup.Batches))
|
||||
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
|
||||
// Set StateRoots for batches manually (til doesn't set it)
|
||||
blocks[i].Rollup.Batches[0].Batch.StateRoot =
|
||||
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
|
||||
blocks[i].Rollup.Batches[1].Batch.StateRoot =
|
||||
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
|
||||
// blocks 1 (blockNum=3)
|
||||
i = 1
|
||||
require.Equal(t, 3, int(blocks[i].Block.Num))
|
||||
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
|
||||
require.Equal(t, 2, len(blocks[i].Rollup.Batches))
|
||||
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
|
||||
// Set StateRoots for batches manually (til doesn't set it)
|
||||
blocks[i].Rollup.Batches[0].Batch.StateRoot =
|
||||
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
|
||||
blocks[i].Rollup.Batches[1].Batch.StateRoot =
|
||||
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
|
||||
|
||||
// Generate extra required data
|
||||
ethAddTokens(blocks, client)
|
||||
@@ -614,6 +632,12 @@ func TestSyncGeneral(t *testing.T) {
|
||||
blocks, err = tc.GenerateBlocks(set2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set StateRoots for batches manually (til doesn't set it)
|
||||
blocks[0].Rollup.Batches[0].Batch.StateRoot =
|
||||
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
|
||||
blocks[0].Rollup.Batches[1].Batch.StateRoot =
|
||||
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
client.CtlRollback()
|
||||
}
|
||||
|
||||
@@ -145,7 +145,7 @@ const longWaitDuration = 999 * time.Hour
|
||||
// const provingDuration = 2 * time.Second
|
||||
|
||||
func (s *Mock) runProver(ctx context.Context) {
|
||||
waitDuration := longWaitDuration
|
||||
waitCh := time.After(longWaitDuration)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -153,21 +153,21 @@ func (s *Mock) runProver(ctx context.Context) {
|
||||
case msg := <-s.msgCh:
|
||||
switch msg.value {
|
||||
case "cancel":
|
||||
waitDuration = longWaitDuration
|
||||
waitCh = time.After(longWaitDuration)
|
||||
s.Lock()
|
||||
if !s.status.IsReady() {
|
||||
s.status = prover.StatusCodeAborted
|
||||
}
|
||||
s.Unlock()
|
||||
case "prove":
|
||||
waitDuration = s.provingDuration
|
||||
waitCh = time.After(s.provingDuration)
|
||||
s.Lock()
|
||||
s.status = prover.StatusCodeBusy
|
||||
s.Unlock()
|
||||
}
|
||||
msg.ackCh <- true
|
||||
case <-time.After(waitDuration):
|
||||
waitDuration = longWaitDuration
|
||||
case <-waitCh:
|
||||
waitCh = time.After(longWaitDuration)
|
||||
s.Lock()
|
||||
if s.status != prover.StatusCodeBusy {
|
||||
s.Unlock()
|
||||
|
||||
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
|
||||
pass := os.Getenv("POSTGRES_PASS")
|
||||
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
|
||||
require.NoError(t, err)
|
||||
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
|
||||
l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil)
|
||||
|
||||
dir, err := ioutil.TempDir("", "tmpSyncDB")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -27,16 +27,21 @@ type TxProcessor struct {
|
||||
// AccumulatedFees contains the accumulated fees for each token (Coord
|
||||
// Idx) in the processed batch
|
||||
AccumulatedFees map[common.Idx]*big.Int
|
||||
// updatedAccounts stores the last version of the account when it has
|
||||
// been created/updated by any of the processed transactions.
|
||||
updatedAccounts map[common.Idx]*common.Account
|
||||
config Config
|
||||
}
|
||||
|
||||
// Config contains the TxProcessor configuration parameters
|
||||
type Config struct {
|
||||
NLevels uint32
|
||||
NLevels uint32
|
||||
// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
|
||||
MaxFeeTx uint32
|
||||
MaxTx uint32
|
||||
MaxL1Tx uint32
|
||||
ChainID uint16
|
||||
// ChainID of the blockchain
|
||||
ChainID uint16
|
||||
}
|
||||
|
||||
type processedExit struct {
|
||||
@@ -53,6 +58,9 @@ type ProcessTxOutput struct {
|
||||
CreatedAccounts []common.Account
|
||||
CoordinatorIdxsMap map[common.TokenID]common.Idx
|
||||
CollectedFees map[common.TokenID]*big.Int
|
||||
// UpdatedAccounts returns the current state of each account
|
||||
// created/updated by any of the processed transactions.
|
||||
UpdatedAccounts map[common.Idx]*common.Account
|
||||
}
|
||||
|
||||
func newErrorNotEnoughBalance(tx common.Tx) error {
|
||||
@@ -125,6 +133,10 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
|
||||
return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx))
|
||||
}
|
||||
|
||||
if tp.s.Type() == statedb.TypeSynchronizer {
|
||||
tp.updatedAccounts = make(map[common.Idx]*common.Account)
|
||||
}
|
||||
|
||||
exits := make([]processedExit, nTx)
|
||||
|
||||
if tp.s.Type() == statedb.TypeBatchBuilder {
|
||||
@@ -196,7 +208,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
|
||||
}
|
||||
}
|
||||
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
|
||||
if exitIdx != nil && exitTree != nil {
|
||||
if exitIdx != nil && exitTree != nil && exitAccount != nil {
|
||||
exits[tp.i] = processedExit{
|
||||
exit: true,
|
||||
newExit: newExit,
|
||||
@@ -380,7 +392,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
|
||||
tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr)
|
||||
}
|
||||
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
|
||||
pFee, err := tp.s.UpdateAccount(idx, accCoord)
|
||||
pFee, err := tp.updateAccount(idx, accCoord)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
@@ -405,39 +417,39 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// once all txs processed (exitTree root frozen), for each Exit,
|
||||
// generate common.ExitInfo data
|
||||
var exitInfos []common.ExitInfo
|
||||
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
|
||||
for i := 0; i < nTx; i++ {
|
||||
if !exits[i].exit {
|
||||
continue
|
||||
}
|
||||
exitIdx := exits[i].idx
|
||||
exitAccount := exits[i].acc
|
||||
|
||||
// 0. generate MerkleProof
|
||||
p, err := exitTree.GenerateSCVerifierProof(exitIdx.BigInt(), nil)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// 1. generate common.ExitInfo
|
||||
ei := common.ExitInfo{
|
||||
AccountIdx: exitIdx,
|
||||
MerkleProof: p,
|
||||
Balance: exitAccount.Balance,
|
||||
}
|
||||
if prevExit, ok := exitInfosByIdx[exitIdx]; !ok {
|
||||
exitInfos = append(exitInfos, ei)
|
||||
exitInfosByIdx[exitIdx] = &exitInfos[len(exitInfos)-1]
|
||||
} else {
|
||||
*prevExit = ei
|
||||
}
|
||||
}
|
||||
|
||||
if tp.s.Type() == statedb.TypeSynchronizer {
|
||||
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
|
||||
// once all txs processed (exitTree root frozen), for each Exit,
|
||||
// generate common.ExitInfo data
|
||||
var exitInfos []common.ExitInfo
|
||||
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
|
||||
for i := 0; i < nTx; i++ {
|
||||
if !exits[i].exit {
|
||||
continue
|
||||
}
|
||||
exitIdx := exits[i].idx
|
||||
exitAccount := exits[i].acc
|
||||
|
||||
// 0. generate MerkleProof
|
||||
p, err := exitTree.GenerateSCVerifierProof(exitIdx.BigInt(), nil)
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// 1. generate common.ExitInfo
|
||||
ei := common.ExitInfo{
|
||||
AccountIdx: exitIdx,
|
||||
MerkleProof: p,
|
||||
Balance: exitAccount.Balance,
|
||||
}
|
||||
if prevExit, ok := exitInfosByIdx[exitIdx]; !ok {
|
||||
exitInfos = append(exitInfos, ei)
|
||||
exitInfosByIdx[exitIdx] = &exitInfos[len(exitInfos)-1]
|
||||
} else {
|
||||
*prevExit = ei
|
||||
}
|
||||
}
|
||||
|
||||
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
|
||||
// be able to store it into HistoryDB for the concrete BatchNum
|
||||
return &ProcessTxOutput{
|
||||
ZKInputs: nil,
|
||||
@@ -445,6 +457,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
|
||||
CreatedAccounts: createdAccounts,
|
||||
CoordinatorIdxsMap: coordIdxsMap,
|
||||
CollectedFees: collectedFees,
|
||||
UpdatedAccounts: tp.updatedAccounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -739,7 +752,7 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
|
||||
EthAddr: tx.FromEthAddr,
|
||||
}
|
||||
|
||||
p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), account)
|
||||
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), account)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -774,6 +787,28 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
|
||||
return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1)
|
||||
}
|
||||
|
||||
// createAccount is a wrapper over the StateDB.CreateAccount method that also
|
||||
// stores the created account in the updatedAccounts map in case the StateDB is
|
||||
// of TypeSynchronizer
|
||||
func (tp *TxProcessor) createAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
|
||||
if tp.s.Type() == statedb.TypeSynchronizer {
|
||||
account.Idx = idx
|
||||
tp.updatedAccounts[idx] = account
|
||||
}
|
||||
return tp.s.CreateAccount(idx, account)
|
||||
}
|
||||
|
||||
// updateAccount is a wrapper over the StateDB.UpdateAccount method that also
|
||||
// stores the updated account in the updatedAccounts map in case the StateDB is
|
||||
// of TypeSynchronizer
|
||||
func (tp *TxProcessor) updateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
|
||||
if tp.s.Type() == statedb.TypeSynchronizer {
|
||||
account.Idx = idx
|
||||
tp.updatedAccounts[idx] = account
|
||||
}
|
||||
return tp.s.UpdateAccount(idx, account)
|
||||
}
|
||||
|
||||
// applyDeposit updates the balance in the account of the depositor, if
|
||||
// andTransfer parameter is set to true, the method will also apply the
|
||||
// Transfer of the L1Tx/DepositTransfer
|
||||
@@ -804,7 +839,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
|
||||
}
|
||||
|
||||
// update sender account in localStateDB
|
||||
p, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
|
||||
p, err := tp.updateAccount(tx.FromIdx, accSender)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -841,7 +876,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
|
||||
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
|
||||
|
||||
// update receiver account in localStateDB
|
||||
p, err := tp.s.UpdateAccount(tx.ToIdx, accReceiver)
|
||||
p, err := tp.updateAccount(tx.ToIdx, accReceiver)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -924,7 +959,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
|
||||
}
|
||||
|
||||
// update sender account in localStateDB
|
||||
pSender, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
|
||||
pSender, err := tp.updateAccount(tx.FromIdx, accSender)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return tracerr.Wrap(err)
|
||||
@@ -963,7 +998,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
|
||||
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
|
||||
|
||||
// update receiver account in localStateDB
|
||||
pReceiver, err := tp.s.UpdateAccount(auxToIdx, accReceiver)
|
||||
pReceiver, err := tp.updateAccount(auxToIdx, accReceiver)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -1006,7 +1041,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
|
||||
}
|
||||
|
||||
// create Account of the Sender
|
||||
p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
|
||||
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -1054,7 +1089,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
|
||||
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
|
||||
|
||||
// update receiver account in localStateDB
|
||||
p, err = tp.s.UpdateAccount(tx.ToIdx, accReceiver)
|
||||
p, err = tp.updateAccount(tx.ToIdx, accReceiver)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
@@ -1128,7 +1163,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
|
||||
}
|
||||
}
|
||||
|
||||
p, err := tp.s.UpdateAccount(tx.FromIdx, acc)
|
||||
p, err := tp.updateAccount(tx.FromIdx, acc)
|
||||
if err != nil {
|
||||
return nil, false, tracerr.Wrap(err)
|
||||
}
|
||||
@@ -1139,6 +1174,11 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
|
||||
if exitTree == nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
if tx.Amount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
|
||||
// if the Exit Amount==0, the Exit is not added to the ExitTree
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx)
|
||||
if tracerr.Unwrap(err) == db.ErrNotFound {
|
||||
// 1a. if idx does not exist in exitTree:
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
ethCommon "github.com/ethereum/go-ethereum/common"
|
||||
@@ -911,3 +912,198 @@ func TestTwoExits(t *testing.T) {
|
||||
|
||||
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
|
||||
}
|
||||
|
||||
func TestExitOf0Amount(t *testing.T) {
|
||||
// Test to check that when doing an Exit with amount 0 the Exit Root
|
||||
// does not change (as there is no new Exit Leaf created)
|
||||
|
||||
dir, err := ioutil.TempDir("", "tmpdb")
|
||||
require.NoError(t, err)
|
||||
defer assert.NoError(t, os.RemoveAll(dir))
|
||||
|
||||
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
|
||||
Type: statedb.TypeBatchBuilder, NLevels: 32})
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID := uint16(1)
|
||||
|
||||
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
||||
|
||||
set := `
|
||||
Type: Blockchain
|
||||
|
||||
CreateAccountDeposit(0) A: 100
|
||||
CreateAccountDeposit(0) B: 100
|
||||
|
||||
> batchL1 // batch1: freeze L1User{2}
|
||||
> batchL1 // batch2: forge L1User{2}
|
||||
|
||||
ForceExit(0) A: 10
|
||||
ForceExit(0) B: 0
|
||||
|
||||
> batchL1 // batch3: freeze L1User{2}
|
||||
> batchL1 // batch4: forge L1User{2}
|
||||
|
||||
ForceExit(0) A: 10
|
||||
|
||||
> batchL1 // batch5: freeze L1User{1}
|
||||
> batchL1 // batch6: forge L1User{1}
|
||||
|
||||
ForceExit(0) A: 0
|
||||
> batchL1 // batch7: freeze L1User{1}
|
||||
> batchL1 // batch8: forge L1User{1}
|
||||
> block
|
||||
`
|
||||
blocks, err := tc.GenerateBlocks(set)
|
||||
require.NoError(t, err)
|
||||
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
|
||||
require.NoError(t, err)
|
||||
err = tc.FillBlocksForgedL1UserTxs(blocks)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sanity check
|
||||
require.Equal(t, 2, len(blocks[0].Rollup.Batches[1].L1UserTxs))
|
||||
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
|
||||
require.Equal(t, big.NewInt(10), blocks[0].Rollup.Batches[3].L1UserTxs[0].Amount)
|
||||
require.Equal(t, big.NewInt(0), blocks[0].Rollup.Batches[3].L1UserTxs[1].Amount)
|
||||
|
||||
config := Config{
|
||||
NLevels: 32,
|
||||
MaxFeeTx: 64,
|
||||
MaxTx: 512,
|
||||
MaxL1Tx: 16,
|
||||
ChainID: chainID,
|
||||
}
|
||||
tp := NewTxProcessor(sdb, config)
|
||||
|
||||
// For this test are only processed the batches with transactions:
|
||||
// - Batch2, equivalent to Batches[1]
|
||||
// - Batch4, equivalent to Batches[3]
|
||||
// - Batch6, equivalent to Batches[5]
|
||||
// - Batch8, equivalent to Batches[7]
|
||||
|
||||
// process Batch2:
|
||||
_, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[1].L1UserTxs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
// process Batch4:
|
||||
ptOut, err := tp.ProcessTxs(nil, blocks[0].Rollup.Batches[3].L1UserTxs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
|
||||
exitRootBatch4 := ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String()
|
||||
|
||||
// process Batch6:
|
||||
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[5].L1UserTxs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
|
||||
// Expect that the ExitRoot for the Batch6 will be equal to the one for the
|
||||
// Batch4, as the Batch4 & Batch6 have the same tx with Exit Amount=10,
|
||||
// and Batch4 has a 2nd tx with Exit Amount=0.
|
||||
assert.Equal(t, exitRootBatch4, ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
|
||||
|
||||
// For the Batch8, as there is only 1 exit with Amount=0, the ExitRoot
|
||||
// should be 0.
|
||||
// process Batch8:
|
||||
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[7].L1UserTxs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
|
||||
}
|
||||
|
||||
func TestUpdatedAccounts(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "tmpdb")
|
||||
require.NoError(t, err)
|
||||
defer assert.NoError(t, os.RemoveAll(dir))
|
||||
|
||||
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
|
||||
Type: statedb.TypeSynchronizer, NLevels: 32})
|
||||
assert.NoError(t, err)
|
||||
|
||||
set := `
|
||||
Type: Blockchain
|
||||
AddToken(1)
|
||||
CreateAccountCoordinator(0) Coord // 256
|
||||
CreateAccountCoordinator(1) Coord // 257
|
||||
> batch // 1
|
||||
CreateAccountDeposit(0) A: 50 // 258
|
||||
CreateAccountDeposit(0) B: 60 // 259
|
||||
CreateAccountDeposit(1) A: 70 // 260
|
||||
CreateAccountDeposit(1) B: 80 // 261
|
||||
> batchL1 // 2
|
||||
> batchL1 // 3
|
||||
Transfer(0) A-B: 5 (126)
|
||||
> batch // 4
|
||||
Exit(1) B: 5 (126)
|
||||
> batch // 5
|
||||
> block
|
||||
`
|
||||
|
||||
chainID := uint16(0)
|
||||
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
|
||||
blocks, err := tc.GenerateBlocks(set)
|
||||
require.NoError(t, err)
|
||||
tilCfgExtra := til.ConfigExtra{
|
||||
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
|
||||
CoordUser: "Coord",
|
||||
}
|
||||
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
|
||||
require.NoError(t, err)
|
||||
tc.FillBlocksL1UserTxsBatchNum(blocks)
|
||||
err = tc.FillBlocksForgedL1UserTxs(blocks)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 5, len(blocks[0].Rollup.Batches))
|
||||
|
||||
config := Config{
|
||||
NLevels: 32,
|
||||
MaxFeeTx: 64,
|
||||
MaxTx: 512,
|
||||
MaxL1Tx: 16,
|
||||
ChainID: chainID,
|
||||
}
|
||||
tp := NewTxProcessor(sdb, config)
|
||||
|
||||
sortedKeys := func(m map[common.Idx]*common.Account) []int {
|
||||
keys := make([]int, 0)
|
||||
for k := range m {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
for _, batch := range blocks[0].Rollup.Batches {
|
||||
l2Txs := common.L2TxsToPoolL2Txs(batch.L2Txs)
|
||||
ptOut, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator, batch.L1UserTxs,
|
||||
batch.L1CoordinatorTxs, l2Txs)
|
||||
require.NoError(t, err)
|
||||
switch batch.Batch.BatchNum {
|
||||
case 1:
|
||||
assert.Equal(t, 2, len(ptOut.UpdatedAccounts))
|
||||
assert.Equal(t, []int{256, 257}, sortedKeys(ptOut.UpdatedAccounts))
|
||||
case 2:
|
||||
assert.Equal(t, 0, len(ptOut.UpdatedAccounts))
|
||||
assert.Equal(t, []int{}, sortedKeys(ptOut.UpdatedAccounts))
|
||||
case 3:
|
||||
assert.Equal(t, 4, len(ptOut.UpdatedAccounts))
|
||||
assert.Equal(t, []int{258, 259, 260, 261}, sortedKeys(ptOut.UpdatedAccounts))
|
||||
case 4:
|
||||
assert.Equal(t, 2+1, len(ptOut.UpdatedAccounts))
|
||||
assert.Equal(t, []int{256, 258, 259}, sortedKeys(ptOut.UpdatedAccounts))
|
||||
case 5:
|
||||
assert.Equal(t, 1+1, len(ptOut.UpdatedAccounts))
|
||||
assert.Equal(t, []int{257, 261}, sortedKeys(ptOut.UpdatedAccounts))
|
||||
}
|
||||
for idx, updAcc := range ptOut.UpdatedAccounts {
|
||||
acc, err := sdb.GetAccount(idx)
|
||||
require.NoError(t, err)
|
||||
// If acc.Balance is 0, set it to 0 with big.NewInt so
|
||||
// that the comparison succeeds. Without this, the
|
||||
// comparison will not succeed because acc.Balance is
|
||||
// set from a slice, and thus the internal big.Int
|
||||
// buffer is not nil (big.Int.abs)
|
||||
if acc.Balance.BitLen() == 0 {
|
||||
acc.Balance = big.NewInt(0)
|
||||
}
|
||||
assert.Equal(t, acc, updAcc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,19 +18,6 @@ import (
|
||||
"github.com/iden3/go-iden3-crypto/babyjub"
|
||||
)
|
||||
|
||||
// txs implements the interface Sort for an array of Tx
|
||||
type txs []common.PoolL2Tx
|
||||
|
||||
func (t txs) Len() int {
|
||||
return len(t)
|
||||
}
|
||||
func (t txs) Swap(i, j int) {
|
||||
t[i], t[j] = t[j], t[i]
|
||||
}
|
||||
func (t txs) Less(i, j int) bool {
|
||||
return t[i].AbsoluteFee > t[j].AbsoluteFee
|
||||
}
|
||||
|
||||
// CoordAccount contains the data of the Coordinator account, that will be used
|
||||
// to create new transactions of CreateAccountDeposit type to add new TokenID
|
||||
// accounts for the Coordinator to receive the fees.
|
||||
@@ -88,12 +75,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
|
||||
|
||||
// Reset tells the TxSelector to get its internal AccountsDB
|
||||
// from the required `batchNum`
|
||||
func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
|
||||
err := txsel.localAccountsDB.Reset(batchNum, true)
|
||||
if err != nil {
|
||||
return tracerr.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
|
||||
return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
|
||||
}
|
||||
|
||||
func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
|
||||
@@ -194,14 +177,16 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
|
||||
}
|
||||
|
||||
// discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch
|
||||
var discardedL2Txs []common.PoolL2Tx
|
||||
|
||||
var l1CoordinatorTxs []common.L1Tx
|
||||
positionL1 := len(l1UserTxs)
|
||||
var accAuths [][]byte
|
||||
|
||||
// sort l2TxsRaw (cropping at MaxTx at this point)
|
||||
l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
|
||||
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
|
||||
for i := range discardedL2Txs {
|
||||
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee"
|
||||
}
|
||||
|
||||
noncesMap := make(map[common.Idx]common.Nonce)
|
||||
var l2Txs []common.PoolL2Tx
|
||||
@@ -593,21 +578,33 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
}

// getL2Profitable returns the profitable selection of L2Txs sorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
	sort.Sort(txs(l2Txs))
	if len(l2Txs) < int(max) {
		return l2Txs
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
	[]common.PoolL2Tx) {
	// First sort by nonce so that txs from the same account are sorted so
	// that they could be applied in succession.
	sort.Slice(l2Txs, func(i, j int) bool {
		return l2Txs[i].Nonce < l2Txs[j].Nonce
	})
	// Sort by absolute fee with SliceStable, so that txs with same
	// AbsoluteFee are not rearranged and nonce order is kept in such case
	sort.SliceStable(l2Txs, func(i, j int) bool {
		return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
	})

	discardedL2Txs := []common.PoolL2Tx{}
	if len(l2Txs) > int(max) {
		discardedL2Txs = l2Txs[max:]
		l2Txs = l2Txs[:max]
	}
	l2Txs = l2Txs[:max]

	// sort l2Txs by Nonce. This can be done in many different ways, what
	// is needed is to output the l2Txs where the Nonce of l2Txs for each
	// Account is sorted, but the l2Txs can not be grouped by sender Account
	// neither by Fee. This is because later on the Nonces will need to be
	// sequential for the zkproof generation.
	sort.SliceStable(l2Txs, func(i, j int) bool {
	sort.Slice(l2Txs, func(i, j int) bool {
		return l2Txs[i].Nonce < l2Txs[j].Nonce
	})

	return l2Txs
	return l2Txs, discardedL2Txs
}
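A self-contained sketch of the ordering and cropping behaviour the new getL2Profitable relies on: after a plain sort by nonce and a stable sort by fee, transactions with the same fee keep their nonce order, and everything past `max` is returned as discarded. The toy tx type and values are illustrative only, not the real `common.PoolL2Tx`:

```go
package main

import (
	"fmt"
	"sort"
)

// toy stand-in for common.PoolL2Tx, illustrative only
type tx struct {
	Nonce       int
	AbsoluteFee float64
}

func main() {
	txs := []tx{{Nonce: 2, AbsoluteFee: 1}, {Nonce: 0, AbsoluteFee: 1}, {Nonce: 1, AbsoluteFee: 5}}
	max := 2

	// 1. sort by nonce, so same-account txs could be applied in succession
	sort.Slice(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })
	// 2. stable sort by fee: equal-fee txs keep the nonce order from step 1
	sort.SliceStable(txs, func(i, j int) bool { return txs[i].AbsoluteFee > txs[j].AbsoluteFee })

	// 3. crop at max, keeping the tail as discarded
	discarded := []tx{}
	if len(txs) > max {
		discarded = txs[max:]
		txs = txs[:max]
	}

	// 4. final sort of the kept txs by nonce, as required for sequential nonces
	sort.Slice(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })

	fmt.Println(txs)       // [{0 1} {1 5}]
	fmt.Println(discarded) // [{2 1}]
}
```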
@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
	pass := os.Getenv("POSTGRES_PASS")
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
	l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil)

	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
@@ -538,7 +538,8 @@ func TestTransferToBjj(t *testing.T) {
		MaxL1UserTxs:      5,
		TxProcessorConfig: tpc,
	}
	// batch1 to create some accounts with positive balance
	// batch1 to freeze L1UserTxs that will create some accounts with
	// positive balance
	l1UserTxs := []common.L1Tx{}
	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
	require.NoError(t, err)
@@ -628,3 +629,83 @@ func TestTransferToBjj(t *testing.T) {
	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
	require.NoError(t, err)
}

func TestTransferManyFromSameAccount(t *testing.T) {
	set := `
		Type: Blockchain

		CreateAccountDeposit(0) Coord: 0
		CreateAccountDeposit(0) A: 1000
		CreateAccountDeposit(0) B: 1000

		> batchL1 // freeze L1User{1}
		> batchL1 // forge L1User{1}
		> block
	`

	chainID := uint16(0)
	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(set)
	assert.NoError(t, err)

	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
	txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])

	// restart nonces of TilContext, as will be set by generating directly
	// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
	tc.RestartNonces()

	tpc := txprocessor.Config{
		NLevels:  16,
		MaxFeeTx: 10,
		MaxTx:    10,
		MaxL1Tx:  10,
		ChainID:  chainID,
	}
	selectionConfig := &SelectionConfig{
		MaxL1UserTxs:      5,
		TxProcessorConfig: tpc,
	}
	// batch1 to freeze L1UserTxs
	l1UserTxs := []common.L1Tx{}
	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
	require.NoError(t, err)

	// 8 transfers from the same account

	batchPoolL2 := `
		Type: PoolL2
		PoolTransfer(0) A-B: 10 (126) // 1
		PoolTransfer(0) A-B: 10 (126) // 2
		PoolTransfer(0) A-B: 10 (126) // 3
		PoolTransfer(0) A-B: 10 (126) // 4
		PoolTransfer(0) A-B: 10 (126) // 5
		PoolTransfer(0) A-B: 10 (126) // 6
		PoolTransfer(0) A-B: 10 (126) // 7
		PoolTransfer(0) A-B: 10 (126) // 8
		PoolTransfer(0) A-B: 10 (126) // 9
		PoolTransfer(0) A-B: 10 (126) // 10
		PoolTransfer(0) A-B: 10 (126) // 11
	`
	poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
	require.NoError(t, err)
	require.Equal(t, 11, len(poolL2Txs))

	// reorder poolL2Txs so that nonces are not sorted
	poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
	poolL2Txs[1], poolL2Txs[10] = poolL2Txs[10], poolL2Txs[1]

	// add the PoolL2Txs to the l2DB
	addL2Txs(t, txsel, poolL2Txs)
	// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
	require.NoError(t, err)
	assert.Equal(t, 3, len(oL1UserTxs))
	require.Equal(t, 0, len(oL1CoordTxs))
	assert.Equal(t, 7, len(oL2Txs))
	assert.Equal(t, 1, len(discardedL2Txs))

	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
	require.NoError(t, err)
}
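One assertion in the new test can be cross-checked against the getL2Profitable hunk earlier in this diff: 11 pool transfers against MaxTx = 10 means the crop keeps 10 and returns exactly 1 discarded tx, matching `assert.Equal(t, 1, len(discardedL2Txs))`. A tiny, purely illustrative check with toy values (not the real selector code):

```go
package main

import "fmt"

func main() {
	// 11 pool txs and MaxTx = 10, as configured in TestTransferManyFromSameAccount
	l2Txs := make([]int, 11)
	max := 10

	// same crop pattern as the new getL2Profitable
	discarded := []int{}
	if len(l2Txs) > max {
		discarded = l2Txs[max:]
		l2Txs = l2Txs[:max]
	}
	fmt.Println(len(l2Txs), len(discarded)) // 10 1
}
```

Why only 7 of the 10 kept transfers end up in oL2Txs is decided by the rest of GetL1L2TxSelection, likely because the 3 forged L1UserTxs count against the same MaxTx budget, though that logic is outside the hunks shown here.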