Compare commits

..

2 Commits

Author SHA1 Message Date
arnaucube 22802d6273 Migrate all packages to use Float40 2021-02-11 10:16:13 +01:00
    Migrate all packages to use Float40 & Add more test vectors at common
arnaucube 453ecc0504 Add Float40 methods 2021-02-10 14:49:31 +01:00
    This commit adds Float40 related methods, and keeps the Float16 version
    which will be deleted in the near future once the Float40 migration is
    ready.
59 changed files with 1824 additions and 3281 deletions

View File

@@ -4,10 +4,6 @@ Go implementation of the Hermez node.
## Developing
### Go version
The `hermez-node` has been tested with go version 1.14
### Unit testing
Running the unit tests requires a connection to a PostgreSQL database. You can

View File

@@ -221,7 +221,7 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
// L2DB // L2DB
l2DB := l2db.NewL2DB(database, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants) // Config (smart contract constants)
chainID := uint16(0) chainID := uint16(0)
@@ -585,7 +585,7 @@ func TestTimeout(t *testing.T) {
hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO) hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
require.NoError(t, err) require.NoError(t, err)
// L2DB // L2DB
l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 0.0, 24*time.Hour, apiConnConTO) l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
// API // API
apiGinTO := gin.Default() apiGinTO := gin.Default()

View File

@@ -10,7 +10,6 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/russross/meddler"
) )
const ( const (
@@ -47,33 +46,24 @@ var (
func retSQLErr(err error, c *gin.Context) { func retSQLErr(err error, c *gin.Context) {
log.Warnw("HTTP API SQL request error", "err", err) log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error() errMsg := tracerr.Unwrap(err).Error()
retDupKey := func(errCode pq.ErrorCode) {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if errCode == "23505" {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
} else {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errMsg,
})
}
}
if errMsg == errCtxTimeout { if errMsg == errCtxTimeout {
c.JSON(http.StatusServiceUnavailable, errorMsg{ c.JSON(http.StatusServiceUnavailable, errorMsg{
Message: errSQLTimeout, Message: errSQLTimeout,
}) })
} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok { } else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
retDupKey(sqlErr.Code) // https://www.postgresql.org/docs/current/errcodes-appendix.html
} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok { if sqlErr.Code == "23505" {
retDupKey(sqlErr.(*pq.Error).Code) c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
}
} else if tracerr.Unwrap(err) == sql.ErrNoRows { } else if tracerr.Unwrap(err) == sql.ErrNoRows {
c.JSON(http.StatusNotFound, errorMsg{ c.JSON(http.StatusNotFound, errorMsg{
Message: errMsg, Message: err.Error(),
}) })
} else { } else {
c.JSON(http.StatusInternalServerError, errorMsg{ c.JSON(http.StatusInternalServerError, errorMsg{
Message: errMsg, Message: err.Error(),
}) })
} }
} }
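For readers following the error-handling change above: PostgreSQL signals unique-constraint violations with SQLSTATE 23505, which `lib/pq` surfaces through `pq.Error.Code`; both the `retDupKey` closure and the inline branch shown above test exactly that code. A minimal sketch of the same detection pattern in isolation (the table and column names are invented for the example, not taken from this repository):

```
package example

import (
	"database/sql"
	"errors"
	"fmt"

	"github.com/lib/pq"
)

// isDuplicateKey reports whether err wraps a PostgreSQL unique-constraint
// violation (SQLSTATE 23505), the same check retSQLErr performs above.
func isDuplicateKey(err error) bool {
	var pqErr *pq.Error
	if errors.As(err, &pqErr) {
		return pqErr.Code == "23505"
	}
	return false
}

// insertTx inserts a row and maps duplicate-key failures to a friendlier
// error. "tx_pool" and "tx_id" are hypothetical names used only here.
func insertTx(db *sql.DB, txID string) error {
	_, err := db.Exec("INSERT INTO tx_pool (tx_id) VALUES ($1)", txID)
	if isDuplicateKey(err) {
		return fmt.Errorf("transaction %s already exists", txID)
	}
	return err
}
```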

View File

@@ -2,7 +2,6 @@ package api
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"net/http" "net/http"
@@ -28,7 +27,6 @@ func (a *API) postPoolTx(c *gin.Context) {
retBadReq(err, c) retBadReq(err, c)
return return
} }
writeTx.ClientIP = c.ClientIP()
// Insert to DB // Insert to DB
if err := a.l2.AddTxAPI(writeTx); err != nil { if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c) retSQLErr(err, c)
@@ -181,11 +179,6 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Validate TokenID
if poolTx.TokenID != account.TokenID {
return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
poolTx.TokenID, account.TokenID))
}
// Check signature // Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) { if !poolTx.VerifySignature(a.chainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature")) return tracerr.Wrap(errors.New("wrong signature"))

View File

@@ -10,7 +10,6 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// testPoolTxReceive is a struct to be used to assert the response // testPoolTxReceive is a struct to be used to assert the response
@@ -171,9 +170,9 @@ func TestPoolTxs(t *testing.T) {
fetchedTxID := common.TxID{} fetchedTxID := common.TxID{}
for _, tx := range tc.poolTxsToSend { for _, tx := range tc.poolTxsToSend {
jsonTxBytes, err := json.Marshal(tx) jsonTxBytes, err := json.Marshal(tx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"POST", "POST",
endpoint, endpoint,
@@ -188,42 +187,42 @@ func TestPoolTxs(t *testing.T) {
badTx.Amount = "99950000000000000" badTx.Amount = "99950000000000000"
badTx.Fee = 255 badTx.Fee = 255
jsonTxBytes, err := json.Marshal(badTx) jsonTxBytes, err := json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong signature // Wrong signature
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
badTx.FromIdx = "hez:foo:1000" badTx.FromIdx = "hez:foo:1000"
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong to // Wrong to
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
badTx.ToEthAddr = &ethAddr badTx.ToEthAddr = &ethAddr
badTx.ToIdx = nil badTx.ToIdx = nil
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// Wrong rq // Wrong rq
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
rqFromIdx := "hez:foo:30" rqFromIdx := "hez:foo:30"
badTx.RqFromIdx = &rqFromIdx badTx.RqFromIdx = &rqFromIdx
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
require.NoError(t, err) assert.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
require.NoError(t, err) assert.NoError(t, err)
// GET // GET
endpoint += "/" endpoint += "/"
for _, tx := range tc.poolTxsToReceive { for _, tx := range tc.poolTxsToReceive {
fetchedTx := testPoolTxReceive{} fetchedTx := testPoolTxReceive{}
require.NoError( assert.NoError(
t, doGoodReq( t, doGoodReq(
"GET", "GET",
endpoint+tx.TxID.String(), endpoint+tx.TxID.String(),
@@ -234,10 +233,10 @@ func TestPoolTxs(t *testing.T) {
} }
// 400, due invalid TxID // 400, due invalid TxID
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400) err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
require.NoError(t, err) assert.NoError(t, err)
// 404, due inexistent TxID in DB // 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404) err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
require.NoError(t, err) assert.NoError(t, err)
} }
func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) { func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {

View File

@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy. // it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer)) return bb.localStateDB.Reset(batchNum, fromSynchronizer)
} }
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch

cli/node/.gitignore vendored
View File

@@ -1,3 +1,2 @@
cfg.example.secret.toml cfg.example.secret.toml
cfg.toml cfg.toml
node

View File

@@ -2,10 +2,6 @@
This is the main cli for the node
## Go version
The `hermez-node` has been tested with go version 1.14
## Usage
```
@@ -69,64 +65,29 @@ when running the coordinator in sync mode
- The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section.
## Building
*All commands assume you are at the `cli/node` directory.*
Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```
Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.
Now build the node executable:
```
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
```
The executable is `node`.
## Usage Examples
The following commands assume you have built the node previously. You can also
run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer:
```
./node --mode sync --cfg cfg.buidler.toml run
go run . --mode sync --cfg cfg.buidler.toml run
```
Run the node in mode coordinator:
```
./node --mode coord --cfg cfg.buidler.toml run
go run . --mode coord --cfg cfg.buidler.toml run
```
Import an ethereum private key into the keystore:
```
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
Generate a new BabyJubJub key pair:
```
./node --mode coord --cfg cfg.buidler.toml genbjj
go run . --mode coord --cfg cfg.buidler.toml genbjj
```
Wipe the entire SQL database (this will destroy all synchronized and pool data):
```
./node --mode coord --cfg cfg.buidler.toml wipesql
go run . --mode coord --cfg cfg.buidler.toml wipesql
```
Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
```

View File

@@ -32,7 +32,6 @@ URL = "http://localhost:8545"
[Synchronizer] [Synchronizer]
SyncLoopInterval = "1s" SyncLoopInterval = "1s"
StatsRefreshPeriod = "1s" StatsRefreshPeriod = "1s"
StoreAccountUpdates = true
[SmartContracts] [SmartContracts]
Rollup = "0x8EEaea23686c319133a7cC110b840d1591d9AeE0" Rollup = "0x8EEaea23686c319133a7cC110b840d1591d9AeE0"
@@ -42,21 +41,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token" TokenHEZName = "Hermez Network Token"
[Coordinator] [Coordinator]
ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator # ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3" # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563" # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10 ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6 L1BatchTimeoutPerc = 0.999
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s" ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms" ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s" SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
PurgeByExtDelInterval = "1m"
[Coordinator.FeeAccount] [Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E" Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -67,7 +60,6 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
[Coordinator.L2DB] [Coordinator.L2DB]
SafetyPeriod = 10 SafetyPeriod = 10
MaxTxs = 512 MaxTxs = 512
MinFeeUSD = 0.0
TTL = "24h" TTL = "24h"
PurgeBatchDelay = 10 PurgeBatchDelay = 10
InvalidateBatchDelay = 20 InvalidateBatchDelay = 20
@@ -88,13 +80,13 @@ MaxTx = 512
NLevels = 32 NLevels = 32
[Coordinator.EthClient] [Coordinator.EthClient]
ReceiptTimeout = "60s"
ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms" CheckLoopInterval = "500ms"
Attempts = 4 Attempts = 4
AttemptsDelay = "500ms" AttemptsDelay = "500ms"
TxResendTimeout = "2m" CallGasLimit = 300000
NoReuseNonce = false GasPriceDiv = 100
MaxGasPrice = "5000000000"
GasPriceIncPerc = 10
[Coordinator.EthClient.Keystore] [Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore" Path = "/tmp/iden3-test/hermez/ethkeystore"

View File

@@ -11,8 +11,6 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/config" "github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node" "github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
@@ -25,7 +23,6 @@ const (
flagMode = "mode" flagMode = "mode"
flagSK = "privatekey" flagSK = "privatekey"
flagYes = "yes" flagYes = "yes"
flagBlock = "block"
modeSync = "sync" modeSync = "sync"
modeCoord = "coord" modeCoord = "coord"
) )
@@ -142,48 +139,6 @@ func cmdRun(c *cli.Context) error {
return nil return nil
} }
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)
db, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.Port,
cfg.PostgreSQL.Host,
cfg.PostgreSQL.User,
cfg.PostgreSQL.Password,
cfg.PostgreSQL.Name,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
historyDB := historydb.NewHistoryDB(db, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
db,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
return nil
}
// Config is the configuration of the hermez node execution // Config is the configuration of the hermez node execution
type Config struct { type Config struct {
mode node.Mode mode node.Mode
@@ -284,18 +239,6 @@ func main() {
Usage: "Run the hermez-node in the indicated mode", Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun, Action: cmdRun,
}, },
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
} }
err := app.Run(os.Args) err := app.Run(os.Args)

View File

@@ -263,13 +263,3 @@ type IdxNonce struct {
Idx Idx `db:"idx"` Idx Idx `db:"idx"`
Nonce Nonce `db:"nonce"` Nonce Nonce `db:"nonce"`
} }
// AccountUpdate represents an account balance and/or nonce update after a
// processed batch
type AccountUpdate struct {
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
Idx Idx `meddler:"idx"`
Nonce Nonce `meddler:"nonce"`
Balance *big.Int `meddler:"balance,bigint"`
}

View File

@@ -17,11 +17,6 @@ const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollu
// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem // EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EthMsgPrefix = "\x19Ethereum Signed Message:\n" const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for // AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary // account creations when necessary
type AccountCreationAuth struct { type AccountCreationAuth struct {
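`EthMsgPrefix` above is the standard Ethereum `personal_sign` prefix: wallets hash `\x19Ethereum Signed Message:\n` followed by the message length and the message itself with Keccak-256 before signing. The sketch below shows only that generic prefixing step; it is an illustration under that assumption and does not reproduce the exact byte layout Hermez uses for `AccountCreationAuth` signatures, which is not shown in this diff:

```
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// hashWithEthPrefix applies the standard Ethereum signed-message prefix and
// returns the Keccak-256 digest that a wallet would actually sign.
func hashWithEthPrefix(msg []byte) []byte {
	prefixed := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(msg), msg)
	return crypto.Keccak256([]byte(prefixed))
}
```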

View File

@@ -27,24 +27,6 @@ type Batch struct {
TotalFeesUSD *float64 `meddler:"total_fees_usd"` TotalFeesUSD *float64 `meddler:"total_fees_usd"`
} }
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}
// BatchNum identifies a batch // BatchNum identifies a batch
type BatchNum int64 type BatchNum int64
@@ -77,7 +59,6 @@ type BatchData struct {
L1CoordinatorTxs []L1Tx L1CoordinatorTxs []L1Tx
L2Txs []L2Tx L2Txs []L2Tx
CreatedAccounts []Account CreatedAccounts []Account
UpdatedAccounts []AccountUpdate
ExitTree []ExitInfo ExitTree []ExitInfo
Batch Batch Batch Batch
} }

View File

@@ -33,8 +33,7 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum { if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot) return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
// This result will be negative return -1
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
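A quick worked example of the slot arithmetic above, with illustrative numbers rather than values from any deployed configuration: with GenesisBlockNum = 100 and BlocksPerSlot = 40, block 185 falls in slot (185 − 100) / 40 = 2 and block 140 is the first block of slot 1. For blocks before the genesis block, one version of the function returns the (negative) result of the same division while the other simply returns -1.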
// SlotBlocks returns the first and the last block numbers included in that slot // SlotBlocks returns the first and the last block numbers included in that slot

common/float16.go Normal file
View File

@@ -0,0 +1,132 @@
// Package common Float16 provides methods to work with Hermez custom half float
// precision, 16 bits, codification internally called Float16 has been adopted
// to encode large integers. This is done in order to save bits when L2
// transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
var (
// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
ErrRoundingLoss = errors.New("input value causes rounding loss")
)
// Float16 represents a float in a 16 bit format
type Float16 uint16
// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
func (f16 Float16) Bytes() []byte {
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(f16))
return b[:]
}
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
// WARNING b[:2] for a b where len(b)<2 can break
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}
// BigInt converts the Float16 to a *big.Int integer
func (f16 *Float16) BigInt() *big.Int {
fl := int64(*f16)
m := big.NewInt(fl & 0x3FF)
e := big.NewInt(fl >> 11)
e5 := (fl >> 10) & 0x01
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
res := m.Mul(m, exp)
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
res.Add(res, exp.Div(exp, big.NewInt(2)))
}
return res
}
// floorFix2Float converts a fix to a float, always rounding down
func floorFix2Float(_f *big.Int) Float16 {
zero := big.NewInt(0)
ten := big.NewInt(10)
e := int64(0)
m := big.NewInt(0)
m.Set(_f)
if m.Cmp(zero) == 0 {
return 0
}
s := big.NewInt(0).Rsh(m, 10)
for s.Cmp(zero) != 0 {
m.Div(m, ten)
s.Rsh(m, 10)
e++
}
return Float16(m.Int64() | e<<11)
}
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
// case of loss during the encoding.
func NewFloat16(f *big.Int) (Float16, error) {
fl1 := floorFix2Float(f)
fi1 := fl1.BigInt()
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
m3 := (fl1 & 0x3FF) + 1
e3 := fl1 >> 11
if m3&0x400 == 0 {
m3 = 0x66
e3++
}
fl3 := m3 + e3<<11
fi3 := fl3.BigInt()
res := fl1
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
if d.Cmp(d2) == 1 {
res = fl2
d = d2
}
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
if d.Cmp(d3) == 1 {
res = fl3
}
// Do rounding check
if res.BigInt().Cmp(f) == 0 {
return res, nil
}
return res, tracerr.Wrap(ErrRoundingLoss)
}
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
// case of loss during the encoding.
func NewFloat16Floor(f *big.Int) Float16 {
fl1 := floorFix2Float(f)
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
if fi2.Cmp(f) < 1 {
return fl2
}
return fl1
}
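A short round-trip sketch of the Float16 codec above, assuming it sits next to this code in package `common`. The decode arithmetic can also be checked by hand: 0x0C01 has mantissa 1 (low 10 bits), the half-exponent bit set, and exponent 1, so it decodes to 1·10 + 10/2 = 15, exactly as in the test vectors below.

```
package common

import (
	"fmt"
	"math/big"
)

// demoFloat16Usage shows how the codec above is used.
func demoFloat16Usage() {
	f16, err := NewFloat16(big.NewInt(123000000))
	if err != nil {
		// NewFloat16 returns ErrRoundingLoss when the value cannot be
		// represented exactly; 123000000 can, so err is nil here.
		panic(err)
	}
	fmt.Printf("%#04x\n", uint16(f16)) // 0x307b, as in the test vectors
	fmt.Println(f16.BigInt())          // 123000000

	// 1024 cannot be represented exactly: the nearest encodable values
	// are 1020 and 1025, so NewFloat16 reports a rounding loss.
	_, err = NewFloat16(big.NewInt(1024))
	fmt.Println(err != nil) // true
}
```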

common/float16_test.go Normal file
View File

@@ -0,0 +1,132 @@
package common
import (
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
)
func TestConversionsFloat16(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
0xFFFF: "10235000000000000000000000000000000",
0x0000: "0",
0x0400: "0",
0x0001: "1",
0x0401: "1",
0x0800: "0",
0x0c00: "5",
0x0801: "10",
0x0c01: "15",
}
for test := range testVector {
fix := test.BigInt()
assert.Equal(t, fix.String(), testVector[test])
bi := big.NewInt(0)
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.NoError(t, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2FloatFloat16(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
"87950000000000000": 0x776f,
"87949999999999999": 0x736f,
}
for test := range testVector {
bi := big.NewInt(0)
bi.SetString(test, 10)
testFloat := NewFloat16Floor(bi)
assert.Equal(t, testFloat, testVector[test])
}
}
func TestConversionLossesFloat16(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.NoError(t, err)
c := b.BigInt()
assert.Equal(t, c, a)
a = big.NewInt(1024)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32767)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32768)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(65536000)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
}
func BenchmarkFloat16(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Bad big int")
}
return bigInt
}
type pair struct {
Float16 Float16
BigInt *big.Int
}
testVector := []pair{
{0x307B, newBigInt("123000000")},
{0x1DC6, newBigInt("454500")},
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
{0x0000, newBigInt("0")},
{0x0400, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1")},
{0x0800, newBigInt("0")},
{0x0c00, newBigInt("5")},
{0x0801, newBigInt("10")},
{0x0c01, newBigInt("15")},
}
b.Run("floorFix2Float()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
}
})
b.Run("NewFloat16()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float16.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
testVector[i%len(testVector)].Float16.BigInt()
}
})
}

View File

@@ -6,6 +6,7 @@
package common package common
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"math/big" "math/big"
@@ -17,14 +18,11 @@ const (
// maxFloat40Value is the maximum value that the Float40 can have // maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1) // (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
) )
var ( var (
// ErrFloat40Overflow is used when a given Float40 overflows the // ErrFloat40Overflow is used when a given nonce overflows the maximum
// maximum capacity of the Float40 (2**40-1) // capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1") ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a // ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40 // *big.Int to Float40
@@ -87,14 +85,15 @@ func NewFloat40(f *big.Int) (Float40, error) {
zero := big.NewInt(0) zero := big.NewInt(0)
ten := big.NewInt(10) ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00) thres := big.NewInt(0x08_00_00_00_00)
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 { for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
!bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
m = new(big.Int).Div(m, ten) m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1)) e = new(big.Int).Add(e, big.NewInt(1))
} }
if e.Int64() > 31 { if e.Int64() > 31 {
return 0, ErrFloat40E31 return 0, ErrFloat40E31
} }
if m.Cmp(thres) >= 0 { if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
return 0, ErrFloat40NotEnoughPrecission return 0, ErrFloat40NotEnoughPrecission
} }
r := new(big.Int).Add(m, r := new(big.Int).Add(m,
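A worked example for the `NewFloat40` reduction loop above: the mantissa is capped at 2³⁵ (the `0x08_00_00_00_00` threshold) and the exponent at 31 (5 bits), with `maxFloat40Value = 2⁴⁰ − 1` as the largest raw encoding. Encoding 123,000,000,000,000 strips four trailing zeros, giving m = 12,300,000,000 and e = 4; a value whose mantissa still exceeds the threshold after stripping zeros fails with `ErrFloat40NotEnoughPrecission`. The sketch below assumes it lives in package `common` next to this code; the tail of `NewFloat40` is cut off in this view, so the final packing is not repeated here.

```
package common

import (
	"fmt"
	"math/big"
)

// sketchFloat40 walks one value through NewFloat40 above.
func sketchFloat40() {
	v := big.NewInt(123_000_000_000_000) // 1.23e14
	f40, err := NewFloat40(v)
	if err != nil {
		// ErrFloat40E31 or ErrFloat40NotEnoughPrecission for values that
		// cannot be reduced below the 2^35 mantissa threshold.
		panic(err)
	}
	// The reduction loop strips trailing zeros while the mantissa is still
	// >= 0x08_00_00_00_00, so here m = 12_300_000_000 and e = 4.
	fmt.Println(f40)

	back, err := f40.BigInt()
	if err != nil {
		panic(err)
	}
	fmt.Println(back) // 123000000000000
}
```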

View File

@@ -15,7 +15,7 @@ import (
type L1Tx struct { type L1Tx struct {
// Stored in DB: mandatory fileds // Stored in DB: mandatory fileds
// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of: // TxID (12 bytes) for L1Tx is:
// bytes: | 1 | 8 | 2 | 1 | // bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) | // values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type: // where type:
@@ -225,7 +225,7 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes) copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
} }
// fee = 0 (as is L1Tx) // fee = 0 (as is L1Tx)
return b[:], nil return b[:], nil
@@ -237,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen] fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2] toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength] amountBytes := b[idxLen*2 : idxLen*2+5]
l1tx := L1Tx{} l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6)) fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))

View File

@@ -200,8 +200,8 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes) copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee) b[idxLen*2+5] = byte(tx.Fee)
return b[:], nil return b[:], nil
} }
@@ -226,10 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt() tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+5]).BigInt()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength]) tx.Fee = FeeSelector(b[idxLen*2+5])
return tx, nil return tx, nil
} }
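The data-availability encoding handled above lays each L2 transaction out as `fromIdx | toIdx | amountFloat40 | fee`, with `nLevels/8` bytes per index, 5 bytes for the Float40 amount and 1 byte for the fee selector. With nLevels = 32 that is 4 + 4 + 5 + 1 = 14 bytes, matching the `2*NLevels + 48`-bit figure used for `L2TxsData` later in this diff (112 bits). A small sketch of slicing such a buffer (`idxLen = nLevels/8` is assumed here; its derivation is outside the excerpt):

```
package common

import "fmt"

// sliceL2TxDA marks where each field of the L2 data-availability encoding
// above sits in the byte buffer b, which must hold one encoded transaction
// (14 bytes when nLevels == 32).
func sliceL2TxDA(b []byte, nLevels int) {
	idxLen := nLevels / 8
	fromIdx := b[0:idxLen]
	toIdx := b[idxLen : idxLen*2]
	amountF40 := b[idxLen*2 : idxLen*2+5] // Float40-encoded amount, 5 bytes
	fee := b[idxLen*2+5]                  // FeeSelector, 1 byte
	fmt.Println(len(fromIdx), len(toIdx), len(amountF40), fee)
}
```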

View File

@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"` ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"` ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float40
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"` State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"` RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"` RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"` RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float40
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"` RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"` AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -126,7 +126,7 @@ func (tx *PoolL2Tx) SetID() error {
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes // [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte var b [29]byte
@@ -179,7 +179,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 40 bits ] amountFloat40 // 5 bytes // [ 40 bits ] amountFloat40 // 5 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil { if tx.Amount == nil {
tx.Amount = big.NewInt(0) tx.Amount = big.NewInt(0)
@@ -238,7 +238,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 40 bits ] rqAmountFloat40 // 5 bytes // [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 48 bits ] rqToIdx // 6 bytes // [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes // [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil { if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0) tx.RqAmount = big.NewInt(0)

View File

@@ -62,17 +62,3 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
} }
return siblings[:pos] return siblings[:pos]
} }
// TokensToUSD is a helper function to calculate the USD value of a certain
// amount of tokens considering the normalized token price (which is the price
// commonly reported by exchanges)
func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
amountF := new(big.Float).SetInt(amount)
// Divide by 10^decimals to normalize the amount
baseF := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
amountF.Mul(amountF, big.NewFloat(valueUSD))
amountF.Quo(amountF, baseF)
amountUSD, _ := amountF.Float64()
return amountUSD
}
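Usage note for the removed `TokensToUSD` helper above: the raw on-chain amount is divided by 10^decimals before being multiplied by the USD price, so 1.5 tokens of an 18-decimals token priced at 2.00 USD comes out as 3.00 USD:

```
package common

import (
	"fmt"
	"math/big"
)

func exampleTokensToUSD() {
	// 1.5 tokens of an 18-decimals token, expressed as a raw on-chain amount.
	amount, _ := new(big.Int).SetString("1500000000000000000", 10)
	fmt.Println(TokensToUSD(amount, 18, 2.0)) // 3
}
```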

View File

@@ -102,8 +102,6 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx] ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr // ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx] ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]
// OnChain determines if is L1 (1/true) or L2 (0/false) // OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx] OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -115,7 +113,7 @@ type ZKInputs struct {
// account (fromIdx==0) // account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx] NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float40 // DepositAmountF encoded as float40
DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx] DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// FromEthAddr // FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx] FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int // FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -328,7 +326,6 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
zki.AuxToIdx = newSlice(maxTx) zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx) zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx) zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx) zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx) zki.NewAccount = newSlice(maxTx)
@@ -479,8 +476,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes()) copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...) b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData // [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528) l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen) l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ { for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -497,9 +494,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
} }
b = append(b, l1TxsDataAvailability...) b = append(b, l1TxsDataAvailability...)
// [MAX_TX*(2*NLevels + 48) bits] L2TxsData // [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen) l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ { for i := 0; i < len(z.Metadata.L2TxsData); i++ {
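The per-transaction bit counts in `ToHashGlobalData` above can be reconstructed from the encoded fields. For L2 transactions the `2*NLevels + 48` (respectively `+ 24`) bits are the two indices plus a 40-bit (respectively 16-bit) amount and an 8-bit fee selector. For L1 transactions, assuming the usual field widths (a 160-bit Ethereum address, a 256-bit BabyJubJub key, a 32-bit token ID and two amount fields; this breakdown is an inference, the excerpt does not spell it out), the fixed part works out as 160 + 256 + 32 + 2·40 = 528 bits with Float40 amounts, against 160 + 256 + 32 + 2·16 = 480 with Float16, on top of the `2*MaxLevels` index bits.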

View File

@@ -3,7 +3,6 @@ package config
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big"
"time" "time"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
@@ -52,85 +51,33 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"` L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the // ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status // ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"` ProofServerPollInterval Duration `validate:"required"`
// ForgeRetryInterval is the waiting interval between calls forge a // ForgeRetryInterval is the waiting interval between calls forge a
// batch after an error // batch after an error
ForgeRetryInterval Duration `validate:"required"` ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"` SyncRetryInterval Duration `validate:"required"`
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval Duration `validate:"required"`
// L2DB is the DB that holds the pool of L2Txs // L2DB is the DB that holds the pool of L2Txs
L2DB struct { L2DB struct {
// SafetyPeriod is the number of batches after which // SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool // non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"` SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the maximum number of pending L2Txs that can be // MaxTxs is the number of L2Txs that once reached triggers
// stored in the pool. Once this number of pending L2Txs is // deletion of old L2Txs
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"` MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted. // L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"` TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge // PurgeBatchDelay is the delay between batches to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBatchDelay int64 `validate:"required"` PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBatchDelay int64 `validate:"required"` InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBlockDelay int64 `validate:"required"` PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBlockDelay int64 `validate:"required"` InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
TxSelector struct { TxSelector struct {
@@ -150,13 +97,12 @@ type Coordinator struct {
NLevels int64 `validate:"required"` NLevels int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
EthClient struct { EthClient struct {
// MaxGasPrice is the maximum gas price allowed for ethereum // CallGasLimit is the default gas limit set for ethereum
// transactions // calls, except for methods where a particular gas limit is
MaxGasPrice *big.Int `validate:"required"` // harcoded because it's known to be a big value
// GasPriceIncPerc is the percentage increase of gas price set CallGasLimit uint64 `validate:"required"`
// in an ethereum transaction from the suggested gas price by // GasPriceDiv is the gas price division
// the ehtereum node GasPriceDiv uint64 `validate:"required"`
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt // CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"` CheckLoopInterval Duration `validate:"required"`
@@ -166,13 +112,6 @@ type Coordinator struct {
// AttemptsDelay is delay between attempts do do an eth client // AttemptsDelay is delay between attempts do do an eth client
// RPC call // RPC call
AttemptsDelay Duration `validate:"required"` AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept // Keystore is the ethereum keystore where private keys are kept
Keystore struct { Keystore struct {
// Path to the keystore // Path to the keystore
@@ -240,11 +179,6 @@ type Node struct {
// `Eth.LastBatch`). This value only affects the reported % of // `Eth.LastBatch`). This value only affects the reported % of
// synchronization of blocks and batches, nothing else. // synchronization of blocks and batches, nothing else.
StatsRefreshPeriod Duration `validate:"required"` StatsRefreshPeriod Duration `validate:"required"`
// StoreAccountUpdates when set to true makes the synchronizer
// store every account update in the account_update SQL table.
// This allows querying nonces and balances from the HistoryDB
// via SQL.
StoreAccountUpdates bool
} `validate:"required"` } `validate:"required"`
SmartContracts struct { SmartContracts struct {
// Rollup is the address of the Hermez.sol smart contract // Rollup is the address of the Hermez.sol smart contract

View File

@@ -47,8 +47,6 @@ type Debug struct {
MineBlockNum int64 MineBlockNum int64
// SendBlockNum is the blockNum when the batch was sent to ethereum // SendBlockNum is the blockNum when the batch was sent to ethereum
SendBlockNum int64 SendBlockNum int64
// ResendNum is the number of times the tx has been resent
ResendNum int
// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch // LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
// was scheduled // was scheduled
LastScheduledL1BatchBlockNum int64 LastScheduledL1BatchBlockNum int64
@@ -66,17 +64,10 @@ type Debug struct {
// StartToSendDelay is the delay between starting a batch and sending // StartToSendDelay is the delay between starting a batch and sending
// it to ethereum, in seconds // it to ethereum, in seconds
StartToSendDelay float64 StartToSendDelay float64
// StartToMineDelay is the delay between starting a batch and having
// it mined in seconds
StartToMineDelay float64
// SendToMineDelay is the delay between sending a batch tx and having
// it mined in seconds
SendToMineDelay float64
} }
// BatchInfo contans the Batch information // BatchInfo contans the Batch information
type BatchInfo struct { type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum BatchNum common.BatchNum
ServerProof prover.Client ServerProof prover.Client
ZKInputs *common.ZKInputs ZKInputs *common.ZKInputs
@@ -92,14 +83,7 @@ type BatchInfo struct {
ForgeBatchArgs *eth.RollupForgeBatchArgs ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo // FeesInfo
EthTx *types.Transaction EthTx *types.Transaction
EthTxErr error
// SendTimestamp the time of batch sent to ethereum
SendTimestamp time.Time
Receipt *types.Receipt Receipt *types.Receipt
// Fail is true if:
// - The receipt status is failed
// - A previous parent batch is failed
Fail bool
Debug Debug Debug Debug
} }

View File

@@ -3,8 +3,8 @@ package coordinator
import ( import (
"context" "context"
"fmt" "fmt"
"math/big"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
@@ -24,8 +24,6 @@ import (
var ( var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet") errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
) )
const ( const (
@@ -44,68 +42,18 @@ type Config struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 L1BatchTimeoutPerc float64
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
SendBatchBlocksMarginCheck int64
// EthClientAttempts is the number of attempts to do an eth client RPC // EthClientAttempts is the number of attempts to do an eth client RPC
// call before giving up // call before giving up
EthClientAttempts int EthClientAttempts int
// ForgeRetryInterval is the waiting interval between calls forge a // ForgeRetryInterval is the waiting interval between calls forge a
// batch after an error // batch after an error
ForgeRetryInterval time.Duration ForgeRetryInterval time.Duration
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay time.Duration
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval time.Duration SyncRetryInterval time.Duration
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval time.Duration
// EthClientAttemptsDelay is delay between attempts do do an eth client // EthClientAttemptsDelay is delay between attempts do do an eth client
// RPC call // RPC call
EthClientAttemptsDelay time.Duration EthClientAttemptsDelay time.Duration
// EthTxResendTimeout is the timeout after which a non-mined ethereum
// transaction will be resent (reusing the nonce) with a newly
// calculated gas price
EthTxResendTimeout time.Duration
// EthNoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
EthNoReuseNonce bool
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int
// GasPriceIncPerc is the percentage increase of gas price set in an
// ethereum transaction from the suggested gas price by the ethereum
// node
GasPriceIncPerc int64
// TxManagerCheckInterval is the waiting interval between receipt // TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration TxManagerCheckInterval time.Duration
@@ -113,8 +61,6 @@ type Config struct {
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
DebugBatchPath string DebugBatchPath string
Purger PurgerCfg Purger PurgerCfg
// VerifierIdx is the index of the verifier contract registered in the
// smart contract
VerifierIdx uint8 VerifierIdx uint8
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
@@ -128,17 +74,10 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
} }
} }
type fromBatch struct {
BatchNum common.BatchNum
ForgerAddr ethCommon.Address
StateRoot *big.Int
}
// Coordinator implements the Coordinator type // Coordinator implements the Coordinator type
type Coordinator struct { type Coordinator struct {
// State // State
pipelineNum int // Pipeline sequential number. The first pipeline is 1 pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
pipelineFromBatch fromBatch // batch from which we started the pipeline
provers []prover.Client provers []prover.Client
consts synchronizer.SCConsts consts synchronizer.SCConsts
vars synchronizer.SCVariables vars synchronizer.SCVariables
@@ -157,17 +96,7 @@ type Coordinator struct {
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
// mutexL2DBUpdateDelete protects updates to the L2DB so that
// these two processes always happen exclusively:
// - Pipeline taking pending txs, running through the TxProcessor and
// marking selected txs as forging
// - Coordinator deleting pending txs that have been marked with
// `external_delete`.
// Without this mutex, the coordinator could delete a pending tx that
// has just been selected by the TxProcessor in the pipeline.
mutexL2DBUpdateDelete sync.Mutex
pipeline *Pipeline pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum
purger *Purger purger *Purger
txManager *TxManager txManager *TxManager
@@ -210,12 +139,7 @@ func NewCoordinator(cfg Config,
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{ c := Coordinator{
pipelineNum: 0, pipelineBatchNum: -1,
pipelineFromBatch: fromBatch{
BatchNum: 0,
ForgerAddr: ethCommon.Address{},
StateRoot: big.NewInt(0),
},
provers: serverProofs, provers: serverProofs,
consts: *scConsts, consts: *scConsts,
vars: *initSCVars, vars: *initSCVars,
@@ -259,10 +183,8 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
} }
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) { func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
c.pipelineNum++ return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector, c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
c.provers, &c.consts)
} }
// MsgSyncBlock indicates an update to the Synchronizer stats // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -283,9 +205,6 @@ type MsgSyncReorg struct {
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct { type MsgStopPipeline struct {
Reason string Reason string
// FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum
} }
// SendMsg is a thread safe method to pass a message to the Coordinator // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -296,36 +215,27 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
if update.Auction != nil {
vars.Auction = *update.Auction
}
if update.WDelayer != nil {
vars.WDelayer = *update.WDelayer
}
}
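updateSCVars centralizes the partial-update pattern that the Coordinator, Pipeline and TxManager each repeat inline in their syncSCVars methods on the other side of this diff. A toy sketch of the semantics (stand-in types, not the real synchronizer structs):

package main

import "fmt"

// Only the non-nil fields of the update overwrite the current values.
type scVars struct{ Rollup, Auction, WDelayer string }
type scVarsPtr struct{ Rollup, Auction, WDelayer *string }

func updateSCVars(vars *scVars, update scVarsPtr) {
    if update.Rollup != nil {
        vars.Rollup = *update.Rollup
    }
    if update.Auction != nil {
        vars.Auction = *update.Auction
    }
    if update.WDelayer != nil {
        vars.WDelayer = *update.WDelayer
    }
}

func main() {
    vars := scVars{Rollup: "r0", Auction: "a0", WDelayer: "w0"}
    newAuction := "a1"
    updateSCVars(&vars, scVarsPtr{Auction: &newAuction}) // only Auction changes
    fmt.Printf("%+v\n", vars)                            // {Rollup:r0 Auction:a1 WDelayer:w0}
}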
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) { func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&c.vars, vars) if vars.Rollup != nil {
c.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
c.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
c.vars.WDelayer = *vars.WDelayer
}
} }
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables, func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool { currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
return false
}
var slot *common.Slot var slot *common.Slot
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock { if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
slot = currentSlot slot = currentSlot
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock { } else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
slot = nextSlot slot = nextSlot
} else { } else {
log.Warnw("canForge: requested blockNum is outside current and next slot", log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
"blockNum", blockNum, "currentSlot", currentSlot, "blockNum", blockNum, "currentSlot", currentSlot,
"nextSlot", nextSlot, "nextSlot", nextSlot,
) )
@@ -334,23 +244,16 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
anyoneForge := false anyoneForge := false
if !slot.ForgerCommitment && if !slot.ForgerCommitment &&
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) { auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)", log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
"block", blockNum) "block", blockNum)
anyoneForge = true anyoneForge = true
} }
if slot.Forger == addr || anyoneForge { if slot.Forger == addr || anyoneForge {
return true return true
} }
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
return false return false
} }
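A condensed, self-contained sketch of the rule canForge implements (illustrative function and addresses; it omits the genesis-block and slot-selection checks shown above):

package main

import "fmt"

// canForgeRule: the address may forge if it won the slot, or if nobody has
// forged in the slot yet and the block is past the slot deadline (then
// anyone may forge).
func canForgeRule(slotForger, addr string, forgerCommitment bool,
    relativeBlock, slotDeadline int64) bool {
    anyoneForge := !forgerCommitment && relativeBlock >= slotDeadline
    return slotForger == addr || anyoneForge
}

func main() {
    // We didn't win the slot, nothing forged yet, and the deadline passed:
    fmt.Println(canForgeRule("0xwinner", "0xus", false, 25, 20)) // true
    // We didn't win the slot and the deadline hasn't passed yet:
    fmt.Println(canForgeRule("0xwinner", "0xus", false, 10, 20)) // false
}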
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) canForge() bool { func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1 blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction, return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -359,39 +262,21 @@ func (c *Coordinator) canForge() bool {
} }
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error { func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
nextBlock := c.stats.Eth.LastBlock.Num + 1 canForge := c.canForge()
canForge := c.canForgeAt(nextBlock)
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
}
if c.pipeline == nil { if c.pipeline == nil {
relativeBlock := c.consts.Auction.RelativeBlock(nextBlock) if canForge {
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugf("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge {
log.Infow("Coordinator: forging state begin", "block", log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum) stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
fromBatch := fromBatch{ batchNum := common.BatchNum(stats.Sync.LastBatch)
BatchNum: stats.Sync.LastBatch.BatchNum,
ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
StateRoot: stats.Sync.LastBatch.StateRoot,
}
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
fromBatch.BatchNum = c.lastNonFailedBatchNum
fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0)
}
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineFromBatch = fromBatch if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineBatchNum = batchNum
} }
} else { } else {
if !canForge { if !canForge {
@@ -401,12 +286,25 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
} }
} }
if c.pipeline == nil { if c.pipeline == nil {
if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(), // Mark invalid in Pool due to forged L2Txs
stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil { // for _, batch := range batches {
// if err := c.l2DB.InvalidateOldNonces(
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
// return err
// }
// }
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, }
int64(stats.Sync.LastBatch.BatchNum)); err != nil { _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err)
}
_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -433,43 +331,33 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars) c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
} }
if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress && if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil || // There's been a reorg and the batch from which the pipeline
c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) { // was started was in a block that was discarded. The batch
// There's been a reorg and the batch state root from which the // may not be in the main chain, so we stop the pipeline as a
// pipeline was started has changed (probably because it was in // precaution (it will be started again once the node is in
// a block that was discarded), and it was sent by a different // sync).
// coordinator than us. That batch may never be in the main log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
// chain, so we stop the pipeline (it will be started again "sync.LastBatch", c.stats.Sync.LastBatch,
// once the node is in sync). "c.pipelineBatchNum", c.pipelineBatchNum)
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+ if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0, func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
// the next pipeline will start from the last state of the synchronizer,
// otherwise, it will start from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1
}
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.Stop(c.ctx) c.pipeline.Stop(c.ctx)
c.pipeline = nil c.pipeline = nil
} }
if err := c.l2DB.Reorg(batchNum); err != nil { if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.lastNonFailedBatchNum = batchNum if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
// TODO: Check that we are in a slot in which we can't forge
}
return nil return nil
} }
@@ -485,7 +373,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
} }
case MsgStopPipeline: case MsgStopPipeline:
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason) log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil { if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err)) return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
} }
default: default:
@@ -508,7 +396,7 @@ func (c *Coordinator) Start() {
c.wg.Add(1) c.wg.Add(1)
go func() { go func() {
waitCh := time.After(longWaitDuration) waitDuration := longWaitDuration
for { for {
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
@@ -520,42 +408,23 @@ func (c *Coordinator) Start() {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.handleMsg", "err", err) log.Errorw("Coordinator.handleMsg", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval) waitDuration = c.cfg.SyncRetryInterval
continue continue
} }
waitCh = time.After(longWaitDuration) waitDuration = longWaitDuration
case <-waitCh: case <-time.After(waitDuration):
if !c.stats.Synced() { if !c.stats.Synced() {
waitCh = time.After(longWaitDuration) waitDuration = longWaitDuration
continue continue
} }
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil { if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
waitCh = time.After(longWaitDuration)
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("Coordinator.syncStats", "err", err) log.Errorw("Coordinator.syncStats", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval) waitDuration = c.cfg.SyncRetryInterval
continue continue
} }
waitCh = time.After(longWaitDuration) waitDuration = longWaitDuration
}
}
}()
c.wg.Add(1)
go func() {
for {
select {
case <-c.ctx.Done():
log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
c.wg.Done()
return
case <-time.After(c.cfg.PurgeByExtDelInterval):
c.mutexL2DBUpdateDelete.Lock()
if err := c.l2DB.PurgeByExternalDelete(); err != nil {
log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
}
c.mutexL2DBUpdateDelete.Unlock()
} }
} }
}() }()
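One side of this loop keeps the timer channel in a waitCh variable instead of calling time.After inside the select; in Go those behave differently. A self-contained sketch (not repo code) of the difference:

package main

import (
    "fmt"
    "time"
)

// time.After placed directly inside a select arms a brand-new timer on every
// loop iteration, so frequent messages can postpone the timeout indefinitely.
// A channel stored outside the loop keeps ticking across iterations until it
// fires or is explicitly reassigned.
func main() {
    msgs := make(chan int)
    go func() {
        for i := 0; ; i++ {
            time.Sleep(30 * time.Millisecond)
            msgs <- i
        }
    }()

    waitCh := time.After(100 * time.Millisecond)
    for {
        select {
        case m := <-msgs:
            fmt.Println("msg", m) // messages keep arriving...
        case <-waitCh:
            fmt.Println("timeout fired despite the message traffic")
            return
        }
    }
}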


@@ -105,7 +105,7 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, nil) historyDB := historydb.NewHistoryDB(db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB") txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
@@ -261,8 +261,8 @@ func TestCoordinatorFlow(t *testing.T) {
var stats synchronizer.Stats var stats synchronizer.Stats
stats.Eth.LastBlock = *ethClient.CtlLastBlock() stats.Eth.LastBlock = *ethClient.CtlLastBlock()
stats.Sync.LastBlock = stats.Eth.LastBlock stats.Sync.LastBlock = stats.Eth.LastBlock
stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch() stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum) stats.Sync.LastBatch = stats.Eth.LastBatch
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1) canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
require.NoError(t, err) require.NoError(t, err)
var slot common.Slot var slot common.Slot
@@ -279,7 +279,7 @@ func TestCoordinatorFlow(t *testing.T) {
// Copy stateDB to synchronizer if there was a new batch // Copy stateDB to synchronizer if there was a new batch
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch) source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch) dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
if stats.Sync.LastBatch.BatchNum != 0 { if stats.Sync.LastBatch != 0 {
if _, err := os.Stat(dest); os.IsNotExist(err) { if _, err := os.Stat(dest); os.IsNotExist(err) {
log.Infow("Making pebble checkpoint for sync", log.Infow("Making pebble checkpoint for sync",
"source", source, "dest", dest) "source", source, "dest", dest)


@@ -2,7 +2,6 @@ package coordinator
import ( import (
"context" "context"
"database/sql"
"fmt" "fmt"
"math/big" "math/big"
"sync" "sync"
@@ -25,35 +24,24 @@ type statsVars struct {
Vars synchronizer.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
type state struct {
batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
lastSlotForged int64
}
// Pipeline manages the forging of batches with parallel server proofs // Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct { type Pipeline struct {
num int
cfg Config cfg Config
consts synchronizer.SCConsts consts synchronizer.SCConsts
// state // state
state state batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
started bool started bool
rw sync.RWMutex
errAtBatchNum common.BatchNum
lastForgeTime time.Time
proversPool *ProversPool proversPool *ProversPool
provers []prover.Client provers []prover.Client
coord *Coordinator
txManager *TxManager txManager *TxManager
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
l2DB *l2db.L2DB l2DB *l2db.L2DB
txSelector *txselector.TxSelector txSelector *txselector.TxSelector
batchBuilder *batchbuilder.BatchBuilder batchBuilder *batchbuilder.BatchBuilder
mutexL2DBUpdateDelete *sync.Mutex
purger *Purger purger *Purger
stats synchronizer.Stats stats synchronizer.Stats
@@ -65,29 +53,14 @@ type Pipeline struct {
cancel context.CancelFunc cancel context.CancelFunc
} }
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
p.rw.Lock()
defer p.rw.Unlock()
p.errAtBatchNum = batchNum
}
func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
p.rw.RLock()
defer p.rw.RUnlock()
return p.errAtBatchNum
}
// NewPipeline creates a new Pipeline // NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context, func NewPipeline(ctx context.Context,
cfg Config, cfg Config,
num int, // Pipeline sequential number
historyDB *historydb.HistoryDB, historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB, l2DB *l2db.L2DB,
txSelector *txselector.TxSelector, txSelector *txselector.TxSelector,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
mutexL2DBUpdateDelete *sync.Mutex,
purger *Purger, purger *Purger,
coord *Coordinator,
txManager *TxManager, txManager *TxManager,
provers []prover.Client, provers []prover.Client,
scConsts *synchronizer.SCConsts, scConsts *synchronizer.SCConsts,
@@ -106,7 +79,6 @@ func NewPipeline(ctx context.Context,
return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool")) return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
} }
return &Pipeline{ return &Pipeline{
num: num,
cfg: cfg, cfg: cfg,
historyDB: historyDB, historyDB: historyDB,
l2DB: l2DB, l2DB: l2DB,
@@ -114,9 +86,7 @@ func NewPipeline(ctx context.Context,
batchBuilder: batchBuilder, batchBuilder: batchBuilder,
provers: provers, provers: provers,
proversPool: proversPool, proversPool: proversPool,
mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
purger: purger, purger: purger,
coord: coord,
txManager: txManager, txManager: txManager,
consts: *scConsts, consts: *scConsts,
statsVarsCh: make(chan statsVars, queueLen), statsVarsCh: make(chan statsVars, queueLen),
@@ -134,87 +104,47 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state // reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error { stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.state = state{ p.batchNum = batchNum
batchNum: batchNum, p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
lastScheduledL1BatchBlockNum: 0,
lastSlotForged: -1,
}
p.stats = *stats p.stats = *stats
p.vars = *vars p.vars = *vars
p.lastScheduledL1BatchBlockNum = 0
// Reset the StateDB in TxSelector and BatchBuilder from the err := p.txSelector.Reset(p.batchNum)
// synchronizer only if the checkpoint we reset from either:
// a. Doesn't exist in the TxSelector/BatchBuilder
// b. The batch has already been synced by the synchronizer and has a
// different MTRoot than the BatchBuilder
// Otherwise, reset from the local checkpoint.
// First attempt to reset from local checkpoint if such checkpoint exists
existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
fromSynchronizerTxSelector := !existsTxSelector err = p.batchBuilder.Reset(p.batchNum, true)
if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
return tracerr.Wrap(err)
}
existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
fromSynchronizerBatchBuilder := !existsBatchBuilder
if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
return tracerr.Wrap(err)
}
// After reset, check that if the batch exists in the historyDB, the
// stateRoot matches with the local one, if not, force a reset from
// synchronizer
batch, err := p.historyDB.GetBatch(p.state.batchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
// nothing to do
} else if err != nil {
return tracerr.Wrap(err)
} else {
localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
if batch.StateRoot.Cmp(localStateRoot) != 0 {
log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
// StateRoot from synchronizer doesn't match StateRoot
// from batchBuilder, force a reset from synchronizer
if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
}
}
return nil return nil
} }
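One side of this hunk makes reset choose between restoring from the local checkpoint and restoring from the synchronizer. A condensed sketch of that decision (hypothetical helper, simplified to booleans and roots):

package main

import (
    "fmt"
    "math/big"
)

// resetFromSync: reset the TxSelector/BatchBuilder StateDB from the
// synchronizer only when the local checkpoint is missing, or when the batch
// is already in the HistoryDB with a state root different from the local one.
func resetFromSync(checkpointExists, batchInHistoryDB bool, historyRoot, localRoot *big.Int) bool {
    if !checkpointExists {
        return true
    }
    if batchInHistoryDB && historyRoot.Cmp(localRoot) != 0 {
        return true
    }
    return false
}

func main() {
    fmt.Println(resetFromSync(false, false, nil, nil))                   // true: no local checkpoint
    fmt.Println(resetFromSync(true, true, big.NewInt(5), big.NewInt(5))) // false: roots match, reuse local state
    fmt.Println(resetFromSync(true, true, big.NewInt(5), big.NewInt(6))) // true: roots diverged
}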
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) { func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&p.vars, vars) if vars.Rollup != nil {
p.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
p.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
p.vars.WDelayer = *vars.WDelayer
}
} }
// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs, // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
// and then waits for an available proof server and sends the zkInputs to it so // and then waits for an available proof server and sends the zkInputs to it so
// that the proof computation begins. // that the proof computation begins.
func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) { func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
p.mutexL2DBUpdateDelete.Lock()
batchInfo, err := p.forgeBatch(batchNum) batchInfo, err := p.forgeBatch(batchNum)
p.mutexL2DBUpdateDelete.Unlock()
if ctx.Err() != nil { if ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
if tracerr.Unwrap(err) == errLastL1BatchNotSynced { if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err, log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum, "lastForgeL1TxsNum", p.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum) "syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else { } else {
log.Errorw("forgeBatch", "err", err) log.Errorw("forgeBatch", "err", err)
} }
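The Lock/Unlock around forgeBatch above pairs with the coordinator's PurgeByExternalDelete loop; a toy model (illustrative only) of how the shared mutex keeps the two steps from interleaving, so a pending tx cannot be deleted right after being selected for forging:

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var mu sync.Mutex

    selectAndForge := func() {
        mu.Lock()
        defer mu.Unlock()
        fmt.Println("pipeline: selecting pending txs and marking them forging")
        time.Sleep(10 * time.Millisecond)
    }

    purgeExternalDeletes := func() {
        mu.Lock()
        defer mu.Unlock()
        fmt.Println("coordinator: purging txs marked external_delete")
    }

    var wg sync.WaitGroup
    wg.Add(2)
    go func() { defer wg.Done(); selectAndForge() }()
    go func() { defer wg.Done(); purgeExternalDeletes() }()
    wg.Wait()
}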
@@ -258,7 +188,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Add(1) p.wg.Add(1)
go func() { go func() {
waitCh := time.After(zeroDuration) waitDuration := zeroDuration
for { for {
select { select {
case <-p.ctx.Done(): case <-p.ctx.Done():
@@ -268,42 +198,20 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case statsVars := <-p.statsVarsCh: case statsVars := <-p.statsVarsCh:
p.stats = statsVars.Stats p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars) p.syncSCVars(statsVars.Vars)
case <-waitCh: case <-time.After(waitDuration):
// Once errAtBatchNum != 0, we stop forging batchNum = p.batchNum + 1
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue
}
batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum) batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} else if err != nil { } else if err != nil {
p.setErrAtBatchNum(batchNum) waitDuration = p.cfg.SyncRetryInterval
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.handleForgBatch: %v", err),
FailedBatchNum: batchNum,
})
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue continue
} }
p.lastForgeTime = time.Now() p.batchNum = batchNum
p.state.batchNum = batchNum
select { select {
case batchChSentServerProof <- batchInfo: case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done(): case <-p.ctx.Done():
} }
waitCh = time.After(zeroDuration)
} }
} }
}() }()
@@ -317,28 +225,16 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Done() p.wg.Done()
return return
case batchInfo := <-batchChSentServerProof: case batchInfo := <-batchChSentServerProof:
// Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
continue
}
err := p.waitServerProof(p.ctx, batchInfo) err := p.waitServerProof(p.ctx, batchInfo)
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
batchInfo.ServerProof = nil
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("waitServerProof", "err", err) log.Errorw("waitServerProof", "err", err)
p.setErrAtBatchNum(batchInfo.BatchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.waitServerProof: %v", err),
FailedBatchNum: batchInfo.BatchNum,
})
continue continue
} }
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
// batchInfo.ServerProof = nil
p.txManager.AddBatch(p.ctx, batchInfo) p.txManager.AddBatch(p.ctx, batchInfo)
} }
} }
@@ -388,10 +284,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Structure to accumulate data and metadata of the batch
now := time.Now() batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum} batchInfo.Debug.StartTimestamp = time.Now()
batchInfo.Debug.StartTimestamp = now
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1 batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
selectionCfg := &txselector.SelectionConfig{ selectionCfg := &txselector.SelectionConfig{
@@ -405,26 +300,22 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
var auths [][]byte var auths [][]byte
var coordIdxs []common.Idx var coordIdxs []common.Idx
// Check if the slot is not yet fulfilled
slotCommitted := false
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, errForgeBeforeDelay
}
// 1. Decide if we forge L2Tx or L1+L2Tx // 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) { if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true batchInfo.L1Batch = true
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum { defer func() {
// If there's no error, update the parameters related
// to the last L1Batch forged
if err == nil {
p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.lastForgeL1TxsNum++
}
}()
if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, tracerr.Wrap(errLastL1BatchNotSynced) return nil, tracerr.Wrap(errLastL1BatchNotSynced)
} }
// 2a: L1+L2 txs // 2a: L1+L2 txs
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1) l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -443,43 +334,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
l1UserTxsExtra = nil l1UserTxsExtra = nil
} }
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the L1UserTxs in the queue following
// the one we are trying to forge.
nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues and forge the
// L1UserTxs in the future. Otherwise, skip.
if len(nextL1UserTxs) == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, errForgeNoTxsBeforeDelay
}
}
if batchInfo.L1Batch {
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
}
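A condensed sketch of the skip rules enforced above via errForgeBeforeDelay and errForgeNoTxsBeforeDelay (it omits the check for L1UserTxs waiting in the next queue; names are illustrative):

package main

import (
    "fmt"
    "time"
)

// Once the slot already has a forged batch (or we already forged in it),
// wait ForgeDelay between batches, and wait ForgeNoTxsDelay when there is
// nothing to put in the batch.
func skipForging(slotCommitted bool, sinceLastForge time.Duration,
    forgeDelay, forgeNoTxsDelay time.Duration, haveTxs bool) bool {
    if !slotCommitted {
        return false // first batch of the slot: forge as soon as possible
    }
    if sinceLastForge < forgeDelay {
        return true
    }
    if !haveTxs && sinceLastForge < forgeNoTxsDelay {
        return true
    }
    return false
}

func main() {
    fmt.Println(skipForging(true, 2*time.Second, 10*time.Second, time.Minute, true))   // true: within ForgeDelay
    fmt.Println(skipForging(true, 20*time.Second, 10*time.Second, time.Minute, false)) // true: empty batch, within ForgeNoTxsDelay
    fmt.Println(skipForging(false, 0, 10*time.Second, time.Minute, false))             // false: slot not yet committed
}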
// 3. Save metadata from TxSelector output for BatchNum // 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxsExtra = l1UserTxsExtra batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs batchInfo.L1CoordTxs = l1CoordTxs
@@ -524,8 +378,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
p.cfg.debugBatchStore(batchInfo) p.cfg.debugBatchStore(batchInfo)
log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum) log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil return batchInfo, nil
} }
@@ -547,12 +399,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool { func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
// Take the lastL1BatchBlockNum as the biggest between the last // Take the lastL1BatchBlockNum as the biggest between the last
// scheduled one, and the synchronized one. // scheduled one, and the synchronized one.
lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum { if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
} }
// Set Debug information // Set Debug information
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
batchInfo.Debug.L1BatchBlockScheduleDeadline = batchInfo.Debug.L1BatchBlockScheduleDeadline =


@@ -25,14 +25,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestPipelineShouldL1L2Batch(t *testing.T) { func TestPipelineShouldL1L2Batch(t *testing.T) {
ethClientSetup := test.NewClientSetupExample() ethClientSetup := test.NewClientSetupExample()
ethClientSetup.ChainID = big.NewInt(int64(chainID)) ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -85,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
// //
// Scheduled L1Batch // Scheduled L1Batch
// //
pipeline.state.lastScheduledL1BatchBlockNum = startBlock pipeline.lastScheduledL1BatchBlockNum = startBlock
stats.Sync.LastL1BatchBlock = startBlock - 10 stats.Sync.LastL1BatchBlock = startBlock - 10
// We are one block before the timeout range * 0.5 // We are one block before the timeout range * 0.5
@@ -136,11 +128,6 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
blocks, err := tc.GenerateBlocksFromInstructions(set) blocks, err := tc.GenerateBlocksFromInstructions(set)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, blocks) require.NotNil(t, blocks)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("0")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
ethAddTokens(blocks, ethClient) ethAddTokens(blocks, ethClient)
err = ethClient.CtlAddBlocks(blocks) err = ethClient.CtlAddBlocks(blocks)
@@ -185,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
// users with positive balances // users with positive balances
tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB) tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
syncStats := sync.Stats() syncStats := sync.Stats()
batchNum := syncStats.Sync.LastBatch.BatchNum batchNum := common.BatchNum(syncStats.Sync.LastBatch)
syncSCVars := sync.SCVars() syncSCVars := sync.SCVars()
pipeline, err := coord.newPipeline(ctx) pipeline, err := coord.newPipeline(ctx)


@@ -13,23 +13,13 @@ import (
// PurgerCfg is the purger configuration // PurgerCfg is the purger configuration
type PurgerCfg struct { type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated // PurgeBatchDelay is the delay between batches to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBatchDelay int64 PurgeBatchDelay int64
// InvalidateBatchDelay is the delay between batches to mark invalid // InvalidateBatchDelay is the delay between batches to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBatchDelay int64 InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated // PurgeBlockDelay is the delay between blocks to purge outdated transactions
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
PurgeBlockDelay int64 PurgeBlockDelay int64
// InvalidateBlockDelay is the delay between blocks to mark invalid // InvalidateBlockDelay is the delay between blocks to mark invalid transactions
// transactions due to nonce lower than the account nonce.
InvalidateBlockDelay int64 InvalidateBlockDelay int64
} }
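For illustration, a PurgerCfg literal with made-up values (assuming the usual module import path; the numbers are examples, not recommendations):

package main

import (
    "fmt"

    "github.com/hermeznetwork/hermez-node/coordinator"
)

func main() {
    // Purge outdated txs at most every 10 batches / 10 blocks, and mark
    // invalid-nonce txs at most every 4 batches / 4 blocks.
    cfg := coordinator.PurgerCfg{
        PurgeBatchDelay:      10,
        InvalidateBatchDelay: 4,
        PurgeBlockDelay:      10,
        InvalidateBlockDelay: 4,
    }
    fmt.Printf("%+v\n", cfg)
}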


@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
return l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil) return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
} }
func newStateDB(t *testing.T) *statedb.LocalStateDB { func newStateDB(t *testing.T) *statedb.LocalStateDB {


@@ -4,13 +4,11 @@ import (
"context" "context"
"fmt" "fmt"
"math/big" "math/big"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
@@ -37,20 +35,12 @@ type TxManager struct {
vars synchronizer.SCVariables vars synchronizer.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum queue []*BatchInfo
minPipelineNum int
queue Queue
// lastSuccessBatch stores the last BatchNum whose forge call was confirmed // lastSuccessBatch stores the last BatchNum whose forge call was confirmed
lastSuccessBatch common.BatchNum lastSuccessBatch common.BatchNum
// lastPendingBatch common.BatchNum lastPendingBatch common.BatchNum
// accNonce is the account nonce in the last mined block (due to mined txs) lastSuccessNonce uint64
accNonce uint64 lastPendingNonce uint64
// accNextNonce is the nonce that we should use to send the next tx.
// In some cases this will be a reused nonce of an already pending tx.
accNextNonce uint64
lastSentL1BatchBlockNum int64
} }
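A toy model of the nonce bookkeeping these fields describe (illustrative type, not the repo's API): accNextNonce advances for every new forgeBatch transaction, while a resend reuses the nonce of the transaction it replaces.

package main

import "fmt"

type nonceTracker struct {
    accNonce     uint64 // nonce at the last mined block
    accNextNonce uint64 // nonce for the next new tx
}

func (n *nonceTracker) nextNonce(resend bool, previousTxNonce uint64) uint64 {
    if resend {
        return previousTxNonce // replace the pending tx, same nonce
    }
    nonce := n.accNextNonce
    n.accNextNonce = nonce + 1
    return nonce
}

func main() {
    n := &nonceTracker{accNonce: 7, accNextNonce: 7}
    first := n.nextNonce(false, 0)
    fmt.Println(first)                    // 7: new tx
    fmt.Println(n.nextNonce(false, 0))    // 8: another new tx
    fmt.Println(n.nextNonce(true, first)) // 7: resend reuses the old nonce
}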
// NewTxManager creates a new TxManager // NewTxManager creates a new TxManager
@@ -64,11 +54,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil) lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
log.Infow("TxManager started", "nonce", accNonce) lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
if err != nil {
return nil, err
}
if lastSuccessNonce != lastPendingNonce {
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
lastSuccessNonce, lastPendingNonce))
}
log.Infow("TxManager started", "nonce", lastSuccessNonce)
return &TxManager{ return &TxManager{
cfg: *cfg, cfg: *cfg,
ethClient: ethClient, ethClient: ethClient,
@@ -76,7 +74,6 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
coord: coord, coord: coord,
batchCh: make(chan *BatchInfo, queueLen), batchCh: make(chan *BatchInfo, queueLen),
statsVarsCh: make(chan statsVars, queueLen), statsVarsCh: make(chan statsVars, queueLen),
discardPipelineCh: make(chan int, queueLen),
account: accounts.Account{ account: accounts.Account{
Address: *address, Address: *address,
}, },
@@ -85,10 +82,8 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
vars: *initSCVars, vars: *initSCVars,
minPipelineNum: 0, lastSuccessNonce: lastSuccessNonce,
queue: NewQueue(), lastPendingNonce: lastPendingNonce,
accNonce: accNonce,
accNextNonce: accNonce,
}, nil }, nil
} }
@@ -109,17 +104,16 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
} }
} }
// DiscardPipeline is a thread safe method to notify about a discarded pipeline
// due to a reorg
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
select {
case t.discardPipelineCh <- pipelineNum:
case <-ctx.Done():
}
}
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) { func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&t.vars, vars) if vars.Rollup != nil {
t.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
t.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
t.vars.WDelayer = *vars.WDelayer
}
} }
// NewAuth generates a new auth object for an ethereum transaction // NewAuth generates a new auth object for an ethereum transaction
@@ -128,14 +122,10 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if t.cfg.GasPriceIncPerc != 0 {
inc := new(big.Int).Set(gasPrice) inc := new(big.Int).Set(gasPrice)
inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc)) const gasPriceDiv = 100
// nolint reason: to calculate percentages we use 100 inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
gasPrice.Add(gasPrice, inc) gasPrice.Add(gasPrice, inc)
}
// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice) // log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID) auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -144,13 +134,6 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
} }
auth.Value = big.NewInt(0) // in wei auth.Value = big.NewInt(0) // in wei
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs // TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
// This requires a function that estimates the gas usage of the
// forgeBatch call based on the contents of the ForgeBatch args:
// - length of l2txs
// - length of l1Usertxs
// - length of l1CoordTxs with authorization signature
// - length of l1CoordTxs without authorization signature
// - etc.
auth.GasLimit = 1000000 auth.GasLimit = 1000000
auth.GasPrice = gasPrice auth.GasPrice = gasPrice
auth.Nonce = nil auth.Nonce = nil
@@ -158,83 +141,34 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
return auth, nil return auth, nil
} }
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error { func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
nextBlock := t.stats.Eth.LastBlock.Num + 1 // TODO: Check if we can forge in the next blockNum, abort if we can't
if !t.canForgeAt(nextBlock) { batchInfo.Debug.Status = StatusSent
return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock)) batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
} batchInfo.Debug.SendTimestamp = time.Now()
if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch { batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock)) batchInfo.Debug.StartTimestamp).Seconds()
}
margin := t.cfg.SendBatchBlocksMarginCheck
if margin != 0 {
if !t.canForgeAt(nextBlock + margin) {
return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
margin, nextBlock))
}
if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
margin, nextBlock))
}
}
return nil
}
func addPerc(v *big.Int, p int64) *big.Int {
r := new(big.Int).Set(v)
r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolint:gomnd
return r.Add(v, r)
}
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
var ethTx *types.Transaction var ethTx *types.Transaction
var err error var err error
auth, err := t.NewAuth(ctx) auth, err := t.NewAuth(ctx)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
auth.Nonce = big.NewInt(int64(t.accNextNonce)) auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
if resend { t.lastPendingNonce++
auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
}
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ { for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
auth.GasPrice, t.cfg.MaxGasPrice))
}
// RollupForgeBatch() calls ethclient.SendTransaction()
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth) ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
// We check the errors via strings because we compare the if err != nil {
// error definitions from geth against the error strings returned // if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
// by the client via RPC. // log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
if err == nil { // "block", t.stats.Eth.LastBlock.Num+1)
break // return tracerr.Wrap(err)
} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) { // }
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Add(auth.Nonce, big.NewInt(1))
attempt--
} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
attempt--
} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if err != nil {
log.Errorw("TxManager ethClient.RollupForgeBatch", log.Errorw("TxManager ethClient.RollupForgeBatch",
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1, "attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
"batchNum", batchInfo.BatchNum) "batchNum", batchInfo.BatchNum)
} else {
break
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -245,29 +179,10 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err)) return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
} }
if !resend {
t.accNextNonce = auth.Nonce.Uint64() + 1
}
batchInfo.EthTx = ethTx batchInfo.EthTx = ethTx
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash()) log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
now := time.Now()
batchInfo.SendTimestamp = now
if resend {
batchInfo.Debug.ResendNum++
}
batchInfo.Debug.Status = StatusSent
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
t.lastPendingBatch = batchInfo.BatchNum
if !resend {
if batchInfo.L1Batch {
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
}
}
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil { if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -310,20 +225,13 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) { func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
receipt := batchInfo.Receipt receipt := batchInfo.Receipt
if receipt != nil { if receipt != nil {
if batchInfo.EthTx.Nonce()+1 > t.accNonce {
t.accNonce = batchInfo.EthTx.Nonce() + 1
}
if receipt.Status == types.ReceiptStatusFailed { if receipt.Status == types.ReceiptStatusFailed {
batchInfo.Debug.Status = StatusFailed batchInfo.Debug.Status = StatusFailed
t.cfg.debugBatchStore(batchInfo)
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber) _, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash, log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(), "batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
"err", err) "err", err)
batchInfo.EthTxErr = err
if batchInfo.BatchNum <= t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum - 1
}
t.cfg.debugBatchStore(batchInfo)
return nil, tracerr.Wrap(fmt.Errorf( return nil, tracerr.Wrap(fmt.Errorf(
"ethereum transaction receipt status is failed: %w", err)) "ethereum transaction receipt status is failed: %w", err))
} else if receipt.Status == types.ReceiptStatusSuccessful { } else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -331,17 +239,6 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64() batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum - batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
batchInfo.Debug.StartBlockNum batchInfo.Debug.StartBlockNum
if batchInfo.Debug.StartToMineDelay == 0 {
if block, err := t.ethClient.EthBlockByNumber(ctx,
receipt.BlockNumber.Int64()); err != nil {
log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
} else {
batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.SendTimestamp).Seconds()
batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
}
}
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
if batchInfo.BatchNum > t.lastSuccessBatch { if batchInfo.BatchNum > t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum t.lastSuccessBatch = batchInfo.BatchNum
@@ -353,73 +250,10 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
return nil, nil return nil, nil
} }
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
// Queue of BatchInfos
type Queue struct {
list []*BatchInfo
// nonceByBatchNum map[common.BatchNum]uint64
next int
}
// NewQueue returns a new queue
func NewQueue() Queue {
return Queue{
list: make([]*BatchInfo, 0),
// nonceByBatchNum: make(map[common.BatchNum]uint64),
next: 0,
}
}
// Len is the length of the queue
func (q *Queue) Len() int {
return len(q.list)
}
// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
if position >= len(q.list) {
return nil
}
return q.list[position]
}
// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
if len(q.list) == 0 {
return 0, nil
}
defer func() { q.next = (q.next + 1) % len(q.list) }()
return q.next, q.list[q.next]
}
// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
// batchInfo := q.list[position]
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
q.list = append(q.list[:position], q.list[position+1:]...)
if len(q.list) == 0 {
q.next = 0
} else {
q.next = position % len(q.list)
}
}
// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
q.list = append(q.list, batchInfo)
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// nonce, ok := q.nonceByBatchNum[batchNum]
// return nonce, ok
// }
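A minimal re-implementation of the Queue above with ints in place of *BatchInfo, showing the round-robin cursor behaviour of Next and Remove:

package main

import "fmt"

type queue struct {
    list []int
    next int
}

func (q *queue) Push(v int) { q.list = append(q.list, v) }

// Next returns the current position and value, then advances the cursor.
func (q *queue) Next() (int, int, bool) {
    if len(q.list) == 0 {
        return 0, 0, false
    }
    defer func() { q.next = (q.next + 1) % len(q.list) }()
    return q.next, q.list[q.next], true
}

// Remove drops the entry at pos and keeps the cursor within bounds.
func (q *queue) Remove(pos int) {
    q.list = append(q.list[:pos], q.list[pos+1:]...)
    if len(q.list) == 0 {
        q.next = 0
    } else {
        q.next = pos % len(q.list)
    }
}

func main() {
    q := &queue{}
    q.Push(101)
    q.Push(102)
    q.Push(103)
    for i := 0; i < 4; i++ {
        _, v, _ := q.Next()
        fmt.Print(v, " ") // 101 102 103 101: round-robin over pending batches
    }
    fmt.Println()
    q.Remove(1) // batch 102 confirmed and dropped
    _, v, _ := q.Next()
    fmt.Println(v) // 103: the cursor stays in range after Remove
}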
// Run the TxManager // Run the TxManager
func (t *TxManager) Run(ctx context.Context) { func (t *TxManager) Run(ctx context.Context) {
waitCh := time.After(longWaitDuration) next := 0
waitDuration := longWaitDuration
var statsVars statsVars var statsVars statsVars
select { select {
@@ -429,7 +263,7 @@ func (t *TxManager) Run(ctx context.Context) {
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
log.Infow("TxManager: received initial statsVars", log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum) "block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
for { for {
select { select {
@@ -439,27 +273,8 @@ func (t *TxManager) Run(ctx context.Context) {
case statsVars := <-t.statsVarsCh: case statsVars := <-t.statsVarsCh:
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
case pipelineNum := <-t.discardPipelineCh:
t.minPipelineNum = pipelineNum + 1
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
}
case batchInfo := <-t.batchCh: case batchInfo := <-t.batchCh:
if batchInfo.PipelineNum < t.minPipelineNum { if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
}
if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
log.Warnw("TxManager: shouldSend", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
continue
}
if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
// If we reach here it's because our ethNode has // If we reach here it's because our ethNode has
@@ -467,20 +282,19 @@ func (t *TxManager) Run(ctx context.Context) {
// ethereum. This could be due to the ethNode // ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that // failure, or an invalid transaction (that
// can't be mined) // can't be mined)
log.Warnw("TxManager: forgeBatch send failed", "err", err, t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch send: %v", err)})
continue continue
} }
t.queue.Push(batchInfo) t.queue = append(t.queue, batchInfo)
waitCh = time.After(t.cfg.TxManagerCheckInterval) waitDuration = t.cfg.TxManagerCheckInterval
case <-waitCh: case <-time.After(waitDuration):
queuePosition, batchInfo := t.queue.Next() if len(t.queue) == 0 {
if batchInfo == nil { waitDuration = longWaitDuration
waitCh = time.After(longWaitDuration)
continue continue
} }
current := next
next = (current + 1) % len(t.queue)
batchInfo := t.queue[current]
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil { if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
@@ -490,8 +304,7 @@ func (t *TxManager) Run(ctx context.Context) {
// if it was not mined, mined and successful or // if it was not mined, mined and successful or
// mined and failed. This could be due to the // mined and failed. This could be due to the
// ethNode failure. // ethNode failure.
t.coord.SendMsg(ctx, MsgStopPipeline{ t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
} }
confirm, err := t.handleReceipt(ctx, batchInfo) confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -499,106 +312,32 @@ func (t *TxManager) Run(ctx context.Context) {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
// Transaction was rejected // Transaction was rejected
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil { t.queue = append(t.queue[:current], t.queue[current+1:]...)
continue if len(t.queue) == 0 {
} else if err != nil { next = 0
log.Errorw("TxManager: removeBadBatchInfos", "err", err) } else {
continue next = current % len(t.queue)
} }
t.coord.SendMsg(ctx, MsgStopPipeline{ t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
continue
} }
now := time.Now()
if !t.cfg.EthNoReuseNonce && confirm == nil &&
now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
continue
} else if err != nil {
// If we reach here it's because our ethNode has
// been unable to send the transaction to
// ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that
// can't be mined)
log.Warnw("TxManager: forgeBatch resend failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
continue
}
}
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks { if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
log.Debugw("TxManager: forgeBatch tx confirmed", log.Debugw("TxManager tx for RollupForgeBatch confirmed",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum) "batch", batchInfo.BatchNum)
t.queue.Remove(queuePosition) t.queue = append(t.queue[:current], t.queue[current+1:]...)
if len(t.queue) == 0 {
next = 0
} else {
next = current % len(t.queue)
}
} }
} }
} }
} }
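The new Run loop above replaces the dedicated queue type with a plain slice plus a round-robin index. Below is a minimal, self-contained sketch of that pattern; the type and variable names are illustrative, not the repository's own.

package main

import "fmt"

type item struct{ id int }

func main() {
	queue := []item{}
	next := 0

	// push: new batches are simply appended
	queue = append(queue, item{1}, item{2}, item{3})

	for step := 0; step < 5 && len(queue) > 0; step++ {
		// pick the current element and advance the round-robin index
		current := next
		next = (current + 1) % len(queue)
		it := queue[current]
		fmt.Println("checking", it.id)

		// simulate a confirmed (or rejected) element being removed
		if it.id == 2 {
			queue = append(queue[:current], queue[current+1:]...)
			if len(queue) == 0 {
				next = 0
			} else {
				next = current % len(queue)
			}
		}
	}
}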
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error { // nolint reason: this function will be used in the future
next := 0 //nolint:unused
for { func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
batchInfo := t.queue.At(next)
if batchInfo == nil {
break
}
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
return nil
} else if err != nil {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
next++
continue
}
confirm, err := t.handleReceipt(ctx, batchInfo)
if ctx.Err() != nil {
return nil
} else if err != nil {
// Transaction was rejected
if t.minPipelineNum <= batchInfo.PipelineNum {
t.minPipelineNum = batchInfo.PipelineNum + 1
}
t.queue.Remove(next)
continue
}
// If tx is pending but is from a cancelled pipeline, remove it
// from the queue
if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum {
t.queue.Remove(next)
continue
}
}
next++
}
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
if err != nil {
return err
}
if !t.cfg.EthNoReuseNonce {
t.accNextNonce = accNonce
}
return nil
}
func (t *TxManager) canForgeAt(blockNum int64) bool {
return canForge(&t.consts.Auction, &t.vars.Auction, return canForge(&t.consts.Auction, &t.vars.Auction,
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot, &stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
t.cfg.ForgerAddress, blockNum) t.cfg.ForgerAddress, blockNum)
} }
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
}
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
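mustL1L2Batch above forces an L1/L2 batch once ForgeL1L2BatchTimeout-1 blocks have elapsed since the last L1 batch. A small sketch of the arithmetic with made-up numbers (the helper below is illustrative, not the repository's function):

package main

import "fmt"

// mustL1L2BatchSketch mirrors the check above with plain int64 arguments.
func mustL1L2BatchSketch(blockNum, lastL1BatchBlockNum, forgeL1L2BatchTimeout int64) bool {
	return blockNum-lastL1BatchBlockNum >= forgeL1L2BatchTimeout-1
}

func main() {
	// With a timeout of 10 blocks and the last L1 batch at block 100,
	// an L1/L2 batch becomes mandatory at block 109.
	fmt.Println(mustL1L2BatchSketch(108, 100, 10)) // false
	fmt.Println(mustL1L2BatchSketch(109, 100, 10)) // true
}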

View File

@@ -1,15 +0,0 @@
package coordinator
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddPerc(t *testing.T) {
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}
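The deleted test exercises addPerc, which by these vectors adds an integer percentage to a big.Int. The sketch below is reconstructed from the test vectors only; the real implementation may differ, for example in rounding.

package main

import (
	"fmt"
	"math/big"
)

// addPercSketch adds perc% to v using big.Int arithmetic, matching the
// vectors above (100+10% = 110, 10+20% = 12, 1000+50% = 1500).
func addPercSketch(v *big.Int, perc int64) *big.Int {
	delta := new(big.Int).Mul(v, big.NewInt(perc))
	delta.Div(delta, big.NewInt(100))
	return new(big.Int).Add(v, delta)
}

func main() {
	fmt.Println(addPercSketch(big.NewInt(100), 10)) // 110
	fmt.Println(addPercSketch(big.NewInt(10), 20))  // 12
}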

View File

@@ -61,7 +61,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
timestamp, timestamp,
hash hash
) VALUES %s;`, ) VALUES %s;`,
blocks, blocks[:],
)) ))
} }
@@ -164,19 +164,6 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil return nil
} }
// GetBatch returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
batchNum,
)
return &batch, err
}
// GetAllBatches retrieve all batches from the DB // GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) { func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch var batches []*common.Batch
@@ -221,18 +208,6 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
return batchNum, tracerr.Wrap(row.Scan(&batchNum)) return batchNum, tracerr.Wrap(row.Scan(&batchNum))
} }
// GetLastBatch returns the last forged batch
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
)
return &batch, err
}
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch // GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) { func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
@@ -273,7 +248,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;", "INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
bids, bids[:],
)) ))
} }
@@ -324,7 +299,7 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
return tracerr.Wrap(db.BulkInsert( return tracerr.Wrap(db.BulkInsert(
d, d,
"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;", "INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
coordinators, coordinators[:],
)) ))
} }
@@ -340,7 +315,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
d, d,
"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+ "INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;", "instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
exitTree, exitTree[:],
)) ))
} }
@@ -443,12 +418,11 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
symbol, symbol,
decimals decimals
) VALUES %s;`, ) VALUES %s;`,
tokens, tokens[:],
)) ))
} }
// UpdateTokenValue updates the USD value of a token. Value is the price in // UpdateTokenValue updates the USD value of a token
// USD of a normalized token (token amount divided by 10^decimals)
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error { func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
// Sanitize symbol // Sanitize symbol
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ") tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
@@ -515,7 +489,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
bjj, bjj,
eth_addr eth_addr
) VALUES %s;`, ) VALUES %s;`,
accounts, accounts[:],
)) ))
} }
@@ -529,37 +503,6 @@ func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err) return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
} }
// AddAccountUpdates inserts accUpdates into the DB
func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
return tracerr.Wrap(hdb.addAccountUpdates(hdb.db, accUpdates))
}
func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
if len(accUpdates) == 0 {
return nil
}
return tracerr.Wrap(db.BulkInsert(
d,
`INSERT INTO account_update (
eth_block_num,
batch_num,
idx,
nonce,
balance
) VALUES %s;`,
accUpdates,
))
}
// GetAllAccountUpdates returns all the AccountUpdate from the DB
func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
var accUpdates []*common.AccountUpdate
err := meddler.QueryAll(
hdb.db, &accUpdates,
"SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
)
return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
}
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx. // AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user, // If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
// BatchNum should be null, and the value will be set by a trigger when a batch forges the tx. // BatchNum should be null, and the value will be set by a trigger when a batch forges the tx.
@@ -678,7 +621,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
fee, fee,
nonce nonce
) VALUES %s;`, ) VALUES %s;`,
txs, txs[:],
)) ))
} }
@@ -813,7 +756,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
block_stamp, block_stamp,
withdrawals withdrawals
) VALUES %s;`, ) VALUES %s;`,
bucketUpdates, bucketUpdates[:],
)) ))
} }
@@ -845,7 +788,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
eth_addr, eth_addr,
value_usd value_usd
) VALUES %s;`, ) VALUES %s;`,
tokenExchanges, tokenExchanges[:],
)) ))
} }
@@ -873,7 +816,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
token_addr, token_addr,
amount amount
) VALUES %s;`, ) VALUES %s;`,
escapeHatchWithdrawals, escapeHatchWithdrawals[:],
)) ))
} }
@@ -1050,11 +993,6 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Add accountBalances if it exists
if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
return tracerr.Wrap(err)
}
// Set the EffectiveAmount and EffectiveDepositAmount of all the // Set the EffectiveAmount and EffectiveDepositAmount of all the
// L1UserTxs that have been forged in this batch // L1UserTxs that have been forged in this batch
if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil { if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {

View File

@@ -203,10 +203,6 @@ func TestBatches(t *testing.T) {
fetchedLastBatchNum, err := historyDB.GetLastBatchNum() fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum) assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
// Test GetLastL1TxsNum // Test GetLastL1TxsNum
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum() fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
assert.NoError(t, err) assert.NoError(t, err)
@@ -215,12 +211,6 @@ func TestBatches(t *testing.T) {
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum() fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum) assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
} }
func TestBids(t *testing.T) { func TestBids(t *testing.T) {
@@ -377,22 +367,6 @@ func TestAccounts(t *testing.T) {
accs[i].Balance = nil accs[i].Balance = nil
assert.Equal(t, accs[i], acc) assert.Equal(t, accs[i], acc)
} }
// Test AccountBalances
accUpdates := make([]common.AccountUpdate, len(accs))
for i, acc := range accs {
accUpdates[i] = common.AccountUpdate{
EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
BatchNum: acc.BatchNum,
Idx: acc.Idx,
Nonce: common.Nonce(i),
Balance: big.NewInt(int64(i)),
}
}
err = historyDB.AddAccountUpdates(accUpdates)
require.NoError(t, err)
fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
require.NoError(t, err)
assert.Equal(t, accUpdates, fetchedAccBalances)
} }
func TestTxs(t *testing.T) { func TestTxs(t *testing.T) {
@@ -1211,8 +1185,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
set = append(set, til.Instruction{Typ: til.TypeNewBlock}) set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// Transfers // Transfers
const numBlocks int = 30 for x := 0; x < 6000; x++ {
for x := 0; x < numBlocks; x++ {
set = append(set, til.Instruction{ set = append(set, til.Instruction{
Typ: common.TxTypeTransfer, Typ: common.TxTypeTransfer,
TokenID: common.TokenID(0), TokenID: common.TokenID(0),
@@ -1236,20 +1209,19 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
err = tc.FillBlocksExtra(blocks, &tilCfgExtra) err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err) require.NoError(t, err)
const numBatches int = 2 + numBlocks const numBatches int = 6002
const blockNum = 4 + numBlocks const numTx int = 6003
const blockNum = 6005 - 1
// Sanity check // Sanity check
require.Equal(t, blockNum, len(blocks)) require.Equal(t, blockNum, len(blocks))
// Adding one batch per block // Adding one batch per block
// batch frequency can be chosen // batch frequency can be chosen
const blockTime time.Duration = 3600 * time.Second const frequency int = 15
now := time.Now()
require.NoError(t, err)
for i := range blocks { for i := range blocks {
blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime) blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i)))
err = historyDB.AddBlockSCData(&blocks[i]) err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1257,10 +1229,16 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1) assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1) // Frequency is not exactly the desired one, some decimals may appear
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1) assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency))
assert.Less(t, res.BatchFrequency, float64(frequency+1))
// Truncate frequency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees

View File

@@ -425,13 +425,12 @@ func (k *KVDB) MakeCheckpoint() error {
} }
// if checkpoint BatchNum already exist in disk, delete it // if checkpoint BatchNum already exist in disk, delete it
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(checkpointPath); err != nil { if err := os.RemoveAll(checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
// execute Checkpoint // execute Checkpoint
@@ -452,25 +451,12 @@ func (k *KVDB) MakeCheckpoint() error {
return nil return nil
} }
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
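The checkpoint hunks in this file differ only in how an unexpected os.Stat error is handled before removing an existing directory. A minimal sketch of the stricter variant follows; the helper name is illustrative.

package main

import (
	"fmt"
	"os"
)

// removeIfExists deletes path if it exists and propagates any unexpected
// os.Stat error (permissions, I/O, ...) instead of silently ignoring it.
func removeIfExists(path string) error {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return nil // nothing to do
	} else if err != nil {
		return err // unexpected stat error
	}
	return os.RemoveAll(path)
}

func main() {
	fmt.Println(removeIfExists("/tmp/some-checkpoint-dir"))
}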
// DeleteCheckpoint removes the checkpoint of the given batchNum, if it exists // DeleteCheckpoint removes the checkpoint of the given batchNum, if it exists
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error { func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum)) checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum)) return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
} else if err != nil {
return tracerr.Wrap(err)
} }
return os.RemoveAll(checkpointPath) return os.RemoveAll(checkpointPath)
@@ -534,8 +520,6 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
if _, err := os.Stat(source); os.IsNotExist(err) { if _, err := os.Stat(source); os.IsNotExist(err) {
// if kvdb does not have checkpoint at batchNum, return err // if kvdb does not have checkpoint at batchNum, return err
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source)) return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
} else if err != nil {
return tracerr.Wrap(err)
} }
// By locking we allow calling MakeCheckpointFromTo from multiple // By locking we allow calling MakeCheckpointFromTo from multiple
// places at the same time for the same stateDB. This allows the // places at the same time for the same stateDB. This allows the
@@ -549,13 +533,12 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
func pebbleMakeCheckpoint(source, dest string) error { func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint // Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); os.IsNotExist(err) { if _, err := os.Stat(dest); !os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(dest); err != nil { if err := os.RemoveAll(dest); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
sto, err := pebble.NewPebbleStorage(source, false) sto, err := pebble.NewPebbleStorage(source, false)

View File

@@ -1,18 +1,12 @@
package l2db package l2db
import ( import (
"fmt"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/russross/meddler" "github.com/russross/meddler"
) )
var (
errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB // AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire() cancel, err := l2db.apiConnCon.Acquire()
@@ -48,54 +42,20 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
row := l2db.db.QueryRow(`SELECT "SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
($1::NUMERIC * token.usd * fee_percentage($2::NUMERIC)) / common.PoolL2TxStatePending,
(10.0 ^ token.decimals::NUMERIC) )
FROM token WHERE token.token_id = $3;`, var totalTxs uint32
tx.AmountFloat, tx.Fee, tx.TokenID) if err := row.Scan(&totalTxs); err != nil {
var feeUSD float64
if err := row.Scan(&feeUSD); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if feeUSD < l2db.minFeeUSD { if totalTxs >= l2db.maxTxs {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)", return tracerr.New(
feeUSD, l2db.minFeeUSD)) "The pool is at full capacity. More transactions are not accepted currently",
)
} }
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
if err != nil {
return err
}
valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
if err != nil {
return err
}
values, err := meddler.Default.Values(tx, false)
if err != nil {
return err
}
q := fmt.Sprintf(
`INSERT INTO tx_pool (%s)
SELECT %s
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
namesPart, valuesPart,
len(values)+1, len(values)+2) //nolint:gomnd
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
res, err := l2db.db.Exec(q, values...)
if err != nil {
return tracerr.Wrap(err)
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return tracerr.Wrap(err)
}
if rowsAffected == 0 {
return tracerr.Wrap(errPoolFull)
}
return nil
} }
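The AddTxAPI query on the left prices the fee in USD before accepting a tx: the amount times the token's USD price times the fee percentage, normalized by 10^decimals, compared against minFeeUSD. A sketch of that computation; the names and the feePercentage argument are illustrative, since the real query resolves the percentage from the FeeSelector via fee_percentage().

package main

import (
	"fmt"
	"math"
)

// feeUSDSketch mirrors the SQL above: fee in USD of a tx, normalized by the
// token decimals.
func feeUSDSketch(amountFloat, tokenUSD, feePercentage float64, decimals uint64) float64 {
	return amountFloat * tokenUSD * feePercentage / math.Pow(10, float64(decimals))
}

func main() {
	const minFeeUSD = 0.01
	fee := feeUSDSketch(6000, 1.0, 0.10, 3) // 6 tokens at 1 USD, ~10% fee
	fmt.Println(fee, fee >= minFeeUSD)      // 0.6 true
}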
// selectPoolTxAPI select part of queries to get PoolL2TxRead // selectPoolTxAPI select part of queries to get PoolL2TxRead

View File

@@ -25,7 +25,6 @@ type L2DB struct {
safetyPeriod common.BatchNum safetyPeriod common.BatchNum
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
@@ -36,7 +35,6 @@ func NewL2DB(
db *sqlx.DB, db *sqlx.DB,
safetyPeriod common.BatchNum, safetyPeriod common.BatchNum,
maxTxs uint32, maxTxs uint32,
minFeeUSD float64,
TTL time.Duration, TTL time.Duration,
apiConnCon *db.APIConnectionController, apiConnCon *db.APIConnectionController,
) *L2DB { ) *L2DB {
@@ -45,7 +43,6 @@ func NewL2DB(
safetyPeriod: safetyPeriod, safetyPeriod: safetyPeriod,
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
apiConnCon: apiConnCon, apiConnCon: apiConnCon,
} }
} }
@@ -76,6 +73,24 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
)) ))
} }
// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
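AddTx counts pending txs and then inserts in two separate statements, so concurrent callers can both pass the capacity check; the AddTxAPI variant shown earlier folds the check into a single INSERT ... SELECT ... WHERE statement so the limit is enforced within one statement. A reduced sketch of that guarded insert, with the column list trimmed for illustration:

// Package poolsketch illustrates the capacity-guarded insert pattern.
package poolsketch

import (
	"database/sql"
	"fmt"
)

// addTxGuarded inserts a row only while the number of pending txs is below
// maxTxs, so the check and the insert cannot race. Placeholders assume a
// PostgreSQL driver, as in the repository.
func addTxGuarded(db *sql.DB, txID []byte, state string, maxTxs uint32) error {
	res, err := db.Exec(
		`INSERT INTO tx_pool (tx_id, state)
		 SELECT $1, $2
		 WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $2) < $3;`,
		txID, state, maxTxs,
	)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if n == 0 {
		return fmt.Errorf("the pool is at full capacity")
	}
	return nil
}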
// UpdateTxsInfo updates the parameter Info of the pool transactions // UpdateTxsInfo updates the parameter Info of the pool transactions
func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error { func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
if len(txs) == 0 { if len(txs) == 0 {
@@ -107,8 +122,9 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
return nil return nil
} }
// NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx // AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite { // but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
// transform tx from *common.PoolL2Tx to PoolL2TxWrite // transform tx from *common.PoolL2Tx to PoolL2TxWrite
insertTx := &PoolL2TxWrite{ insertTx := &PoolL2TxWrite{
TxID: tx.TxID, TxID: tx.TxID,
@@ -150,13 +166,6 @@ func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
f := new(big.Float).SetInt(tx.Amount) f := new(big.Float).SetInt(tx.Amount)
amountF, _ := f.Float64() amountF, _ := f.Float64()
insertTx.AmountFloat = amountF insertTx.AmountFloat = amountF
return insertTx
}
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
// but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
// insert tx // insert tx
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx)) return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
} }
@@ -167,8 +176,7 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx, tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount, rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type, tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
(fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) / fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update
(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id ` FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
// GetTx return the specified Tx in common.PoolL2Tx format // GetTx return the specified Tx in common.PoolL2Tx format
@@ -346,14 +354,3 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
) )
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// PurgeByExternalDelete deletes all pending transactions marked with true in
// the `external_delete` column. An external process can set this column to
// true to instruct the coordinator to delete the tx when possible.
func (l2db *L2DB) PurgeByExternalDelete() error {
_, err := l2db.db.Exec(
`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
common.PoolL2TxStatePending,
)
return tracerr.Wrap(err)
}

View File

@@ -1,8 +1,8 @@
package l2db package l2db
import ( import (
"database/sql" "math"
"fmt" "math/big"
"os" "os"
"testing" "testing"
"time" "time"
@@ -20,14 +20,12 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var decimals = uint64(3)
var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
var l2DB *L2DB var l2DB *L2DB
var l2DBWithACC *L2DB var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB var historyDB *historydb.HistoryDB
var tc *til.Context var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD var tokens map[common.TokenID]historydb.TokenWithUSD
var tokensValue map[common.TokenID]float64
var accs map[common.Idx]common.Account var accs map[common.Idx]common.Account
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@@ -37,9 +35,9 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, 10, 1000, 0.0, 24*time.Hour, nil) l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, nil) historyDB = historydb.NewHistoryDB(db, nil)
// Run tests // Run tests
@@ -60,10 +58,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
AddToken(1) AddToken(1)
AddToken(2) AddToken(2)
CreateAccountDeposit(1) A: 20000 CreateAccountDeposit(1) A: 2000
CreateAccountDeposit(2) A: 20000 CreateAccountDeposit(2) A: 2000
CreateAccountDeposit(1) B: 10000 CreateAccountDeposit(1) B: 1000
CreateAccountDeposit(2) B: 10000 CreateAccountDeposit(2) B: 1000
> batchL1 > batchL1
> batchL1 > batchL1
> block > block
@@ -84,23 +82,15 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for i := range blocks {
block := &blocks[i]
for j := range block.Rollup.AddedTokens {
token := &block.Rollup.AddedTokens[j]
token.Name = fmt.Sprintf("Token %d", token.TokenID)
token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
token.Decimals = decimals
}
}
tokens = make(map[common.TokenID]historydb.TokenWithUSD) tokens = make(map[common.TokenID]historydb.TokenWithUSD)
// tokensValue = make(map[common.TokenID]float64) tokensValue = make(map[common.TokenID]float64)
accs = make(map[common.Idx]common.Account) accs = make(map[common.Idx]common.Account)
value := 5 * 5.389329
now := time.Now().UTC() now := time.Now().UTC()
// Add all blocks except for the last one // Add all blocks except for the last one
for i := range blocks[:len(blocks)-1] { for i := range blocks[:len(blocks)-1] {
if err := historyDB.AddBlockSCData(&blocks[i]); err != nil { err = historyDB.AddBlockSCData(&blocks[i])
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for _, batch := range blocks[i].Rollup.Batches { for _, batch := range blocks[i].Rollup.Batches {
@@ -116,38 +106,39 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
Name: token.Name, Name: token.Name,
Symbol: token.Symbol, Symbol: token.Symbol,
Decimals: token.Decimals, Decimals: token.Decimals,
USD: &tokenValue,
USDUpdate: &now,
} }
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
readToken.USDUpdate = &now
readToken.USD = &value
tokens[token.TokenID] = readToken tokens[token.TokenID] = readToken
// Set value to the tokens }
err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD) // Set value to the tokens (tokens have no symbol)
tokenSymbol := ""
err := historyDB.UpdateTokenValue(tokenSymbol, value)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
}
return nil return nil
} }
func generatePoolL2Txs() ([]common.PoolL2Tx, error) { func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
// Fee = 126 corresponds to ~10%
setPool := ` setPool := `
Type: PoolL2 Type: PoolL2
PoolTransfer(1) A-B: 6000 (126) PoolTransfer(1) A-B: 6 (4)
PoolTransfer(2) A-B: 3000 (126) PoolTransfer(2) A-B: 3 (1)
PoolTransfer(1) B-A: 5000 (126) PoolTransfer(1) B-A: 5 (2)
PoolTransfer(2) B-A: 10000 (126) PoolTransfer(2) B-A: 10 (3)
PoolTransfer(1) A-B: 7000 (126) PoolTransfer(1) A-B: 7 (2)
PoolTransfer(2) A-B: 2000 (126) PoolTransfer(2) A-B: 2 (1)
PoolTransfer(1) B-A: 8000 (126) PoolTransfer(1) B-A: 8 (2)
PoolTransfer(2) B-A: 1000 (126) PoolTransfer(2) B-A: 1 (1)
PoolTransfer(1) A-B: 3000 (126) PoolTransfer(1) A-B: 3 (1)
PoolTransferToEthAddr(2) B-A: 5000 (126) PoolTransferToEthAddr(2) B-A: 5 (2)
PoolTransferToBJJ(2) B-A: 5000 (126) PoolTransferToBJJ(2) B-A: 5 (2)
PoolExit(1) A: 5000 (126) PoolExit(1) A: 5 (2)
PoolExit(2) B: 3000 (126) PoolExit(2) B: 3 (1)
` `
poolL2Txs, err := tc.GeneratePoolL2Txs(setPool) poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
if err != nil { if err != nil {
@@ -162,74 +153,25 @@ func TestAddTxTest(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx) assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone() nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone) assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset) assert.Equal(t, 0, offset)
} }
} }
func TestAddTxAPI(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
oldMaxTxs := l2DBWithACC.maxTxs
// set max number of pending txs that can be kept in the pool to 5
l2DBWithACC.maxTxs = 5
poolL2Txs, err := generatePoolL2Txs()
txs := make([]*PoolL2TxWrite, len(poolL2Txs))
for i := range poolL2Txs {
txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
}
require.NoError(t, err)
require.GreaterOrEqual(t, len(poolL2Txs), 8)
for i := range txs[:5] {
err := l2DBWithACC.AddTxAPI(txs[i])
require.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
err = l2DBWithACC.AddTxAPI(txs[5])
assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
// reset maxTxs to original value
l2DBWithACC.maxTxs = oldMaxTxs
// set minFeeUSD to a higher value than the tx feeUSD to test the error
// of inserting a tx with lower than min fee
oldMinFeeUSD := l2DBWithACC.minFeeUSD
tx := txs[5]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
// set minFeeUSD higher than the tx fee to trigger the error
l2DBWithACC.minFeeUSD = feeAmountUSD + 1
err = l2DBWithACC.AddTxAPI(tx)
require.Error(t, err)
assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
// reset minFeeUSD to original value
l2DBWithACC.minFeeUSD = oldMinFeeUSD
}
func TestUpdateTxsInfo(t *testing.T) { func TestUpdateTxsInfo(t *testing.T) {
err := prepareHistoryDB(historyDB) err := prepareHistoryDB(historyDB)
if err != nil { if err != nil {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) require.NoError(t, err)
@@ -243,7 +185,7 @@ func TestUpdateTxsInfo(t *testing.T) {
for i := range poolL2Txs { for i := range poolL2Txs {
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "test", fetchedTx.Info) assert.Equal(t, "test", fetchedTx.Info)
} }
} }
@@ -261,8 +203,9 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix()) assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
// Set expected fee // Set expected fee
amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD) f := new(big.Float).SetInt(expected.Amount)
expected.AbsoluteFee = amountUSD * expected.Fee.Percentage() amountF, _ := f.Float64()
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee) test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
} }
assert.Equal(t, expected, actual) assert.Equal(t, expected, actual)
@@ -287,28 +230,19 @@ func TestGetPending(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var pendingTxs []*common.PoolL2Tx var pendingTxs []*common.PoolL2Tx
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
pendingTxs = append(pendingTxs, &poolL2Txs[i]) pendingTxs = append(pendingTxs, &poolL2Txs[i])
} }
fetchedTxs, err := l2DB.GetPendingTxs() fetchedTxs, err := l2DB.GetPendingTxs()
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs)) assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i := range fetchedTxs { for i := range fetchedTxs {
assertTx(t, pendingTxs[i], &fetchedTxs[i]) assertTx(t, pendingTxs[i], &fetchedTxs[i])
} }
// Check AbsoluteFee amount
for i := range fetchedTxs {
tx := &fetchedTxs[i]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount,
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
}
} }
func TestStartForging(t *testing.T) { func TestStartForging(t *testing.T) {
@@ -319,13 +253,13 @@ func TestStartForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -333,11 +267,11 @@ func TestStartForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs { for _, id := range startForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -351,13 +285,13 @@ func TestDoneForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -365,7 +299,7 @@ func TestDoneForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -377,12 +311,12 @@ func TestDoneForging(t *testing.T) {
} }
// Done forging txs // Done forging txs
err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs { for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -396,13 +330,13 @@ func TestInvalidate(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
var invalidTxIDs []common.TxID var invalidTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 { if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
randomizer++ randomizer++
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
@@ -410,11 +344,11 @@ func TestInvalidate(t *testing.T) {
} }
// Invalidate txs // Invalidate txs
err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -428,7 +362,7 @@ func TestInvalidateOldNonces(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// Update Accounts currentNonce // Update Accounts currentNonce
var updateAccounts []common.IdxNonce var updateAccounts []common.IdxNonce
var currentNonce = common.Nonce(1) var currentNonce = common.Nonce(1)
@@ -445,13 +379,13 @@ func TestInvalidateOldNonces(t *testing.T) {
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
} }
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
} }
// sanity check // sanity check
require.Greater(t, len(invalidTxIDs), 0) require.Greater(t, len(invalidTxIDs), 0)
err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum) err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
@@ -473,7 +407,7 @@ func TestReorg(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -484,7 +418,7 @@ func TestReorg(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -496,7 +430,7 @@ func TestReorg(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -521,22 +455,22 @@ func TestReorg(t *testing.T) {
// Invalidate txs BEFORE reorgBatch --> nonReorg // Invalidate txs BEFORE reorgBatch --> nonReorg
err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch) err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs in reorgBatch --> Reorg // Done forging txs in reorgBatch --> Reorg
err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch) err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -553,7 +487,7 @@ func TestReorg2(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -564,7 +498,7 @@ func TestReorg2(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) assert.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -576,7 +510,7 @@ func TestReorg2(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -598,22 +532,22 @@ func TestReorg2(t *testing.T) {
} }
// Done forging txs BEFORE reorgBatch --> nonReorg // Done forging txs BEFORE reorgBatch --> nonReorg
err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch) err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs in reorgBatch --> Reorg // Invalidate txs in reorgBatch --> Reorg
err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch) err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
require.NoError(t, err) assert.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
require.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -629,7 +563,7 @@ func TestPurge(t *testing.T) {
var poolL2Tx []common.PoolL2Tx var poolL2Tx []common.PoolL2Tx
for i := 0; i < generateTx; i++ { for i := 0; i < generateTx; i++ {
poolL2TxAux, err := generatePoolL2Txs() poolL2TxAux, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
poolL2Tx = append(poolL2Tx, poolL2TxAux...) poolL2Tx = append(poolL2Tx, poolL2TxAux...)
} }
@@ -656,7 +590,7 @@ func TestPurge(t *testing.T) {
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID) deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
} }
err := l2DB.AddTxTest(&tx) err := l2DB.AddTxTest(&tx)
require.NoError(t, err) assert.NoError(t, err)
} }
// Set batchNum of the kept txs // Set batchNum of the kept txs
for i := range keepedIDs { for i := range keepedIDs {
@@ -664,17 +598,17 @@ func TestPurge(t *testing.T) {
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;", "UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
safeBatchNum, keepedIDs[i], safeBatchNum, keepedIDs[i],
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Start forging txs and set batchNum // Start forging txs and set batchNum
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Done forging txs and set batchNum // Done forging txs and set batchNum
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Invalidate txs and set batchNum // Invalidate txs and set batchNum
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Update timestamp of afterTTL txs // Update timestamp of afterTTL txs
deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0) deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
for _, id := range afterTTLIDs { for _, id := range afterTTLIDs {
@@ -683,12 +617,12 @@ func TestPurge(t *testing.T) {
"UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;", "UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
deleteTimestamp, common.PoolL2TxStatePending, id, deleteTimestamp, common.PoolL2TxStatePending, id,
) )
require.NoError(t, err) assert.NoError(t, err)
} }
// Purge txs // Purge txs
err = l2DB.Purge(safeBatchNum) err = l2DB.Purge(safeBatchNum)
require.NoError(t, err) assert.NoError(t, err)
// Check results // Check results
for _, id := range deletedIDs { for _, id := range deletedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
@@ -696,7 +630,7 @@ func TestPurge(t *testing.T) {
} }
for _, id := range keepedIDs { for _, id := range keepedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
require.NoError(t, err) assert.NoError(t, err)
} }
} }
@@ -710,10 +644,10 @@ func TestAuth(t *testing.T) {
for i := 0; i < len(auths); i++ { for i := 0; i < len(auths); i++ {
// Add to the DB // Add to the DB
err := l2DB.AddAccountCreationAuth(auths[i]) err := l2DB.AddAccountCreationAuth(auths[i])
require.NoError(t, err) assert.NoError(t, err)
// Fetch from DB // Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr) auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
require.NoError(t, err) assert.NoError(t, err)
// Check fetched vs generated // Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr) assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ) assert.Equal(t, auths[i].BJJ, auth.BJJ)
@@ -731,7 +665,7 @@ func TestAddGet(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
require.NoError(t, err) assert.NoError(t, err)
// We will work with only 3 txs // We will work with only 3 txs
require.GreaterOrEqual(t, len(poolL2Txs), 3) require.GreaterOrEqual(t, len(poolL2Txs), 3)
@@ -767,56 +701,3 @@ func TestAddGet(t *testing.T) {
assert.Equal(t, txs[i], *dbTx) assert.Equal(t, txs[i], *dbTx)
} }
} }
func TestPurgeByExternalDelete(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
txs, err := generatePoolL2Txs()
require.NoError(t, err)
// We will work with 8 txs
require.GreaterOrEqual(t, len(txs), 8)
txs = txs[:8]
for i := range txs {
require.NoError(t, l2DB.AddTxTest(&txs[i]))
}
// We will recreate this scenario:
// tx index, status , external_delete
// 0 , pending, false
// 1 , pending, false
// 2 , pending, true // will be deleted
// 3 , pending, true // will be deleted
// 4 , fging , false
// 5 , fging , false
// 6 , fging , true
// 7 , fging , true
require.NoError(t, l2DB.StartForging(
[]common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
1))
_, err = l2DB.db.Exec(
`UPDATE tx_pool SET external_delete = true WHERE
tx_id IN ($1, $2, $3, $4)
;`,
txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
)
require.NoError(t, err)
require.NoError(t, l2DB.PurgeByExternalDelete())
// Query txs that have not been deleted
for _, i := range []int{0, 1, 4, 5, 6, 7} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.NoError(t, err)
}
// Query txs that have been deleted
for _, i := range []int{2, 3} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}
}

View File

@@ -34,7 +34,6 @@ type PoolL2TxWrite struct {
RqFee *common.FeeSelector `meddler:"rq_fee"` RqFee *common.FeeSelector `meddler:"rq_fee"`
RqNonce *common.Nonce `meddler:"rq_nonce"` RqNonce *common.Nonce `meddler:"rq_nonce"`
Type common.TxType `meddler:"tx_type"` Type common.TxType `meddler:"tx_type"`
ClientIP string `meddler:"client_ip"`
} }
// PoolTxAPI represents a L2 Tx pool with extra metadata used by the API // PoolTxAPI represents a L2 Tx pool with extra metadata used by the API

View File

@@ -47,7 +47,7 @@ CREATE TABLE token (
name VARCHAR(20) NOT NULL, name VARCHAR(20) NOT NULL,
symbol VARCHAR(10) NOT NULL, symbol VARCHAR(10) NOT NULL,
decimals INT NOT NULL, decimals INT NOT NULL,
usd NUMERIC, -- value of a normalized token (divided by 10^decimals) usd NUMERIC,
usd_update TIMESTAMP WITHOUT TIME ZONE usd_update TIMESTAMP WITHOUT TIME ZONE
); );
@@ -100,15 +100,6 @@ CREATE TABLE account (
eth_addr BYTEA NOT NULL eth_addr BYTEA NOT NULL
); );
CREATE TABLE account_update (
item_id SERIAL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance BYTEA NOT NULL
);
CREATE TABLE exit_tree ( CREATE TABLE exit_tree (
item_id SERIAL PRIMARY KEY, item_id SERIAL PRIMARY KEY,
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
@@ -627,9 +618,7 @@ CREATE TABLE tx_pool (
rq_amount BYTEA, rq_amount BYTEA,
rq_fee SMALLINT, rq_fee SMALLINT,
rq_nonce BIGINT, rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL, tx_type VARCHAR(40) NOT NULL
client_ip VARCHAR,
external_delete BOOLEAN NOT NULL DEFAULT false
); );
-- +migrate StatementBegin -- +migrate StatementBegin
@@ -685,7 +674,6 @@ DROP TABLE token_exchange;
DROP TABLE wdelayer_vars; DROP TABLE wdelayer_vars;
DROP TABLE tx; DROP TABLE tx;
DROP TABLE exit_tree; DROP TABLE exit_tree;
DROP TABLE account_update;
DROP TABLE account; DROP TABLE account;
DROP TABLE token; DROP TABLE token;
DROP TABLE bid; DROP TABLE bid;
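The account_update table that appears on one side of this hunk is what the synchronizer fills further down in this diff through common.AccountUpdate (EthBlockNum, BatchNum, Idx, Nonce, Balance). A hedged sketch of the matching Go struct, with meddler tags inferred from the column names (the real definition lives in the common package and may differ):

// AccountUpdate mirrors one row of account_update (sketch; tags inferred).
type AccountUpdate struct {
	EthBlockNum int64           `meddler:"eth_block_num"`
	BatchNum    common.BatchNum `meddler:"batch_num"`
	Idx         common.Idx      `meddler:"idx"`
	Nonce       common.Nonce    `meddler:"nonce"`
	Balance     *big.Int        `meddler:"balance,bigint"` // custom bigint meddler assumed
}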


@@ -498,17 +498,11 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
}, nil }, nil
} }
// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints. // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer { if fromSynchronizer {
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil { if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }


@@ -13,9 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate" migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler" "github.com/russross/meddler"
"golang.org/x/sync/semaphore"
) )
var migrations *migrate.PackrMigrationSource var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
// APIConnectionController is used to limit the SQL open connections used by the API // APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct { type APIConnectionController struct {
smphr *semaphore.Weighted smphr semaphore.Semaphore
timeout time.Duration timeout time.Duration
} }
// NewAPICnnectionController initialize APIConnectionController // NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController { func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{ return &APIConnectionController{
smphr: semaphore.NewWeighted(int64(maxConnections)), smphr: semaphore.New(maxConnections),
timeout: timeout, timeout: timeout,
} }
} }
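Whichever semaphore package is used, the controller's job is the same: take a slot before running an API query against the DB, release it afterwards, and fail fast when no slot frees up within the timeout. A minimal sketch against golang.org/x/sync/semaphore (the acquire/release helper names are illustrative, not the node's exact methods):

// acquire blocks until a connection slot is free or the timeout expires;
// on timeout the returned error is the context error.
func (acc *APIConnectionController) acquire(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, acc.timeout)
	defer cancel()
	return acc.smphr.Acquire(ctx, 1)
}

// release frees the slot once the SQL query has finished.
func (acc *APIConnectionController) release() {
	acc.smphr.Release(1)
}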


@@ -324,6 +324,5 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
Value: tx.Value(), Value: tx.Value(),
Data: tx.Data(), Data: tx.Data(),
} }
result, err := c.client.CallContract(ctx, msg, blockNum) return c.client.CallContract(ctx, msg, blockNum)
return result, tracerr.Wrap(err)
} }


@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
} }
consts, err := c.RollupConstants() consts, err := c.RollupConstants()
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err)) return nil, tracerr.Wrap(err)
} }
c.consts = consts c.consts = consts
return c, nil return c, nil
@@ -327,7 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
if auth == nil { if auth == nil {
auth, err = c.client.NewAuth() auth, err = c.client.NewAuth()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, err
} }
auth.GasLimit = 1000000 auth.GasLimit = 1000000
} }
@@ -393,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch, l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
args.ProofA, args.ProofB, args.ProofC) args.ProofA, args.ProofB, args.ProofC)
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err))
} }
return tx, nil return tx, nil
} }
@@ -939,7 +939,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
FeeIdxCoordinator: []common.Idx{}, FeeIdxCoordinator: []common.Idx{},
} }
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1) lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
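This length change is the visible cost of the Float16 to Float40 migration in the forged call data: the amount field of each encoded L1/L2 tx grows from 2 bytes to common.Float40BytesLength (5 bytes). For the 32-level circuits used in these tests, the per-tx size appears to break down as two nLevels-bit indexes (fromIdx, toIdx) plus the amount plus one fee byte:

const nLevels = 32
// two nLevels-bit indexes + amount + 1 fee byte (fragment, sketch only)
lenWithFloat16 := (nLevels/8)*2 + 2 + 1                         // 11 bytes
lenWithFloat40 := (nLevels/8)*2 + common.Float40BytesLength + 1 // 14 bytes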

go.mod

@@ -17,6 +17,7 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0 github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0 github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
@@ -28,6 +29,5 @@ require (
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
gopkg.in/go-playground/validator.v9 v9.29.1 gopkg.in/go-playground/validator.v9 v9.29.1
) )

go.sum

@@ -24,8 +24,6 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -86,8 +84,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -173,8 +169,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -421,6 +415,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -602,8 +599,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -622,8 +617,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=


@@ -67,11 +67,6 @@ func Init(levelStr string, outputs []string) {
func sprintStackTrace(st []tracerr.Frame) string { func sprintStackTrace(st []tracerr.Frame) string {
builder := strings.Builder{} builder := strings.Builder{}
// Skip deepest frame because it belongs to the go runtime and we don't
// care about it.
if len(st) > 0 {
st = st[:len(st)-1]
}
for _, f := range st { for _, f := range st {
builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func)) builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
} }


@@ -2,7 +2,6 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"sync" "sync"
@@ -103,8 +102,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
var keyStore *ethKeystore.KeyStore var keyStore *ethKeystore.KeyStore
if mode == ModeCoordinator { if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{ ethCfg = eth.EthereumConfig{
CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit, CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv, GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv,
} }
scryptN := ethKeystore.StandardScryptN scryptN := ethKeystore.StandardScryptN
@@ -184,7 +183,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{ sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration, StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
StoreAccountUpdates: cfg.Synchronizer.StoreAccountUpdates,
ChainID: chainIDU16, ChainID: chainIDU16,
}) })
if err != nil { if err != nil {
@@ -205,7 +203,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
db, db,
cfg.Coordinator.L2DB.SafetyPeriod, cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration, cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon, apiConnCon,
) )
@@ -301,16 +298,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ConfirmBlocks: cfg.Coordinator.ConfirmBlocks, ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc, L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration, ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration, SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
EthClientAttempts: cfg.Coordinator.EthClient.Attempts, EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration, EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration, TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
DebugBatchPath: cfg.Coordinator.Debug.BatchPath, DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
Purger: coordinator.PurgerCfg{ Purger: coordinator.PurgerCfg{
@@ -497,15 +487,11 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
if stats.Synced() { if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo( if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum), common.BatchNum(stats.Eth.LastBatch),
stats.Sync.Auction.CurrentSlot.SlotNum, stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil { ); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err) log.Errorw("API.UpdateNetworkInfo", "err", err)
} }
} else {
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
} }
} }
} }
@@ -587,16 +573,10 @@ func (n *Node) StartSynchronizer() {
if n.ctx.Err() != nil { if n.ctx.Err() != nil {
continue continue
} }
if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
log.Warnw("Synchronizer.Sync", "err", err)
} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
log.Warnw("Synchronizer.Sync", "err", err)
} else {
log.Errorw("Synchronizer.Sync", "err", err) log.Errorw("Synchronizer.Sync", "err", err)
} }
} }
} }
}
}() }()
n.wg.Add(1) n.wg.Add(1)


@@ -18,19 +18,6 @@ import (
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
const (
// errStrUnknownBlock is the string returned by geth when querying an
// unknown block
errStrUnknownBlock = "unknown block"
)
var (
// ErrUnknownBlock is the error returned by the Synchronizer when a
// block is queried by hash but the ethereum node doesn't find it due
// to it being discarded from a reorg.
ErrUnknownBlock = fmt.Errorf("unknown block")
)
// Stats of the synchronizer // Stats of the synchronizer
type Stats struct { type Stats struct {
Eth struct { Eth struct {
@@ -38,12 +25,12 @@ type Stats struct {
Updated time.Time Updated time.Time
FirstBlockNum int64 FirstBlockNum int64
LastBlock common.Block LastBlock common.Block
LastBatchNum int64 LastBatch int64
} }
Sync struct { Sync struct {
Updated time.Time Updated time.Time
LastBlock common.Block LastBlock common.Block
LastBatch common.Batch LastBatch int64
// LastL1BatchBlock is the last ethereum block in which an // LastL1BatchBlock is the last ethereum block in which an
// l1Batch was forged // l1Batch was forged
LastL1BatchBlock int64 LastL1BatchBlock int64
@@ -90,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
} }
// UpdateSync updates the synchronizer stats // UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch, func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) { lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
now := time.Now() now := time.Now()
s.rw.Lock() s.rw.Lock()
s.Sync.LastBlock = *lastBlock s.Sync.LastBlock = *lastBlock
if lastBatch != nil { if lastBatch != nil {
s.Sync.LastBatch = *lastBatch s.Sync.LastBatch = int64(*lastBatch)
} }
if lastL1BatchBlock != nil { if lastL1BatchBlock != nil {
s.Sync.LastL1BatchBlock = *lastL1BatchBlock s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -118,16 +105,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1) lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) return tracerr.Wrap(err)
} }
lastBatchNum, err := ethClient.RollupLastForgedBatch() lastBatch, err := ethClient.RollupLastForgedBatch()
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err)) return tracerr.Wrap(err)
} }
s.rw.Lock() s.rw.Lock()
s.Eth.Updated = now s.Eth.Updated = now
s.Eth.LastBlock = *lastBlock s.Eth.LastBlock = *lastBlock
s.Eth.LastBatchNum = lastBatchNum s.Eth.LastBatch = lastBatch
s.rw.Unlock() s.rw.Unlock()
return nil return nil
} }
@@ -152,10 +139,6 @@ func (s *StatsHolder) CopyStats() *Stats {
sCopy.Sync.Auction.NextSlot.DefaultSlotBid = sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid) common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
} }
if s.Sync.LastBatch.StateRoot != nil {
sCopy.Sync.LastBatch.StateRoot =
common.CopyBigInt(s.Sync.LastBatch.StateRoot)
}
s.rw.RUnlock() s.rw.RUnlock()
return &sCopy return &sCopy
} }
@@ -169,9 +152,9 @@ func (s *StatsHolder) blocksPerc() float64 {
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1)) float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
} }
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 { func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
return float64(batchNum) * 100.0 / return float64(batchNum) * 100.0 /
float64(s.Eth.LastBatchNum) float64(s.Eth.LastBatch)
} }
// StartBlockNums sets the first block used to start tracking the smart // StartBlockNums sets the first block used to start tracking the smart
@@ -207,7 +190,6 @@ type SCConsts struct {
// Config is the Synchronizer configuration // Config is the Synchronizer configuration
type Config struct { type Config struct {
StatsRefreshPeriod time.Duration StatsRefreshPeriod time.Duration
StoreAccountUpdates bool
ChainID uint16 ChainID uint16
} }
@@ -347,25 +329,23 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
return nil return nil
} }
// updateCurrentSlot updates the slot with information of the current slot. // firstBatchBlockNum is the blockNum of first batch in that block, if any
// The information about which coordinator is allowed to forge is only updated func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
// when we are Synced. slot := common.Slot{
// hasBatch is true when the last synced block contained at least one batch. SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error { ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
}
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) slotNum := s.consts.Auction.SlotNum(blockNum)
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
if reset { if reset {
// Using this query only to know if there
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum) dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
} else if tracerr.Unwrap(err) == sql.ErrNoRows { } else if tracerr.Unwrap(err) == sql.ErrNoRows {
hasBatch = false firstBatchBlockNum = nil
} else { } else {
hasBatch = true firstBatchBlockNum = &dbFirstBatchBlockNum
firstBatchBlockNum = dbFirstBatchBlockNum
} }
slot.ForgerCommitment = false slot.ForgerCommitment = false
} else if slotNum > slot.SlotNum { } else if slotNum > slot.SlotNum {
@@ -376,11 +356,11 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil { if err := s.setSlotCoordinator(&slot); err != nil {
return tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if hasBatch && if firstBatchBlockNum != nil &&
s.consts.Auction.RelativeBlock(firstBatchBlockNum) < s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
int64(s.vars.Auction.SlotDeadline) { int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true slot.ForgerCommitment = true
} }
@@ -389,61 +369,57 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err)) return nil, tracerr.Wrap(err)
} }
if !canForge { if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return nil return &slot, nil
} }
// updateNextSlot updates the slot with information of the next slot. func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) + 1 slotNum := s.consts.Auction.SlotNum(blockNum) + 1
slot.SlotNum = slotNum slot := common.Slot{
slot.ForgerCommitment = false SlotNum: slotNum,
ForgerCommitment: false,
}
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil { if err := s.setSlotCoordinator(&slot); err != nil {
return tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// TODO: Remove this SANITY CHECK once this code is tested enough // TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err)) return nil, tracerr.Wrap(err)
} }
if !canForge { if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return nil return &slot, nil
} }
// updateCurrentNextSlotIfSync updates the current and next slot. Information func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
// about forger address that is allowed to forge is only updated if we are current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
// Synced. if err != nil {
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
current := s.stats.Sync.Auction.CurrentSlot
next := s.stats.Sync.Auction.NextSlot
if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if err := s.updateNextSlot(&next); err != nil { next, err := s.getNextSlot()
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
s.stats.UpdateCurrentNextSlot(&current, &next) s.stats.UpdateCurrentNextSlot(current, next)
return nil return nil
} }
@@ -482,9 +458,9 @@ func (s *Synchronizer) init() error {
"ethLastBlock", s.stats.Eth.LastBlock, "ethLastBlock", s.stats.Eth.LastBlock,
) )
log.Infow("Sync init batch", log.Infow("Sync init batch",
"syncLastBatch", s.stats.Sync.LastBatch.BatchNum, "syncLastBatch", s.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum), "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
"ethLastBatch", s.stats.Eth.LastBatchNum, "ethLastBatch", s.stats.Eth.LastBatch,
) )
return nil return nil
} }
@@ -545,7 +521,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
if tracerr.Unwrap(err) == ethereum.NotFound { if tracerr.Unwrap(err) == ethereum.NotFound {
return nil, nil, nil return nil, nil, nil
} else if err != nil { } else if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
log.Debugf("ethBlock: num: %v, parent: %v, hash: %v", log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String()) ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -651,29 +627,29 @@ func (s *Synchronizer) Sync2(ctx context.Context,
} }
} }
s.stats.UpdateSync(ethBlock, s.stats.UpdateSync(ethBlock,
&rollupData.Batches[batchesLen-1].Batch, &rollupData.Batches[batchesLen-1].Batch.BatchNum,
lastL1BatchBlock, lastForgeL1TxsNum) lastL1BatchBlock, lastForgeL1TxsNum)
} }
hasBatch := false var firstBatchBlockNum *int64
if len(rollupData.Batches) > 0 { if len(rollupData.Batches) > 0 {
hasBatch = true firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
} }
if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil { if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
)
}
log.Debugw("Synced block", log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num, "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(), "syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num, "ethLastBlockNum", s.stats.Eth.LastBlock.Num,
) )
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
"ethLastBatch", s.stats.Eth.LastBatch,
)
}
return blockData, nil, nil return blockData, nil, nil
} }
@@ -724,15 +700,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) { consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit() auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit() wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err)) return nil, nil, tracerr.Wrap(err)
} }
rollupVars := rollupInit.RollupVariables() rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding) auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
@@ -777,15 +753,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
s.vars.WDelayer = *wDelayer s.vars.WDelayer = *wDelayer
} }
batch, err := s.historyDB.GetLastBatch() batchNum, err := s.historyDB.GetLastBatchNum()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err)) return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
} }
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
batch = &common.Batch{} batchNum = 0
} }
err = s.stateDB.Reset(batch.BatchNum) err = s.stateDB.Reset(batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err)) return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
} }
@@ -807,9 +783,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
lastForgeL1TxsNum = &n lastForgeL1TxsNum = &n
} }
s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum) s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
if err := s.updateCurrentNextSlotIfSync(true, false); err != nil { if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
@@ -825,10 +801,8 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
// Get rollup events in the block, and make sure the block hash matches // Get rollup events in the block, and make sure the block hash matches
// the expected one. // the expected one.
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash) rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if rollupEvents == nil { if rollupEvents == nil {
@@ -945,15 +919,9 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if s.stateDB.CurrentBatch() != batchNum { if s.stateDB.CurrentBatch() != batchNum {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+ return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
"evtForgeBatch.BatchNum = (%v)",
s.stateDB.CurrentBatch(), batchNum)) s.stateDB.CurrentBatch(), batchNum))
} }
if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
"forgeBatchArgs.NewStRoot (%v)",
s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
}
// Transform processed PoolL2 txs to L2 and store in BatchData // Transform processed PoolL2 txs to L2 and store in BatchData
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -994,21 +962,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
} }
batchData.CreatedAccounts = processTxsOut.CreatedAccounts batchData.CreatedAccounts = processTxsOut.CreatedAccounts
if s.cfg.StoreAccountUpdates {
batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
len(processTxsOut.UpdatedAccounts))
for _, acc := range processTxsOut.UpdatedAccounts {
batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
common.AccountUpdate{
EthBlockNum: blockNum,
BatchNum: batchNum,
Idx: acc.Idx,
Nonce: acc.Nonce,
Balance: acc.Balance,
})
}
}
slotNum := int64(0) slotNum := int64(0)
if ethBlock.Num >= s.consts.Auction.GenesisBlockNum { if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) / slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
@@ -1152,10 +1105,8 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
// Get auction events in the block // Get auction events in the block
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash) auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if auctionEvents == nil { if auctionEvents == nil {
@@ -1251,10 +1202,8 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
// Get wDelayer events in the block // Get wDelayer events in the block
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash) wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock { if err != nil {
return nil, tracerr.Wrap(ErrUnknownBlock) return nil, tracerr.Wrap(err)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if wDelayerEvents == nil { if wDelayerEvents == nil {


@@ -17,6 +17,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
@@ -171,8 +172,6 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
*exit = syncBatch.ExitTree[j] *exit = syncBatch.ExitTree[j]
} }
assert.Equal(t, batch.Batch, syncBatch.Batch) assert.Equal(t, batch.Batch, syncBatch.Batch)
// Ignore updated accounts
syncBatch.UpdatedAccounts = nil
assert.Equal(t, batch, syncBatch) assert.Equal(t, batch, syncBatch)
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
@@ -322,14 +321,6 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
return stateDB, historyDB return stateDB, historyDB
} }
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestSyncGeneral(t *testing.T) { func TestSyncGeneral(t *testing.T) {
// //
// Setup // Setup
@@ -347,8 +338,8 @@ func TestSyncGeneral(t *testing.T) {
// Create Synchronizer // Create Synchronizer
s, err := NewSynchronizer(client, historyDB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
StoreAccountUpdates: true,
}) })
log.Error(err)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@@ -443,22 +434,12 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs)) require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
// blocks 1 (blockNum=3) // blocks 1 (blockNum=3)
i = 1 i = 1
require.Equal(t, 3, int(blocks[i].Block.Num)) require.Equal(t, 3, int(blocks[i].Block.Num))
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
// Generate extra required data // Generate extra required data
ethAddTokens(blocks, client) ethAddTokens(blocks, client)
@@ -633,12 +614,6 @@ func TestSyncGeneral(t *testing.T) {
blocks, err = tc.GenerateBlocks(set2) blocks, err = tc.GenerateBlocks(set2)
require.NoError(t, err) require.NoError(t, err)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
client.CtlRollback() client.CtlRollback()
} }
@@ -739,7 +714,6 @@ func TestSyncForgerCommitment(t *testing.T) {
// Create Synchronizer // Create Synchronizer
s, err := NewSynchronizer(client, historyDB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
StoreAccountUpdates: true,
}) })
require.NoError(t, err) require.NoError(t, err)
@@ -840,7 +814,6 @@ func TestSyncForgerCommitment(t *testing.T) {
s2, err := NewSynchronizer(client, historyDB, stateDB, Config{ s2, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
StoreAccountUpdates: true,
}) })
require.NoError(t, err) require.NoError(t, err)
stats = s2.Stats() stats = s2.Stats()


@@ -145,7 +145,7 @@ const longWaitDuration = 999 * time.Hour
// const provingDuration = 2 * time.Second // const provingDuration = 2 * time.Second
func (s *Mock) runProver(ctx context.Context) { func (s *Mock) runProver(ctx context.Context) {
waitCh := time.After(longWaitDuration) waitDuration := longWaitDuration
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -153,21 +153,21 @@ func (s *Mock) runProver(ctx context.Context) {
case msg := <-s.msgCh: case msg := <-s.msgCh:
switch msg.value { switch msg.value {
case "cancel": case "cancel":
waitCh = time.After(longWaitDuration) waitDuration = longWaitDuration
s.Lock() s.Lock()
if !s.status.IsReady() { if !s.status.IsReady() {
s.status = prover.StatusCodeAborted s.status = prover.StatusCodeAborted
} }
s.Unlock() s.Unlock()
case "prove": case "prove":
waitCh = time.After(s.provingDuration) waitDuration = s.provingDuration
s.Lock() s.Lock()
s.status = prover.StatusCodeBusy s.status = prover.StatusCodeBusy
s.Unlock() s.Unlock()
} }
msg.ackCh <- true msg.ackCh <- true
case <-waitCh: case <-time.After(waitDuration):
waitCh = time.After(longWaitDuration) waitDuration = longWaitDuration
s.Lock() s.Lock()
if s.status != prover.StatusCodeBusy { if s.status != prover.StatusCodeBusy {
s.Unlock() s.Unlock()
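The two variants in this hunk are close but not identical: evaluating time.After inside the select creates a fresh timer on every loop iteration, so each received message effectively restarts the wait (and the discarded timers linger until they fire), while caching the channel keeps a single deadline alive across unrelated wake-ups and only re-arms it where the code says so. A stripped-down illustration of the cached-channel variant (plain string messages instead of the mock's message struct):

waitCh := time.After(longWaitDuration)
for {
	select {
	case msg := <-msgCh:
		// the deadline is only reset when the handler chooses to re-arm it
		if msg == "prove" {
			waitCh = time.After(provingDuration)
		}
	case <-waitCh:
		waitCh = time.After(longWaitDuration) // deadline fired, re-arm
	}
}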


@@ -142,12 +142,10 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
// same values than in the js test // same values than in the js test
users = GenerateJsUsers(t) users = GenerateJsUsers(t)
depositAmount, err := common.Float40(10400).BigInt()
require.Nil(t, err)
l1UserTxs = []common.L1Tx{ l1UserTxs = []common.L1Tx{
{ {
FromIdx: 0, FromIdx: 0,
DepositAmount: depositAmount, DepositAmount: big.NewInt(16000000),
Amount: big.NewInt(0), Amount: big.NewInt(0),
TokenID: 1, TokenID: 1,
FromBJJ: users[0].BJJ.Public().Compress(), FromBJJ: users[0].BJJ.Public().Compress(),
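Both sides of this hunk pick a deposit amount that the circuit's floating-point amount encoding can represent exactly; the Float40 helpers introduced by this migration convert between the packed 40-bit value and a *big.Int. A short round-trip sketch (both calls appear elsewhere in this diff; the exact mantissa/exponent layout lives in common/float40.go and is not restated here):

// toFloat40AndBack round-trips an amount through the 40-bit encoding (sketch).
func toFloat40AndBack(v *big.Int) (*big.Int, error) {
	f40, err := common.NewFloat40(v) // fails if v cannot be encoded without losing precision
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	return f40.BigInt()
}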


@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB") dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData() h, err := zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String()) assert.Equal(t, "12174727174629825205577542675894290689387326670869871089988393208259924373499", h.String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)
// batch3 // batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData() h, err = zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String()) assert.Equal(t, "16351950370739934361208977436603065280805499094788807090831605833717933916063", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0]) assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String()) assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)


@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
os.Exit(exitVal) os.Exit(exitVal)
} }
const MaxTx = 352 const MaxTx = 376
const NLevels = 32 const NLevels = 32
const MaxL1Tx = 256 const MaxL1Tx = 256
const MaxFeeTx = 64 const MaxFeeTx = 64
@@ -61,7 +61,6 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
return return
} }
log.Infof("sending proof to %s", proofServerURL)
// Store zkinputs json for debugging purposes // Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(zki) zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err) require.NoError(t, err)
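The dump mentioned in this hunk is what makes a failing proof reproducible: the marshalled ZKInputs can be replayed against the prover later. A hedged sketch of writing them out (file name and location are illustrative, not the test's actual ones):

zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err)
require.NoError(t, ioutil.WriteFile(
	filepath.Join(os.TempDir(), "zkinputs-debug.json"), zkInputsJSON, 0600))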


@@ -27,20 +27,15 @@ type TxProcessor struct {
// AccumulatedFees contains the accumulated fees for each token (Coord // AccumulatedFees contains the accumulated fees for each token (Coord
// Idx) in the processed batch // Idx) in the processed batch
AccumulatedFees map[common.Idx]*big.Int AccumulatedFees map[common.Idx]*big.Int
// updatedAccounts stores the last version of the account when it has
// been created/updated by any of the processed transactions.
updatedAccounts map[common.Idx]*common.Account
config Config config Config
} }
// Config contains the TxProcessor configuration parameters // Config contains the TxProcessor configuration parameters
type Config struct { type Config struct {
NLevels uint32 NLevels uint32
// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
MaxFeeTx uint32 MaxFeeTx uint32
MaxTx uint32 MaxTx uint32
MaxL1Tx uint32 MaxL1Tx uint32
// ChainID of the blockchain
ChainID uint16 ChainID uint16
} }
@@ -58,9 +53,6 @@ type ProcessTxOutput struct {
CreatedAccounts []common.Account CreatedAccounts []common.Account
CoordinatorIdxsMap map[common.TokenID]common.Idx CoordinatorIdxsMap map[common.TokenID]common.Idx
CollectedFees map[common.TokenID]*big.Int CollectedFees map[common.TokenID]*big.Int
// UpdatedAccounts returns the current state of each account
// created/updated by any of the processed transactions.
UpdatedAccounts map[common.Idx]*common.Account
} }
func newErrorNotEnoughBalance(tx common.Tx) error { func newErrorNotEnoughBalance(tx common.Tx) error {
@@ -133,10 +125,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx)) return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxTx))
} }
if tp.s.Type() == statedb.TypeSynchronizer {
tp.updatedAccounts = make(map[common.Idx]*common.Account)
}
exits := make([]processedExit, nTx) exits := make([]processedExit, nTx)
if tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeBatchBuilder {
@@ -208,7 +196,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
} }
} }
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil && exitAccount != nil { if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{ exits[tp.i] = processedExit{
exit: true, exit: true,
newExit: newExit, newExit: newExit,
@@ -392,7 +380,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr) tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr)
} }
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee) accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
pFee, err := tp.updateAccount(idx, accCoord) pFee, err := tp.s.UpdateAccount(idx, accCoord)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -417,7 +405,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, nil return nil, nil
} }
if tp.s.Type() == statedb.TypeSynchronizer {
// once all txs processed (exitTree root frozen), for each Exit, // once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data // generate common.ExitInfo data
var exitInfos []common.ExitInfo var exitInfos []common.ExitInfo
@@ -449,7 +436,8 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
} }
} }
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will if tp.s.Type() == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum // be able to store it into HistoryDB for the concrete BatchNum
return &ProcessTxOutput{ return &ProcessTxOutput{
ZKInputs: nil, ZKInputs: nil,
@@ -457,7 +445,6 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
CreatedAccounts: createdAccounts, CreatedAccounts: createdAccounts,
CoordinatorIdxsMap: coordIdxsMap, CoordinatorIdxsMap: coordIdxsMap,
CollectedFees: collectedFees, CollectedFees: collectedFees,
UpdatedAccounts: tp.updatedAccounts,
}, nil }, nil
} }
@@ -528,20 +515,6 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1) tp.zki.ISOnChain[tp.i] = big.NewInt(1)
} }
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// in the cases where at L1Tx there is usage of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
} }
switch tx.Type { switch tx.Type {
@@ -684,11 +657,6 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr) tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
tp.zki.OnChain[tp.i] = big.NewInt(0) tp.zki.OnChain[tp.i] = big.NewInt(0)
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
tp.zki.NewAccount[tp.i] = big.NewInt(0) tp.zki.NewAccount[tp.i] = big.NewInt(0)
// L2Txs // L2Txs
@@ -752,7 +720,7 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
EthAddr: tx.FromEthAddr, EthAddr: tx.FromEthAddr,
} }
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), account) p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), account)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -787,28 +755,6 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1) return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1)
} }
// createAccount is a wrapper over the StateDB.CreateAccount method that also
// stores the created account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) createAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.CreateAccount(idx, account)
}
// updateAccount is a wrapper over the StateDB.UpdateAccount method that also
// stores the updated account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) updateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.UpdateAccount(idx, account)
}
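The two wrapper methods above only record accounts in tp.updatedAccounts when the StateDB is of TypeSynchronizer, so that ProcessTxs can hand the touched accounts back in ProcessTxOutput.UpdatedAccounts (exercised by TestUpdatedAccounts further down). A hypothetical consumer-side sketch, assuming a persistence call storeAccount and a batchNum in scope, neither of which is part of this diff:

// ptOut comes from tp.ProcessTxs(...); UpdatedAccounts maps common.Idx to the
// final *common.Account state of every account created or updated in the batch.
for idx, acc := range ptOut.UpdatedAccounts {
	// storeAccount is a placeholder for whatever write the synchronizer
	// performs into its history storage for the given batchNum.
	if err := storeAccount(batchNum, idx, acc); err != nil {
		return tracerr.Wrap(err)
	}
}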
// applyDeposit updates the balance in the account of the depositer, if // applyDeposit updates the balance in the account of the depositer, if
// andTransfer parameter is set to true, the method will also apply the // andTransfer parameter is set to true, the method will also apply the
// Transfer of the L1Tx/DepositTransfer // Transfer of the L1Tx/DepositTransfer
@@ -839,7 +785,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
} }
// update sender account in localStateDB // update sender account in localStateDB
p, err := tp.updateAccount(tx.FromIdx, accSender) p, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -876,7 +822,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err := tp.updateAccount(tx.ToIdx, accReceiver) p, err := tp.s.UpdateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -959,7 +905,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
} }
// update sender account in localStateDB // update sender account in localStateDB
pSender, err := tp.updateAccount(tx.FromIdx, accSender) pSender, err := tp.s.UpdateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -998,7 +944,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
// update receiver account in localStateDB // update receiver account in localStateDB
pReceiver, err := tp.updateAccount(auxToIdx, accReceiver) pReceiver, err := tp.s.UpdateAccount(auxToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1041,7 +987,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
} }
// create Account of the Sender // create Account of the Sender
p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), accSender) p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1089,7 +1035,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err = tp.updateAccount(tx.ToIdx, accReceiver) p, err = tp.s.UpdateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1163,7 +1109,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
} }
p, err := tp.updateAccount(tx.FromIdx, acc) p, err := tp.s.UpdateAccount(tx.FromIdx, acc)
if err != nil { if err != nil {
return nil, false, tracerr.Wrap(err) return nil, false, tracerr.Wrap(err)
} }
@@ -1174,11 +1120,6 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
if exitTree == nil { if exitTree == nil {
return nil, false, nil return nil, false, nil
} }
if tx.Amount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
// if the Exit Amount==0, the Exit is not added to the ExitTree
return nil, false, nil
}
exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx) exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx)
if tracerr.Unwrap(err) == db.ErrNotFound { if tracerr.Unwrap(err) == db.ErrNotFound {
// 1a. if idx does not exist in exitTree: // 1a. if idx does not exist in exitTree:

View File

@@ -4,7 +4,6 @@ import (
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"os" "os"
"sort"
"testing" "testing"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
@@ -912,198 +911,3 @@ func TestTwoExits(t *testing.T) {
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof) assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
} }
func TestExitOf0Amount(t *testing.T) {
// Test to check that when doing an Exit with amount 0 the Exit Root
// does not change (as there is no new Exit Leaf created)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)
chainID := uint16(1)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
set := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 100
> batchL1 // batch1: freeze L1User{2}
> batchL1 // batch2: forge L1User{2}
ForceExit(0) A: 10
ForceExit(0) B: 0
> batchL1 // batch3: freeze L1User{2}
> batchL1 // batch4: forge L1User{2}
ForceExit(0) A: 10
> batchL1 // batch5: freeze L1User{1}
> batchL1 // batch6: forge L1User{1}
ForceExit(0) A: 0
> batchL1 // batch7: freeze L1User{1}
> batchL1 // batch8: forge L1User{1}
> block
`
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
// Sanity check
require.Equal(t, 2, len(blocks[0].Rollup.Batches[1].L1UserTxs))
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
require.Equal(t, big.NewInt(10), blocks[0].Rollup.Batches[3].L1UserTxs[0].Amount)
require.Equal(t, big.NewInt(0), blocks[0].Rollup.Batches[3].L1UserTxs[1].Amount)
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
// For this test, only the batches with transactions are processed:
// - Batch2, equivalent to Batches[1]
// - Batch4, equivalent to Batches[3]
// - Batch6, equivalent to Batches[5]
// - Batch8, equivalent to Batches[7]
// process Batch2:
_, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[1].L1UserTxs, nil, nil)
require.NoError(t, err)
// process Batch4:
ptOut, err := tp.ProcessTxs(nil, blocks[0].Rollup.Batches[3].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
exitRootBatch4 := ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String()
// process Batch6:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[5].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// Expect the ExitRoot for Batch6 to be equal to the one for Batch4, as
// Batch4 & Batch6 have the same tx with Exit Amount=10, and Batch4 has a
// 2nd tx with Exit Amount=0.
assert.Equal(t, exitRootBatch4, ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// For the Batch8, as there is only 1 exit with Amount=0, the ExitRoot
// should be 0.
// process Batch8:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[7].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
}
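The test above pins down the behaviour of the Amount==0 guard that also appears only on one side of applyExit earlier in this diff: a ForceExit of 0 never creates an exit leaf, so the exit root after Batch6 equals the one after Batch4, and Batch8 (whose only exit has Amount 0) leaves the root at 0. For reference, the guard it exercises is the applyExit one-liner:

if tx.Amount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
	// if the Exit Amount==0, the Exit is not added to the ExitTree
	return nil, false, nil
}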
func TestUpdatedAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)
set := `
Type: Blockchain
AddToken(1)
CreateAccountCoordinator(0) Coord // 256
CreateAccountCoordinator(1) Coord // 257
> batch // 1
CreateAccountDeposit(0) A: 50 // 258
CreateAccountDeposit(0) B: 60 // 259
CreateAccountDeposit(1) A: 70 // 260
CreateAccountDeposit(1) B: 80 // 261
> batchL1 // 2
> batchL1 // 3
Transfer(0) A-B: 5 (126)
> batch // 4
Exit(1) B: 5 (126)
> batch // 5
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
tc.FillBlocksL1UserTxsBatchNum(blocks)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
require.Equal(t, 5, len(blocks[0].Rollup.Batches))
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
sortedKeys := func(m map[common.Idx]*common.Account) []int {
keys := make([]int, 0)
for k := range m {
keys = append(keys, int(k))
}
sort.Ints(keys)
return keys
}
for _, batch := range blocks[0].Rollup.Batches {
l2Txs := common.L2TxsToPoolL2Txs(batch.L2Txs)
ptOut, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator, batch.L1UserTxs,
batch.L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
switch batch.Batch.BatchNum {
case 1:
assert.Equal(t, 2, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 257}, sortedKeys(ptOut.UpdatedAccounts))
case 2:
assert.Equal(t, 0, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{}, sortedKeys(ptOut.UpdatedAccounts))
case 3:
assert.Equal(t, 4, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{258, 259, 260, 261}, sortedKeys(ptOut.UpdatedAccounts))
case 4:
assert.Equal(t, 2+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 258, 259}, sortedKeys(ptOut.UpdatedAccounts))
case 5:
assert.Equal(t, 1+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{257, 261}, sortedKeys(ptOut.UpdatedAccounts))
}
for idx, updAcc := range ptOut.UpdatedAccounts {
acc, err := sdb.GetAccount(idx)
require.NoError(t, err)
// If acc.Balance is 0, set it to 0 with big.NewInt so
// that the comparison succeeds. Without this, the
// comparison will not succeed because acc.Balance is
// set from a slice, and thus the internal big.Int
// buffer is not nil (big.Int.abs)
if acc.Balance.BitLen() == 0 {
acc.Balance = big.NewInt(0)
}
assert.Equal(t, acc, updAcc)
}
}
}
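The balance normalization inside the loop above exists because testify's assert.Equal falls back to reflect.DeepEqual, which distinguishes two *big.Int values that are numerically equal but hold different internal word slices (nil vs. empty but non-nil). A small standalone sketch of the pitfall; the exact outcome depends on math/big internals, hence the hedged comments:

package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	a := big.NewInt(0)                       // fresh zero: internal abs slice is typically nil
	b := new(big.Int).SetBytes([]byte{0x00}) // zero decoded from bytes: empty but non-nil abs slice
	fmt.Println(a.Cmp(b) == 0)               // true: numerically equal
	fmt.Println(reflect.DeepEqual(a, b))     // likely false: internal representations differ
}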

File diff suppressed because one or more lines are too long

View File

@@ -3,6 +3,7 @@ package txselector
// current: very simple version of TxSelector // current: very simple version of TxSelector
import ( import (
"bytes"
"fmt" "fmt"
"math/big" "math/big"
"sort" "sort"
@@ -18,6 +19,19 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// txs implements the interface Sort for an array of Tx
type txs []common.PoolL2Tx
func (t txs) Len() int {
return len(t)
}
func (t txs) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t txs) Less(i, j int) bool {
return t[i].AbsoluteFee > t[j].AbsoluteFee
}
// CoordAccount contains the data of the Coordinator account, that will be used // CoordAccount contains the data of the Coordinator account, that will be used
// to create new transactions of CreateAccountDeposit type to add new TokenID // to create new transactions of CreateAccountDeposit type to add new TokenID
// accounts for the Coordinator to receive the fees. // accounts for the Coordinator to receive the fees.
@@ -75,8 +89,12 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
// Reset tells the TxSelector to get its internal AccountsDB // Reset tells the TxSelector to get its internal AccountsDB
// from the required `batchNum` // from the required `batchNum`
func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer)) err := txsel.localAccountsDB.Reset(batchNum, true)
if err != nil {
return tracerr.Wrap(err)
}
return nil
} }
func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) { func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
@@ -177,16 +195,14 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
} }
// discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch // discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch
var discardedL2Txs []common.PoolL2Tx
var l1CoordinatorTxs []common.L1Tx var l1CoordinatorTxs []common.L1Tx
positionL1 := len(l1UserTxs) positionL1 := len(l1UserTxs)
var accAuths [][]byte var accAuths [][]byte
// sort l2TxsRaw (cropping at MaxTx at this point) // sort l2TxsRaw (cropping at MaxTx at this point)
l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx) l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
for i := range discardedL2Txs {
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee"
}
noncesMap := make(map[common.Idx]common.Nonce) noncesMap := make(map[common.Idx]common.Nonce)
var l2Txs []common.PoolL2Tx var l2Txs []common.PoolL2Tx
@@ -220,8 +236,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) { if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of // discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " + l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i]) discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue continue
} }
@@ -246,9 +261,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Amount with current Balance. Discard L2Tx, // not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to // and update Info parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -260,8 +273,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Nonce at tx. Discard L2Tx, and update Info // not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs // parameter of the tx, and add it to the discardedTxs
// array // array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -279,31 +291,18 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig, txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i]) len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil { if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err) log.Debug(err)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s", l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
if l1CoordinatorTx != nil { if accAuth != nil && l1CoordinatorTx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
// contain a valid ethereum address.
// Otherwise only create the account if we have
// the corresponding authorization
if validL2Tx.ToEthAddr == common.FFAddr {
accAuths = append(accAuths, common.EmptyEthSignature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
} else if accAuth != nil {
accAuths = append(accAuths, accAuth.Signature) accAuths = append(accAuths, accAuth.Signature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx) l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++ positionL1++
} }
}
if validL2Tx != nil { if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx) validTxs = append(validTxs, *validL2Tx)
} }
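The FFAddr branch shown above captures the authorization rule for coordinator-created accounts: a ToEthAddr of 0xff..ff marks a transfer to a BJJ-only account, which cannot carry an ethereum-signed authorization, so an empty signature is recorded; any other address needs a stored AccountCreationAuth before the coordinator may create the account. A compact sketch of that rule; the helper name is invented, while the types and constants are the ones already used in this file:

// accCreationSignature returns the signature to record for an L1CoordinatorTx
// that creates the receiver account, and whether the account may be created.
func accCreationSignature(toEthAddr ethCommon.Address, auth *common.AccountCreationAuth) ([]byte, bool) {
	if toEthAddr == common.FFAddr {
		// TransferToBJJ: no valid ethereum address, so no auth is required
		return common.EmptyEthSignature, true
	}
	if auth != nil {
		return auth.Signature, true
	}
	return nil, false // no authorization stored: don't create the account
}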
@@ -315,8 +314,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
"ToIdx", l2Txs[i].ToIdx) "ToIdx", l2Txs[i].ToIdx)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
"ToIdx: %d", l2Txs[i].ToIdx) l2Txs[i].ToIdx)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -328,9 +327,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info // Discard L2Tx, and update Info
// parameter of the tx, and add it to // parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
"does not correspond to the Account.EthAddr. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr) l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
@@ -344,9 +341,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info // Discard L2Tx, and update Info
// parameter of the tx, and add it to // parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+ l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
"does not correspond to the Account.BJJ. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ) l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
@@ -420,7 +415,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
log.Error(err) log.Error(err)
// Discard L2Tx, and update Info parameter of the tx, // Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array // and add it to the discardedTxs array
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error()) selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i]) discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
continue continue
} }
@@ -476,7 +471,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
var l1CoordinatorTx *common.L1Tx var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr { if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
// case: ToEthAddr != 0x00 neither 0xff // case: ToEthAddr != 0x00 neither 0xff
if l2Tx.ToBJJ != common.EmptyBJJComp { if l2Tx.ToBJJ != common.EmptyBJJComp {
// case: ToBJJ!=0: // case: ToBJJ!=0:
@@ -532,7 +528,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
DepositAmount: big.NewInt(0), DepositAmount: big.NewInt(0),
Type: common.TxTypeCreateAccountDeposit, Type: common.TxTypeCreateAccountDeposit,
} }
} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp { } else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
l2Tx.ToBJJ != common.EmptyBJJComp {
// if idx exist for EthAddr&BJJ use it // if idx exist for EthAddr&BJJ use it
_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ, _, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
l2Tx.TokenID) l2Tx.TokenID)
@@ -558,8 +555,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
} }
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs { if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
// L2Tx discarded // L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " + return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
} }
return &l2Tx, l1CoordinatorTx, accAuth, nil return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -568,7 +564,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ { for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr && if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
l1CoordinatorTxs[i].TokenID == tokenID && l1CoordinatorTxs[i].TokenID == tokenID &&
l1CoordinatorTxs[i].FromBJJ == bjj { l1CoordinatorTxs[i].FromBJJ == bjj {
return true return true
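One side of this comparison uses bytes.Equal on Address.Bytes() while the other compares the addresses directly with ==. Both are equivalent because go-ethereum's common.Address is a fixed-size [20]byte value type, and Go defines == for comparable array types, so the direct comparison also avoids the two slice allocations from Bytes(). A tiny standalone illustration:

package main

import (
	"bytes"
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
)

func main() {
	a := ethCommon.HexToAddress("0x00000000000000000000000000000000000000ff")
	b := ethCommon.HexToAddress("0x00000000000000000000000000000000000000ff")
	// common.Address is [20]byte, so == compares all 20 bytes directly
	fmt.Println(a == b)                            // true
	fmt.Println(bytes.Equal(a.Bytes(), b.Bytes())) // true
}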
@@ -578,33 +574,21 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
} }
// getL2Profitable returns the profitable selection of L2Txs sorted by Nonce // getL2Profitable returns the profitable selection of L2Txs sorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx, func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
[]common.PoolL2Tx) { sort.Sort(txs(l2Txs))
// First sort by nonce so that txs from the same account are sorted so if len(l2Txs) < int(max) {
// that they could be applied in succession. return l2Txs
sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
// Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
})
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
} }
l2Txs = l2Txs[:max]
// sort l2Txs by Nonce. This can be done in many different ways, what // sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each // is needed is to output the l2Txs where the Nonce of l2Txs for each
// Account is sorted, but the l2Txs can not be grouped by sender Account // Account is sorted, but the l2Txs can not be grouped by sender Account
// neither by Fee. This is because later on the Nonces will need to be // neither by Fee. This is because later on the Nonces will need to be
// sequential for the zkproof generation. // sequential for the zkproof generation.
sort.Slice(l2Txs, func(i, j int) bool { sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce return l2Txs[i].Nonce < l2Txs[j].Nonce
}) })
return l2Txs, discardedL2Txs return l2Txs
} }
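Both versions of getL2Profitable above implement the idea described in the comments: order candidate txs so that higher-fee txs survive the cropping at MaxTx, while keeping each sender's nonces ascending because the zkproof needs sequential nonces. A self-contained sketch of the two-pass variant (the one that also returns the discarded txs) on a toy type, runnable as-is:

package main

import (
	"fmt"
	"sort"
)

// toy stand-in for common.PoolL2Tx with only the fields the selection uses
type tx struct {
	nonce int
	fee   int
}

// profitable: sort by nonce, stable-sort by fee descending (ties keep nonce
// order), crop to max, then re-sort the kept txs by nonce.
func profitable(l2 []tx, max int) (kept, discarded []tx) {
	sort.Slice(l2, func(i, j int) bool { return l2[i].nonce < l2[j].nonce })
	sort.SliceStable(l2, func(i, j int) bool { return l2[i].fee > l2[j].fee })
	if len(l2) > max {
		discarded = l2[max:]
		l2 = l2[:max]
	}
	sort.Slice(l2, func(i, j int) bool { return l2[i].nonce < l2[j].nonce })
	return l2, discarded
}

func main() {
	kept, discarded := profitable([]tx{{2, 1}, {0, 5}, {1, 5}, {3, 9}}, 3)
	fmt.Println(kept, discarded) // [{0 5} {1 5} {3 9}] [{2 1}]
}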

View File

@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpdb") dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err) require.NoError(t, err)
@@ -48,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
BJJ: coordUser.BJJ.Public().Compress(), BJJ: coordUser.BJJ.Public().Compress(),
AccountCreationAuth: nil, AccountCreationAuth: nil,
} }
// fmt.Printf("%v\n", coordAccount) fmt.Printf("%v", coordAccount)
auth := common.AccountCreationAuth{ auth := common.AccountCreationAuth{
EthAddr: coordUser.Addr, EthAddr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(), BJJ: coordUser.BJJ.Public().Compress(),
@@ -497,215 +497,3 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch()) err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err) require.NoError(t, err)
} }
func TestTransferToBjj(t *testing.T) {
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
CreateAccountDeposit(1) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
addTokens(t, tc, txsel.l2db.DB())
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx.
batchPoolL2 := `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// Now the BJJ-only account for B is already created, so the transfer
// happens without an L1CoordTx that creates the user account.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// The transfer now is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx. Since it's a
// transfer of a token for which the coordinator doesn't have a fee
// account, another L1CoordTx will be created for the coordinator to
// receive the fees.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(1) B-A: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
// to receive the fees by the coordinator and another one for the
// recipient of the l2tx
assert.Equal(t, 2, len(oL1CoordTxs))
// [0] Coordinator account creation for token 1
assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
// [1] User A BJJ-only account creation for token 1
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestTransferManyFromSameAccount(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// 8 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126) // 1
PoolTransfer(0) A-B: 10 (126) // 2
PoolTransfer(0) A-B: 10 (126) // 3
PoolTransfer(0) A-B: 10 (126) // 4
PoolTransfer(0) A-B: 10 (126) // 5
PoolTransfer(0) A-B: 10 (126) // 6
PoolTransfer(0) A-B: 10 (126) // 7
PoolTransfer(0) A-B: 10 (126) // 8
PoolTransfer(0) A-B: 10 (126) // 9
PoolTransfer(0) A-B: 10 (126) // 10
PoolTransfer(0) A-B: 10 (126) // 11
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// reorder poolL2Txs so that nonces are not sorted
poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
poolL2Txs[1], poolL2Txs[10] = poolL2Txs[10], poolL2Txs[1]
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 7, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}