Compare commits


14 Commits

Author SHA1 Message Date
Eduard S
b2b86d0069 Add logs 2021-02-11 18:27:01 +01:00
Eduard S
60b7bef8ab WIP6 2021-02-11 18:27:01 +01:00
Eduard S
bea042ea52 Verify stateroot at sync 2021-02-11 18:27:01 +01:00
Eduard S
db46f61d64 WIP5 2021-02-11 18:26:59 +01:00
Eduard S
cdf110b8e7 WIP4 2021-02-11 18:22:54 +01:00
Eduard S
10edd5f2c2 WIP3 2021-02-11 18:22:54 +01:00
Eduard S
124d2e84f2 WIP 2021-02-11 18:22:54 +01:00
Eduard S
aa9367f7af Make TxManager more robust 2021-02-11 18:22:52 +01:00
Eduard S
7e6d820ac1 WIP 2021-02-11 18:21:36 +01:00
Eduard S
7982d9292a Update coordinator to work better under real net
- cli / node
    - Update handler of SIGINT so that after 3 SIGINTs, the process terminates
      unconditionally
- coordinator
    - Store stats without pointer
    - In all functions that send a value over a channel, check for context
      cancellation to avoid a deadlock (no goroutine reading from an
      unbuffered channel) when the node is stopped.
    - Abstract `canForge` so that it can be used outside of the `Coordinator`
    - In `canForge` check the blockNumber in current and next slot.
    - Update tests due to smart contract changes in slot handling, and minimum
      bid defaults
    - TxManager
        - Add consts, vars and stats to allow evaluating `canForge`
        - Add `canForge` method (not used yet)
        - Store batch and nonces status (last success and last pending)
        - Track nonces internally instead of relying on the ethereum node (this
          is required to work with ganache when there are pending txs)
        - Handle the (common) case of the receipt not being found after the tx
          is sent.
        - Don't start the main loop until we get an initial message for the stats
          and vars (so that in the loop the stats and vars are set to
          synchronizer values)
- eth / ethereum client
    - Add necessary methods to create the auth object for transactions manually
      so that we can set the nonce, gas price, gas limit, etc manually
    - Update `RollupForgeBatch` to take an auth object as input (so that the
      coordinator can set parameters manually)
- synchronizer
    - In stats, add `NextSlot`
2021-02-11 18:21:36 +01:00
Eduard S
2a77dac9c1 Merge pull request #536 from hermeznetwork/feature/sql-semaphore
Add semaphore for API queries to SQL
2021-02-10 13:47:27 +01:00
Arnau B
ac1fd9acf7 Add semaphore for API queries to SQL 2021-02-10 13:36:17 +01:00
arnau
1bf29636db Merge pull request #537 from hermeznetwork/fix/l2dbreorg
Fix l2db reorg of forging l2txs
2021-02-10 13:02:21 +01:00
Eduard S
2bf3b843ed Fix l2db reorg of forging l2txs 2021-02-09 17:13:29 +01:00
40 changed files with 2254 additions and 1246 deletions
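Commit 7982d9292a above notes that every function sending a value over a channel now also checks for context cancellation. A minimal sketch of that pattern (hypothetical names, not the node's actual code; the real functions live in the coordinator package):

package example

import "context"

// Stats stands in for the synchronizer stats struct passed around the node.
type Stats struct{ LastBlockNum int64 }

// sendStats delivers s on statsCh unless ctx is cancelled first. Without the
// ctx.Done() case, a send on an unbuffered channel would block forever once
// the receiving goroutine has been stopped.
func sendStats(ctx context.Context, statsCh chan<- Stats, s Stats) error {
	select {
	case statsCh <- s:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}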

View File

@@ -26,7 +26,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
 		return
 	}
 	// Insert to DB
-	if err := a.l2.AddAccountCreationAuth(commonAuth); err != nil {
+	if err := a.l2.AddAccountCreationAuthAPI(commonAuth); err != nil {
 		retSQLErr(err, c)
 		return
 	}

View File

@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"os"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
@@ -27,6 +28,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/test/til"
 	"github.com/hermeznetwork/hermez-node/test/txsets"
 	"github.com/hermeznetwork/tracerr"
+	"github.com/stretchr/testify/require"
 )

 // Pendinger is an interface that allows getting last returned item ID and PendingItems to be used for building fromItem
@@ -199,7 +201,8 @@ func TestMain(m *testing.M) {
 	if err != nil {
 		panic(err)
 	}
-	hdb := historydb.NewHistoryDB(database)
+	apiConnCon := db.NewAPICnnectionController(1, time.Second)
+	hdb := historydb.NewHistoryDB(database, apiConnCon)
 	if err != nil {
 		panic(err)
 	}
@@ -218,7 +221,7 @@
 		panic(err)
 	}
 	// L2DB
-	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour)
+	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
 	test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
 	// Config (smart contract constants)
 	chainID := uint16(0)
@@ -574,6 +577,82 @@ func TestMain(m *testing.M) {
 	os.Exit(result)
 }
func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
require.NoError(t, err)
// L2DB
l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
// API
apiGinTO := gin.Default()
finishWait := make(chan interface{})
startWait := make(chan interface{})
apiGinTO.GET("/wait", func(c *gin.Context) {
cancel, err := apiConnConTO.Acquire()
defer cancel()
require.NoError(t, err)
defer apiConnConTO.Release()
startWait <- nil
<-finishWait
})
// Start server
serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
go func() {
if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
require.NoError(t, err)
}
}()
_config := getConfigTest(0)
_, err = NewAPI(
true,
true,
apiGinTO,
hdbTO,
nil,
l2DBTO,
&_config,
)
require.NoError(t, err)
client := &http.Client{}
httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
require.NoError(t, err)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
require.NoError(t, err)
// Request that will get timed out
var wg sync.WaitGroup
wg.Add(1)
go func() {
// Request that will make the API busy
_, err = client.Do(httpReqWait)
require.NoError(t, err)
wg.Done()
}()
<-startWait
resp, err := client.Do(httpReq)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
defer resp.Body.Close() //nolint
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
// Unmarshal body into return struct
msg := &errorMsg{}
err = json.Unmarshal(body, msg)
require.NoError(t, err)
// Check that the error was the expected down
require.Equal(t, errSQLTimeout, msg.Message)
finishWait <- nil
// Stop server
wg.Wait()
require.NoError(t, serverTO.Shutdown(context.Background()))
require.NoError(t, databaseTO.Close())
}
 func doGoodReqPaginated(
 	path, order string,
 	iterStruct Pendinger,
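The TestTimeout test above exercises the new SQL connection limiter (db.NewAPICnnectionController), whose implementation is not part of this diff. A rough sketch of how such a controller could be built, assuming a buffered channel used as a semaphore plus a per-request timeout (an illustration inferred from the test, not the actual hermez-node code):

package example

import (
	"context"
	"time"
)

// connController is a hypothetical stand-in for the API connection
// controller used in the test; only its Acquire/Release shape is inferred
// from the diff, the real implementation may differ.
type connController struct {
	sem     chan struct{}
	timeout time.Duration
}

func newConnController(maxConns int, timeout time.Duration) *connController {
	return &connController{sem: make(chan struct{}, maxConns), timeout: timeout}
}

// Acquire takes a slot or fails with "context deadline exceeded" after the
// configured timeout, which retSQLErr can then map to a 503 response.
func (c *connController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	select {
	case c.sem <- struct{}{}:
		return cancel, nil
	case <-ctx.Done():
		return cancel, ctx.Err()
	}
}

// Release frees a slot previously taken by Acquire.
func (c *connController) Release() { <-c.sem }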

View File

@@ -108,7 +108,7 @@ func (a *API) getFullBatch(c *gin.Context) {
 	}
 	// Fetch txs forged in the batch from historyDB
 	maxTxsPerBatch := uint(2048) //nolint:gomnd
-	txs, _, err := a.h.GetHistoryTxs(
+	txs, _, err := a.h.GetTxsAPI(
 		nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
 	)
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {

View File

@@ -30,6 +30,12 @@ const (
 	// Error for duplicated key
 	errDuplicatedKey = "Item already exists"
+	// Error for timeout due to SQL connection
+	errSQLTimeout = "The node is under heavy preasure, please try again later"
+	// Error message returned when context reaches timeout
+	errCtxTimeout = "context deadline exceeded"
 )

 var (
@@ -38,16 +44,20 @@ var (
 )

 func retSQLErr(err error, c *gin.Context) {
-	log.Warn("HTTP API SQL request error", "err", err)
-	if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
+	log.Warnw("HTTP API SQL request error", "err", err)
+	errMsg := tracerr.Unwrap(err).Error()
+	if errMsg == errCtxTimeout {
+		c.JSON(http.StatusServiceUnavailable, errorMsg{
+			Message: errSQLTimeout,
+		})
+	} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
 		// https://www.postgresql.org/docs/current/errcodes-appendix.html
 		if sqlErr.Code == "23505" {
 			c.JSON(http.StatusInternalServerError, errorMsg{
 				Message: errDuplicatedKey,
 			})
 		}
-	}
-	if tracerr.Unwrap(err) == sql.ErrNoRows {
+	} else if tracerr.Unwrap(err) == sql.ErrNoRows {
 		c.JSON(http.StatusNotFound, errorMsg{
 			Message: err.Error(),
 		})
@@ -59,7 +69,7 @@ func retSQLErr(err error, c *gin.Context) {
 	}

 func retBadReq(err error, c *gin.Context) {
-	log.Warn("HTTP API Bad request error", "err", err)
+	log.Warnw("HTTP API Bad request error", "err", err)
 	c.JSON(http.StatusBadRequest, errorMsg{
 		Message: err.Error(),
 	})

View File

@@ -97,12 +97,12 @@ func (a *API) getSlot(c *gin.Context) {
 		retBadReq(err, c)
 		return
 	}
-	currentBlock, err := a.h.GetLastBlock()
+	currentBlock, err := a.h.GetLastBlockAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
 	}
-	auctionVars, err := a.h.GetAuctionVars()
+	auctionVars, err := a.h.GetAuctionVarsAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
@@ -200,12 +200,12 @@ func (a *API) getSlots(c *gin.Context) {
 		return
 	}
-	currentBlock, err := a.h.GetLastBlock()
+	currentBlock, err := a.h.GetLastBlockAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
 	}
-	auctionVars, err := a.h.GetAuctionVars()
+	auctionVars, err := a.h.GetAuctionVarsAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
@@ -220,13 +220,13 @@ func (a *API) getSlots(c *gin.Context) {
 		retBadReq(errors.New("It is necessary to add maxSlotNum filter"), c)
 		return
 	} else if *finishedAuction {
-		currentBlock, err := a.h.GetLastBlock()
+		currentBlock, err := a.h.GetLastBlockAPI()
 		if err != nil {
 			retBadReq(err, c)
 			return
 		}
 		currentSlot := a.getCurrentSlot(currentBlock.Num)
-		auctionVars, err := a.h.GetAuctionVars()
+		auctionVars, err := a.h.GetAuctionVarsAPI()
 		if err != nil {
 			retBadReq(err, c)
 			return

View File

@@ -141,7 +141,7 @@ func (a *API) UpdateNetworkInfo(
 	a.status.Network.NextForgers = nextForgers

 	// Update buckets withdrawals
-	bucketsUpdate, err := a.h.GetBucketUpdates()
+	bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		bucketsUpdate = nil
 	} else if err != nil {
@@ -201,7 +201,7 @@ func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot
 		}}
 	} else {
 		// Get all the relevant updates from the DB
-		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNum(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
+		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
@@ -279,7 +279,7 @@ func (a *API) UpdateMetrics() error {
 	}
 	batchNum := a.status.Network.LastBatch.BatchNum
 	a.status.RUnlock()
-	metrics, err := a.h.GetMetrics(batchNum)
+	metrics, err := a.h.GetMetricsAPI(batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -293,7 +293,7 @@ func (a *API) UpdateMetrics() error {

 // UpdateRecommendedFee update Status.RecommendedFee information
 func (a *API) UpdateRecommendedFee() error {
-	feeExistingAccount, err := a.h.GetAvgTxFee()
+	feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

View File

@@ -22,7 +22,7 @@ func (a *API) getToken(c *gin.Context) {
 	}
 	tokenID := common.TokenID(*tokenIDUint)
 	// Fetch token from historyDB
-	token, err := a.h.GetToken(tokenID)
+	token, err := a.h.GetTokenAPI(tokenID)
 	if err != nil {
 		retSQLErr(err, c)
 		return
@@ -45,7 +45,7 @@ func (a *API) getTokens(c *gin.Context) {
 		return
 	}
 	// Fetch exits from historyDB
-	tokens, pendingItems, err := a.h.GetTokens(
+	tokens, pendingItems, err := a.h.GetTokensAPI(
 		tokenIDs, symbols, name, fromItem, limit, order,
 	)
 	if err != nil {

View File

@@ -34,7 +34,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
 	}
 	// Fetch txs from historyDB
-	txs, pendingItems, err := a.h.GetHistoryTxs(
+	txs, pendingItems, err := a.h.GetTxsAPI(
 		addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
 	)
 	if err != nil {
@@ -61,7 +61,7 @@ func (a *API) getHistoryTx(c *gin.Context) {
 		return
 	}
 	// Fetch tx from historyDB
-	tx, err := a.h.GetHistoryTx(txID)
+	tx, err := a.h.GetTxAPI(txID)
 	if err != nil {
 		retSQLErr(err, c)
 		return

View File

@@ -28,7 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
 		return
 	}
 	// Insert to DB
-	if err := a.l2.AddTx(writeTx); err != nil {
+	if err := a.l2.AddTxAPI(writeTx); err != nil {
 		retSQLErr(err, c)
 		return
 	}

View File

@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
 // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
 // it can just roll back the internal copy.
 func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-	return bb.localStateDB.Reset(batchNum, fromSynchronizer)
+	return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
 }

 // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch

View File

@@ -3,6 +3,8 @@ Address = "localhost:8086"
 Explorer = true
 UpdateMetricsInterval = "10s"
 UpdateRecommendedFeeInterval = "10s"
+MaxSQLConnections = 100
+SQLConnectionTimeout = "2s"

 [PriceUpdater]
 Interval = "10s"
@@ -39,12 +41,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
 TokenHEZName = "Hermez Network Token"

 [Coordinator]
-# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
 # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
-ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
 # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
 ConfirmBlocks = 10
-L1BatchTimeoutPerc = 0.999
+L1BatchTimeoutPerc = 0.6
+StartSlotBlocksDelay = 2
+ScheduleBatchBlocksAheadCheck = 3
+SendBatchBlocksMarginCheck = 1
 ProofServerPollInterval = "1s"
 ForgeRetryInterval = "500ms"
 SyncRetryInterval = "1s"
@@ -83,8 +88,11 @@ ReceiptLoopInterval = "500ms"
 CheckLoopInterval = "500ms"
 Attempts = 4
 AttemptsDelay = "500ms"
+TxResendTimeout = "2m"
+NoReuseNonce = false
 CallGasLimit = 300000
 GasPriceDiv = 100
+MaxGasPrice = "5000000000"

 [Coordinator.EthClient.Keystore]
 Path = "/tmp/iden3-test/hermez/ethkeystore"

View File

@@ -27,6 +27,24 @@ type Batch struct {
 	TotalFeesUSD *float64 `meddler:"total_fees_usd"`
 }
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}
 // BatchNum identifies a batch
 type BatchNum int64
@@ -75,3 +93,23 @@ func NewBatchData() *BatchData {
 		Batch: Batch{},
 	}
 }
// BatchSync is a subset of Batch that contains fields needed for the
// synchronizer and coordinator
// type BatchSync struct {
// BatchNum BatchNum `meddler:"batch_num"`
// EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
// ForgerAddr ethCommon.Address `meddler:"forger_addr"`
// StateRoot *big.Int `meddler:"state_root,bigint"`
// SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
// }
//
// func NewBatchSync() *BatchSync {
// return &BatchSync{
// BatchNum: 0,
// EthBlockNum: 0,
// ForgerAddr: ethCommon.Address,
// StateRoot: big.NewInt(0),
// SlotNum: 0,
// }
// }

View File

@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
 	if blockNum >= c.GenesisBlockNum {
 		return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 	}
-	return -1
+	// This result will be negative
+	return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
 }

 // SlotBlocks returns the first and the last block numbers included in that slot
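With this change SlotNum no longer returns a fixed -1 for pre-genesis blocks but extrapolates the same division below genesis. A tiny worked example with assumed constants (GenesisBlockNum = 100, BlocksPerSlot = 40; not the real network values):

package example

// slotNum mirrors the arithmetic of AuctionConstants.SlotNum above.
func slotNum(blockNum, genesisBlockNum, blocksPerSlot int64) int64 {
	return (blockNum - genesisBlockNum) / blocksPerSlot
}

// slotNum(180, 100, 40) == 2  (second slot after genesis)
// slotNum(20, 100, 40)  == -2 (well before genesis, so the result is negative)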

View File

@@ -3,6 +3,7 @@ package config
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big"
"time" "time"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
@@ -51,6 +52,27 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"` L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the // ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status // ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"` ProofServerPollInterval Duration `validate:"required"`
@@ -101,6 +123,9 @@ type Coordinator struct {
// calls, except for methods where a particular gas limit is // calls, except for methods where a particular gas limit is
// harcoded because it's known to be a big value // harcoded because it's known to be a big value
CallGasLimit uint64 `validate:"required"` CallGasLimit uint64 `validate:"required"`
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int `validate:"required"`
// GasPriceDiv is the gas price division // GasPriceDiv is the gas price division
GasPriceDiv uint64 `validate:"required"` GasPriceDiv uint64 `validate:"required"`
// CheckLoopInterval is the waiting interval between receipt // CheckLoopInterval is the waiting interval between receipt
@@ -112,6 +137,13 @@ type Coordinator struct {
// AttemptsDelay is delay between attempts do do an eth client // AttemptsDelay is delay between attempts do do an eth client
// RPC call // RPC call
AttemptsDelay Duration `validate:"required"` AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept // Keystore is the ethereum keystore where private keys are kept
Keystore struct { Keystore struct {
// Path to the keystore // Path to the keystore
@@ -204,9 +236,14 @@ type Node struct {
// UpdateMetricsInterval is the interval between updates of the // UpdateMetricsInterval is the interval between updates of the
// API metrics // API metrics
UpdateMetricsInterval Duration UpdateMetricsInterval Duration
// UpdateMetricsInterval is the interval between updates of the // UpdateRecommendedFeeInterval is the interval between updates of the
// recommended fees // recommended fees
UpdateRecommendedFeeInterval Duration UpdateRecommendedFeeInterval Duration
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"` } `validate:"required"`
Debug struct { Debug struct {
// APIAddress is the address where the debugAPI will listen if // APIAddress is the address where the debugAPI will listen if
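The StartSlotBlocksDelay / ScheduleBatchBlocksAheadCheck / SendBatchBlocksMarginCheck comments above describe look-ahead checks on the forging permission. A condensed sketch of how the blocks-ahead check is applied (it mirrors the coordinator.syncStats change later in this diff; canForgeAt stands in for the real auction check):

package example

// shouldKeepScheduling reports whether the pipeline should keep scheduling
// batches: we must be allowed to forge at the next block and, when a
// look-ahead is configured, also blocksAhead blocks later.
func shouldKeepScheduling(nextBlock, blocksAhead int64, canForgeAt func(int64) bool) bool {
	canForge := canForgeAt(nextBlock)
	if blocksAhead != 0 && canForge {
		canForge = canForgeAt(nextBlock + blocksAhead)
	}
	return canForge
}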

View File

@@ -47,6 +47,8 @@ type Debug struct {
 	MineBlockNum int64
 	// SendBlockNum is the blockNum when the batch was sent to ethereum
 	SendBlockNum int64
+	// ResendNum is the number of times the tx has been resent
+	ResendNum int
 	// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
 	// was scheduled
 	LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
 	// StartToSendDelay is the delay between starting a batch and sending
 	// it to ethereum, in seconds
 	StartToSendDelay float64
+	// StartToMineDelay is the delay between starting a batch and having
+	// it mined in seconds
+	StartToMineDelay float64
+	// SendToMineDelay is the delay between sending a batch tx and having
+	// it mined in seconds
+	SendToMineDelay float64
 }

 // BatchInfo contains the Batch information
 type BatchInfo struct {
+	PipelineNum int
 	BatchNum common.BatchNum
 	ServerProof prover.Client
 	ZKInputs *common.ZKInputs
@@ -82,9 +91,15 @@ type BatchInfo struct {
 	CoordIdxs []common.Idx
 	ForgeBatchArgs *eth.RollupForgeBatchArgs
 	// FeesInfo
 	EthTx *types.Transaction
-	Receipt *types.Receipt
-	Debug Debug
+	// SendTimestamp the time of batch sent to ethereum
+	SendTimestamp time.Time
+	Receipt *types.Receipt
+	// Fail is true if:
+	// - The receipt status is failed
+	// - A previous parent batch is failed
+	Fail bool
+	Debug Debug
 }

 // DebugStore is a debug function to store the BatchInfo as a json text file in

View File

@@ -3,8 +3,8 @@ package coordinator
import ( import (
"context" "context"
"fmt" "fmt"
"math/big"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
@@ -42,6 +42,29 @@ type Config struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 L1BatchTimeoutPerc float64
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
SendBatchBlocksMarginCheck int64
// EthClientAttempts is the number of attempts to do an eth client RPC // EthClientAttempts is the number of attempts to do an eth client RPC
// call before giving up // call before giving up
EthClientAttempts int EthClientAttempts int
@@ -54,13 +77,25 @@ type Config struct {
// EthClientAttemptsDelay is delay between attempts do do an eth client // EthClientAttemptsDelay is delay between attempts do do an eth client
// RPC call // RPC call
EthClientAttemptsDelay time.Duration EthClientAttemptsDelay time.Duration
// EthTxResendTimeout is the timeout after which a non-mined ethereum
// transaction will be resent (reusing the nonce) with a newly
// calculated gas price
EthTxResendTimeout time.Duration
// EthNoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
EthNoReuseNonce bool
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int
// TxManagerCheckInterval is the waiting interval between receipt // TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration TxManagerCheckInterval time.Duration
// DebugBatchPath if set, specifies the path where batchInfo is stored // DebugBatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
DebugBatchPath string DebugBatchPath string
Purger PurgerCfg Purger PurgerCfg
// VerifierIdx is the index of the verifier contract registered in the
// smart contract
VerifierIdx uint8 VerifierIdx uint8
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
@@ -74,15 +109,22 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
} }
} }
type fromBatch struct {
BatchNum common.BatchNum
ForgerAddr ethCommon.Address
StateRoot *big.Int
}
// Coordinator implements the Coordinator type // Coordinator implements the Coordinator type
type Coordinator struct { type Coordinator struct {
// State // State
pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline pipelineNum int // Pipeline sequential number. The first pipeline is 1
provers []prover.Client pipelineFromBatch fromBatch // batch from which we started the pipeline
consts synchronizer.SCConsts provers []prover.Client
vars synchronizer.SCVariables consts synchronizer.SCConsts
stats synchronizer.Stats vars synchronizer.SCVariables
started bool stats synchronizer.Stats
started bool
cfg Config cfg Config
@@ -96,7 +138,8 @@ type Coordinator struct {
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
pipeline *Pipeline pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum
purger *Purger purger *Purger
txManager *TxManager txManager *TxManager
@@ -139,10 +182,15 @@ func NewCoordinator(cfg Config,
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{ c := Coordinator{
pipelineBatchNum: -1, pipelineNum: 0,
provers: serverProofs, pipelineFromBatch: fromBatch{
consts: *scConsts, BatchNum: 0,
vars: *initSCVars, ForgerAddr: ethCommon.Address{},
StateRoot: big.NewInt(0),
},
provers: serverProofs,
consts: *scConsts,
vars: *initSCVars,
cfg: cfg, cfg: cfg,
@@ -183,8 +231,9 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
} }
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) { func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector, c.pipelineNum++
c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts) return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
} }
// MsgSyncBlock indicates an update to the Synchronizer stats // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -205,6 +254,9 @@ type MsgSyncReorg struct {
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct { type MsgStopPipeline struct {
Reason string Reason string
// FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum
} }
// SendMsg is a thread safe method to pass a message to the Coordinator // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -215,27 +267,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
if update.Auction != nil {
vars.Auction = *update.Auction
}
if update.WDelayer != nil {
vars.WDelayer = *update.WDelayer
}
}
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) { func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
if vars.Rollup != nil { updateSCVars(&c.vars, vars)
c.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
c.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
c.vars.WDelayer = *vars.WDelayer
}
} }
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables, func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool { currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
return false
}
var slot *common.Slot var slot *common.Slot
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock { if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
slot = currentSlot slot = currentSlot
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock { } else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
slot = nextSlot slot = nextSlot
} else { } else {
log.Warnw("Coordinator: requested blockNum for canForge is outside slot", log.Warnw("canForge: requested blockNum is outside current and next slot",
"blockNum", blockNum, "currentSlot", currentSlot, "blockNum", blockNum, "currentSlot", currentSlot,
"nextSlot", nextSlot, "nextSlot", nextSlot,
) )
@@ -244,16 +305,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
anyoneForge := false anyoneForge := false
if !slot.ForgerCommitment && if !slot.ForgerCommitment &&
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) { auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)", log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
"block", blockNum) "block", blockNum)
anyoneForge = true anyoneForge = true
} }
if slot.Forger == addr || anyoneForge { if slot.Forger == addr || anyoneForge {
return true return true
} }
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
return false return false
} }
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) canForge() bool { func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1 blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction, return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -262,12 +330,24 @@ func (c *Coordinator) canForge() bool {
} }
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error { func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
canForge := c.canForge() nextBlock := c.stats.Eth.LastBlock.Num + 1
canForge := c.canForgeAt(nextBlock)
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
}
if c.pipeline == nil { if c.pipeline == nil {
if canForge { relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugw("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge {
log.Infow("Coordinator: forging state begin", "block", log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch) stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
batchNum := common.BatchNum(stats.Sync.LastBatch) batchNum := stats.Sync.LastBatch.BatchNum
if c.lastNonFailedBatchNum > batchNum {
batchNum = c.lastNonFailedBatchNum
}
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -276,7 +356,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineBatchNum = batchNum // c.pipelineBatchNum = batchNum
} }
} else { } else {
if !canForge { if !canForge {
@@ -293,18 +373,17 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
// return err // return err
// } // }
// } // }
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) { // if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)) {
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil { // if err := c.txSelector.Reset(stats.Sync.LastBatch.BatchNum); err != nil {
return tracerr.Wrap(err) // return tracerr.Wrap(err)
} // }
} // }
_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(), if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, stats.Sync.LastBatch) stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch) if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
if err != nil { int64(stats.Sync.LastBatch.BatchNum)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -331,33 +410,42 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars) c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
} }
if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum { if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
// There's been a reorg and the batch from which the pipeline c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0 {
// was started was in a block that was discarded. The batch // There's been a reorg and the batch state root from which the
// may not be in the main chain, so we stop the pipeline as a // pipeline was started has changed (probably because it was in
// precaution (it will be started again once the node is in // a block that was discarded), and it was sent by a different
// sync). // coordinator than us. That batch may never be in the main
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum", // chain, so we stop the pipeline (it will be started again
"sync.LastBatch", c.stats.Sync.LastBatch, // once the node is in sync).
"c.pipelineBatchNum", c.pipelineBatchNum) log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
if err := c.handleStopPipeline(ctx, "reorg"); err != nil { "& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error { // handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil { // the next pipeline will start from the last state of the synchronizer,
return tracerr.Wrap(err) // otherwise, it will state from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1
} }
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.Stop(c.ctx) c.pipeline.Stop(c.ctx)
c.pipeline = nil c.pipeline = nil
} }
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck if err := c.l2DB.Reorg(batchNum); err != nil {
// TODO: Check that we are in a slot in which we can't forge return tracerr.Wrap(err)
} }
c.lastNonFailedBatchNum = batchNum
return nil return nil
} }
@@ -373,7 +461,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
} }
case MsgStopPipeline: case MsgStopPipeline:
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason) log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
if err := c.handleStopPipeline(ctx, msg.Reason); err != nil { if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err)) return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
} }
default: default:
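The reworked handleStopPipeline above picks the batch to restart from depending on whether a batch failed. Reduced to its decision, the rule is (a sketch using plain int64 instead of common.BatchNum):

package example

// restartBatchNum returns the batch from which the next pipeline starts:
// the synchronizer's last batch when nothing failed (failedBatchNum == 0),
// otherwise the batch right before the failure.
func restartBatchNum(syncLastBatchNum, failedBatchNum int64) int64 {
	if failedBatchNum != 0 {
		return failedBatchNum - 1
	}
	return syncLastBatchNum
}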

View File

@@ -2,6 +2,7 @@ package coordinator
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
@@ -11,6 +12,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
@@ -105,8 +107,8 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour) l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db) historyDB := historydb.NewHistoryDB(db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB") txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err) require.NoError(t, err)
@@ -261,8 +263,8 @@ func TestCoordinatorFlow(t *testing.T) {
var stats synchronizer.Stats var stats synchronizer.Stats
stats.Eth.LastBlock = *ethClient.CtlLastBlock() stats.Eth.LastBlock = *ethClient.CtlLastBlock()
stats.Sync.LastBlock = stats.Eth.LastBlock stats.Sync.LastBlock = stats.Eth.LastBlock
stats.Eth.LastBatch = ethClient.CtlLastForgedBatch() stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
stats.Sync.LastBatch = stats.Eth.LastBatch stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1) canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
require.NoError(t, err) require.NoError(t, err)
var slot common.Slot var slot common.Slot
@@ -279,7 +281,7 @@ func TestCoordinatorFlow(t *testing.T) {
// Copy stateDB to synchronizer if there was a new batch // Copy stateDB to synchronizer if there was a new batch
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch) source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch) dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
if stats.Sync.LastBatch != 0 { if stats.Sync.LastBatch.BatchNum != 0 {
if _, err := os.Stat(dest); os.IsNotExist(err) { if _, err := os.Stat(dest); os.IsNotExist(err) {
log.Infow("Making pebble checkpoint for sync", log.Infow("Making pebble checkpoint for sync",
"source", source, "dest", dest) "source", source, "dest", dest)
@@ -566,3 +568,8 @@ func TestCoordinatorStress(t *testing.T) {
// TODO: Test forgeBatch // TODO: Test forgeBatch
// TODO: Test waitServerProof // TODO: Test waitServerProof
// TODO: Test handleReorg // TODO: Test handleReorg
func TestFoo(t *testing.T) {
a := tracerr.Wrap(fmt.Errorf("AAA: %w", core.ErrNonceTooLow))
fmt.Println(errors.Is(a, core.ErrNonceTooLow))
}

View File

@@ -2,6 +2,7 @@ package coordinator
import ( import (
"context" "context"
"database/sql"
"fmt" "fmt"
"math/big" "math/big"
"sync" "sync"
@@ -24,19 +25,30 @@ type statsVars struct {
Vars synchronizer.SCVariablesPtr Vars synchronizer.SCVariablesPtr
} }
type state struct {
batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
}
// Pipeline manages the forging of batches with parallel server proofs // Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct { type Pipeline struct {
num int
cfg Config cfg Config
consts synchronizer.SCConsts consts synchronizer.SCConsts
// state // state
batchNum common.BatchNum state state
lastScheduledL1BatchBlockNum int64 // batchNum common.BatchNum
lastForgeL1TxsNum int64 // lastScheduledL1BatchBlockNum int64
started bool // lastForgeL1TxsNum int64
started bool
rw sync.RWMutex
errAtBatchNum common.BatchNum
proversPool *ProversPool proversPool *ProversPool
provers []prover.Client provers []prover.Client
coord *Coordinator
txManager *TxManager txManager *TxManager
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
l2DB *l2db.L2DB l2DB *l2db.L2DB
@@ -53,14 +65,28 @@ type Pipeline struct {
cancel context.CancelFunc cancel context.CancelFunc
} }
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
p.rw.Lock()
defer p.rw.Unlock()
p.errAtBatchNum = batchNum
}
func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
p.rw.RLock()
defer p.rw.RUnlock()
return p.errAtBatchNum
}
// NewPipeline creates a new Pipeline // NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context, func NewPipeline(ctx context.Context,
cfg Config, cfg Config,
num int, // Pipeline sequential number
historyDB *historydb.HistoryDB, historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB, l2DB *l2db.L2DB,
txSelector *txselector.TxSelector, txSelector *txselector.TxSelector,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
purger *Purger, purger *Purger,
coord *Coordinator,
txManager *TxManager, txManager *TxManager,
provers []prover.Client, provers []prover.Client,
scConsts *synchronizer.SCConsts, scConsts *synchronizer.SCConsts,
@@ -79,6 +105,7 @@ func NewPipeline(ctx context.Context,
return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool")) return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
} }
return &Pipeline{ return &Pipeline{
num: num,
cfg: cfg, cfg: cfg,
historyDB: historyDB, historyDB: historyDB,
l2DB: l2DB, l2DB: l2DB,
@@ -87,6 +114,7 @@ func NewPipeline(ctx context.Context,
provers: provers, provers: provers,
proversPool: proversPool, proversPool: proversPool,
purger: purger, purger: purger,
coord: coord,
txManager: txManager, txManager: txManager,
consts: *scConsts, consts: *scConsts,
statsVarsCh: make(chan statsVars, queueLen), statsVarsCh: make(chan statsVars, queueLen),
@@ -104,33 +132,67 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state // reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error { stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.batchNum = batchNum p.state = state{
p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
lastScheduledL1BatchBlockNum: 0,
}
p.stats = *stats p.stats = *stats
p.vars = *vars p.vars = *vars
p.lastScheduledL1BatchBlockNum = 0
err := p.txSelector.Reset(p.batchNum) // Reset the StateDB in TxSelector and BatchBuilder from the
// synchronizer only if the checkpoint we reset from either:
// a. Doesn't exist in the TxSelector/BatchBuilder
// b. The batch has already been synced by the synchronizer and has a
// different MTRoot than the BatchBuilder
// Otherwise, reset from the local checkpoint.
// First attempt to reset from local checkpoint if such checkpoint exists
existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
err = p.batchBuilder.Reset(p.batchNum, true) fromSynchronizerTxSelector := !existsTxSelector
if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
return tracerr.Wrap(err)
}
existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
fromSynchronizerBatchBuilder := !existsBatchBuilder
if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
return tracerr.Wrap(err)
}
// After reset, check that if the batch exists in the historyDB, the
// stateRoot matches with the local one, if not, force a reset from
// synchronizer
batch, err := p.historyDB.GetBatch(p.state.batchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
// nothing to do
} else if err != nil {
return tracerr.Wrap(err)
} else {
localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
if batch.StateRoot.Cmp(localStateRoot) != 0 {
log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
// StateRoot from synchronizer doesn't match StateRoot
// from batchBuilder, force a reset from synchronizer
if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
}
}
return nil return nil
} }
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) { func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
if vars.Rollup != nil { updateSCVars(&p.vars, vars)
p.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
p.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
p.vars.WDelayer = *vars.WDelayer
}
} }
// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs, // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
@@ -143,7 +205,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
} else if err != nil { } else if err != nil {
if tracerr.Unwrap(err) == errLastL1BatchNotSynced { if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err, log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.lastForgeL1TxsNum, "lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum) "syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else { } else {
log.Errorw("forgeBatch", "err", err) log.Errorw("forgeBatch", "err", err)
@@ -199,15 +261,32 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.stats = statsVars.Stats p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars) p.syncSCVars(statsVars.Vars)
case <-time.After(waitDuration): case <-time.After(waitDuration):
batchNum = p.batchNum + 1 // Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
waitDuration = p.cfg.ForgeRetryInterval
continue
}
batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum) batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
continue continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
waitDuration = p.cfg.ForgeRetryInterval
continue
} else if err != nil { } else if err != nil {
waitDuration = p.cfg.SyncRetryInterval p.setErrAtBatchNum(batchNum)
waitDuration = p.cfg.ForgeRetryInterval
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.handleForgBatch: %v", err),
FailedBatchNum: batchNum,
})
continue continue
} }
p.batchNum = batchNum
p.state.batchNum = batchNum
select { select {
case batchChSentServerProof <- batchInfo: case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done(): case <-p.ctx.Done():
@@ -225,16 +304,28 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Done() p.wg.Done()
return return
case batchInfo := <-batchChSentServerProof: case batchInfo := <-batchChSentServerProof:
// Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
continue
}
err := p.waitServerProof(p.ctx, batchInfo) err := p.waitServerProof(p.ctx, batchInfo)
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
batchInfo.ServerProof = nil
if p.ctx.Err() != nil { if p.ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
log.Errorw("waitServerProof", "err", err) log.Errorw("waitServerProof", "err", err)
p.setErrAtBatchNum(batchInfo.BatchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.waitServerProof: %v", err),
FailedBatchNum: batchInfo.BatchNum,
})
continue continue
} }
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
// batchInfo.ServerProof = nil
p.txManager.AddBatch(p.ctx, batchInfo) p.txManager.AddBatch(p.ctx, batchInfo)
} }
} }
@@ -284,8 +375,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// Structure to accumulate data and metadata of the batch
batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
batchInfo.Debug.StartTimestamp = time.Now() batchInfo.Debug.StartTimestamp = time.Now()
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1 batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
@@ -300,22 +391,19 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
var auths [][]byte var auths [][]byte
var coordIdxs []common.Idx var coordIdxs []common.Idx
// TODO: If there are no txs and we are behind the timeout, skip
// forging a batch and return a particular error that can be handleded
// in the loop where handleForgeBatch is called to retry after an
// interval
// 1. Decide if we forge L2Tx or L1+L2Tx // 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) { if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true batchInfo.L1Batch = true
defer func() { if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
// If there's no error, update the parameters related
// to the last L1Batch forged
if err == nil {
p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.lastForgeL1TxsNum++
}
}()
if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, tracerr.Wrap(errLastL1BatchNotSynced) return nil, tracerr.Wrap(errLastL1BatchNotSynced)
} }
// 2a: L1+L2 txs // 2a: L1+L2 txs
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1) l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -324,6 +412,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
} else { } else {
// 2b: only L2 txs // 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
@@ -399,12 +490,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool { func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
// Take the lastL1BatchBlockNum as the biggest between the last // Take the lastL1BatchBlockNum as the biggest between the last
// scheduled one, and the synchronized one. // scheduled one, and the synchronized one.
lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum { if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
} }
// Set Debug information // Set Debug information
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
batchInfo.Debug.L1BatchBlockScheduleDeadline = batchInfo.Debug.L1BatchBlockScheduleDeadline =


@@ -77,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
// //
// Scheduled L1Batch // Scheduled L1Batch
// //
pipeline.lastScheduledL1BatchBlockNum = startBlock pipeline.state.lastScheduledL1BatchBlockNum = startBlock
stats.Sync.LastL1BatchBlock = startBlock - 10 stats.Sync.LastL1BatchBlock = startBlock - 10
// We are one block before the timeout range * 0.5 // We are one block before the timeout range * 0.5
@@ -172,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
// users with positive balances // users with positive balances
tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB) tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
syncStats := sync.Stats() syncStats := sync.Stats()
batchNum := common.BatchNum(syncStats.Sync.LastBatch) batchNum := syncStats.Sync.LastBatch.BatchNum
syncSCVars := sync.SCVars() syncSCVars := sync.SCVars()
pipeline, err := coord.newPipeline(ctx) pipeline, err := coord.newPipeline(ctx)


@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
return l2db.NewL2DB(db, 10, 100, 24*time.Hour) return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
} }
func newStateDB(t *testing.T) *statedb.LocalStateDB { func newStateDB(t *testing.T) *statedb.LocalStateDB {


@@ -2,6 +2,7 @@ package coordinator
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"time" "time"
@@ -9,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
@@ -35,12 +37,22 @@ type TxManager struct {
vars synchronizer.SCVariables vars synchronizer.SCVariables
statsVarsCh chan statsVars statsVarsCh chan statsVars
queue []*BatchInfo discardPipelineCh chan int // int refers to the pipelineNum
minPipelineNum int
queue Queue
// lastSuccessBatch stores the last BatchNum whose forge call was confirmed // lastSuccessBatch stores the last BatchNum whose forge call was confirmed
lastSuccessBatch common.BatchNum lastSuccessBatch common.BatchNum
lastPendingBatch common.BatchNum // lastPendingBatch common.BatchNum
lastSuccessNonce uint64 // accNonce is the account nonce in the last mined block (due to mined txs)
lastPendingNonce uint64 accNonce uint64
// accNextNonce is the nonce that we should use to send the next tx.
// In some cases this will be a reused nonce of an already pending tx.
accNextNonce uint64
// accPendingNonce is the pending nonce of the account due to pending txs
// accPendingNonce uint64
lastSentL1BatchBlockNum int64
} }
// NewTxManager creates a new TxManager // NewTxManager creates a new TxManager
@@ -54,26 +66,27 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil) accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address) // accPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
if err != nil { // if err != nil {
return nil, err // return nil, err
} // }
if lastSuccessNonce != lastPendingNonce { // if accNonce != accPendingNonce {
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)", // return nil, tracerr.Wrap(fmt.Errorf("currentNonce (%v) != accPendingNonce (%v)",
lastSuccessNonce, lastPendingNonce)) // accNonce, accPendingNonce))
} // }
log.Infow("TxManager started", "nonce", lastSuccessNonce) log.Infow("TxManager started", "nonce", accNonce)
return &TxManager{ return &TxManager{
cfg: *cfg, cfg: *cfg,
ethClient: ethClient, ethClient: ethClient,
l2DB: l2DB, l2DB: l2DB,
coord: coord, coord: coord,
batchCh: make(chan *BatchInfo, queueLen), batchCh: make(chan *BatchInfo, queueLen),
statsVarsCh: make(chan statsVars, queueLen), statsVarsCh: make(chan statsVars, queueLen),
discardPipelineCh: make(chan int, queueLen),
account: accounts.Account{ account: accounts.Account{
Address: *address, Address: *address,
}, },
@@ -82,8 +95,11 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
vars: *initSCVars, vars: *initSCVars,
lastSuccessNonce: lastSuccessNonce, minPipelineNum: 0,
lastPendingNonce: lastPendingNonce, queue: NewQueue(),
accNonce: accNonce,
accNextNonce: accNonce,
// accPendingNonce: accPendingNonce,
}, nil }, nil
} }
@@ -104,16 +120,17 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
} }
} }
// DiscardPipeline is a thread safe method to notify about a discarded pipeline
// due to a reorg
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
select {
case t.discardPipelineCh <- pipelineNum:
case <-ctx.Done():
}
}
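DiscardPipeline uses the same non-blocking send pattern applied across the coordinator: select on the channel send and on ctx.Done() so that shutting the node down never deadlocks on a channel nobody reads anymore. A generic sketch of the pattern (notify is a hypothetical helper, not part of the package):

func notify(ctx context.Context, ch chan<- int, v int) bool {
	select {
	case ch <- v:
		return true // delivered to the consumer
	case <-ctx.Done():
		return false // node is stopping; drop the message instead of blocking
	}
}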
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) { func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
if vars.Rollup != nil { updateSCVars(&t.vars, vars)
t.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
t.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
t.vars.WDelayer = *vars.WDelayer
}
} }
// NewAuth generates a new auth object for an ethereum transaction // NewAuth generates a new auth object for an ethereum transaction
@@ -123,6 +140,7 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
inc := new(big.Int).Set(gasPrice) inc := new(big.Int).Set(gasPrice)
// TODO: Replace this by a value of percentage
const gasPriceDiv = 100 const gasPriceDiv = 100
inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv)) inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
gasPrice.Add(gasPrice, inc) gasPrice.Add(gasPrice, inc)
@@ -141,29 +159,75 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
return auth, nil return auth, nil
} }
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error { func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
// TODO: Check if we can forge in the next blockNum, abort if we can't nextBlock := t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.Status = StatusSent if !t.canForgeAt(nextBlock) {
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1 return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
batchInfo.Debug.SendTimestamp = time.Now() }
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub( if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
batchInfo.Debug.StartTimestamp).Seconds() return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
}
margin := t.cfg.SendBatchBlocksMarginCheck
if margin != 0 {
if !t.canForgeAt(nextBlock + margin) {
return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
margin, nextBlock))
}
if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
margin, nextBlock))
}
}
return nil
}
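The checks above can be distilled into one loop; mustSkipSend is a hypothetical helper written only to illustrate them (forging rights at the target block, plus refusing to send a non-L1 batch when an L1 batch is forced), with the margin widening the window and a zero margin simply repeating the first check:

func mustSkipSend(canForgeAt, mustL1L2 func(blockNum int64) bool,
	isL1Batch bool, nextBlock, margin int64) error {
	for _, b := range []int64{nextBlock, nextBlock + margin} {
		if !canForgeAt(b) {
			return fmt.Errorf("can't forge at block %v", b)
		}
		if mustL1L2(b) && !isL1Batch {
			return fmt.Errorf("block %v requires an L1 batch", b)
		}
	}
	return nil
}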
func addPerc(v *big.Int, p int64) *big.Int {
r := new(big.Int).Set(v)
r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolint:gomnd
return r.Add(v, r)
}
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
var ethTx *types.Transaction var ethTx *types.Transaction
var err error var err error
auth, err := t.NewAuth(ctx) auth, err := t.NewAuth(ctx)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
auth.Nonce = big.NewInt(int64(t.lastPendingNonce)) auth.Nonce = big.NewInt(int64(t.accNextNonce))
t.lastPendingNonce++ if resend {
auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
}
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ { for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
auth.GasPrice, t.cfg.MaxGasPrice))
}
// RollupForgeBatch() calls ethclient.SendTransaction()
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth) ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
if err != nil { if errors.Is(err, core.ErrNonceTooLow) {
// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) { log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
// log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err, "err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
// "block", t.stats.Eth.LastBlock.Num+1) auth.Nonce.Add(auth.Nonce, big.NewInt(1))
// return tracerr.Wrap(err) attempt--
// } } else if errors.Is(err, core.ErrNonceTooHigh) {
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
attempt--
} else if errors.Is(err, core.ErrUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if errors.Is(err, core.ErrReplaceUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if err != nil {
log.Errorw("TxManager ethClient.RollupForgeBatch", log.Errorw("TxManager ethClient.RollupForgeBatch",
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1, "attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
"batchNum", batchInfo.BatchNum) "batchNum", batchInfo.BatchNum)
@@ -179,10 +243,30 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err)) return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
} }
if !resend {
t.accNextNonce = auth.Nonce.Uint64() + 1
}
batchInfo.EthTx = ethTx batchInfo.EthTx = ethTx
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex()) log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
now := time.Now()
batchInfo.SendTimestamp = now
if resend {
batchInfo.Debug.ResendNum++
}
batchInfo.Debug.Status = StatusSent
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
t.lastPendingBatch = batchInfo.BatchNum
// t.lastPendingBatch = batchInfo.BatchNum
if !resend {
if batchInfo.L1Batch {
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
}
}
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil { if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
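The retry loop above classifies go-ethereum errors and adjusts the auth object before trying again; condensed as a sketch (adjustAuth is a hypothetical helper, and the real loop also decrements attempt so these adjustments do not consume retries):

func adjustAuth(auth *bind.TransactOpts, err error) (retry bool) {
	switch {
	case errors.Is(err, core.ErrNonceTooLow):
		auth.Nonce.Add(auth.Nonce, big.NewInt(1)) // a mined tx already used this nonce
		return true
	case errors.Is(err, core.ErrNonceTooHigh):
		auth.Nonce.Sub(auth.Nonce, big.NewInt(1)) // we ran ahead of the node's nonce
		return true
	case errors.Is(err, core.ErrUnderpriced), errors.Is(err, core.ErrReplaceUnderpriced):
		auth.GasPrice = addPerc(auth.GasPrice, 10) // bump the gas price by 10%
		return true
	}
	return false
}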
@@ -225,13 +309,19 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) { func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
receipt := batchInfo.Receipt receipt := batchInfo.Receipt
if receipt != nil { if receipt != nil {
if batchInfo.EthTx.Nonce()+1 > t.accNonce {
t.accNonce = batchInfo.EthTx.Nonce() + 1
}
if receipt.Status == types.ReceiptStatusFailed { if receipt.Status == types.ReceiptStatusFailed {
batchInfo.Debug.Status = StatusFailed batchInfo.Debug.Status = StatusFailed
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber) _, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(), log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(), "batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
"err", err) "err", err)
if batchInfo.BatchNum <= t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum - 1
}
return nil, tracerr.Wrap(fmt.Errorf( return nil, tracerr.Wrap(fmt.Errorf(
"ethereum transaction receipt status is failed: %w", err)) "ethereum transaction receipt status is failed: %w", err))
} else if receipt.Status == types.ReceiptStatusSuccessful { } else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -239,6 +329,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64() batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum - batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
batchInfo.Debug.StartBlockNum batchInfo.Debug.StartBlockNum
if batchInfo.Debug.StartToMineDelay == 0 {
if block, err := t.ethClient.EthBlockByNumber(ctx,
receipt.BlockNumber.Int64()); err != nil {
log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
} else {
batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.SendTimestamp).Seconds()
batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
}
}
t.cfg.debugBatchStore(batchInfo) t.cfg.debugBatchStore(batchInfo)
if batchInfo.BatchNum > t.lastSuccessBatch { if batchInfo.BatchNum > t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum t.lastSuccessBatch = batchInfo.BatchNum
@@ -250,9 +351,72 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
return nil, nil return nil, nil
} }
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
// Queue of BatchInfos
type Queue struct {
list []*BatchInfo
// nonceByBatchNum map[common.BatchNum]uint64
next int
}
// NewQueue returns a new queue
func NewQueue() Queue {
return Queue{
list: make([]*BatchInfo, 0),
// nonceByBatchNum: make(map[common.BatchNum]uint64),
next: 0,
}
}
// Len is the length of the queue
func (q *Queue) Len() int {
return len(q.list)
}
// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
if position >= len(q.list) {
return nil
}
return q.list[position]
}
// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
if len(q.list) == 0 {
return 0, nil
}
defer func() { q.next = (q.next + 1) % len(q.list) }()
return q.next, q.list[q.next]
}
// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
// batchInfo := q.list[position]
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
q.list = append(q.list[:position], q.list[position+1:]...)
if len(q.list) == 0 {
q.next = 0
} else {
q.next = position % len(q.list)
}
}
// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
q.list = append(q.list, batchInfo)
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// nonce, ok := q.nonceByBatchNum[batchNum]
// return nonce, ok
// }
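For reference, the round-robin behaviour of the Queue (illustrative snippet, not a test from the repo):

q := NewQueue()
q.Push(&BatchInfo{BatchNum: 1})
q.Push(&BatchInfo{BatchNum: 2})
pos, bi := q.Next() // pos == 0, bi.BatchNum == 1
_, bi = q.Next()    // bi.BatchNum == 2
_, bi = q.Next()    // wraps around: bi.BatchNum == 1 again
q.Remove(pos)       // drop batch 1; from now on Next only returns batch 2
_ = bi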
// Run the TxManager // Run the TxManager
func (t *TxManager) Run(ctx context.Context) { func (t *TxManager) Run(ctx context.Context) {
next := 0
waitDuration := longWaitDuration waitDuration := longWaitDuration
var statsVars statsVars var statsVars statsVars
@@ -263,7 +427,7 @@ func (t *TxManager) Run(ctx context.Context) {
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
log.Infow("TxManager: received initial statsVars", log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch) "block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
for { for {
select { select {
@@ -273,8 +437,27 @@ func (t *TxManager) Run(ctx context.Context) {
case statsVars := <-t.statsVarsCh: case statsVars := <-t.statsVarsCh:
t.stats = statsVars.Stats t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars) t.syncSCVars(statsVars.Vars)
case pipelineNum := <-t.discardPipelineCh:
t.minPipelineNum = pipelineNum + 1
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
}
case batchInfo := <-t.batchCh: case batchInfo := <-t.batchCh:
if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil { if batchInfo.PipelineNum < t.minPipelineNum {
log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
}
if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
log.Warnw("TxManager: shouldSend", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
continue
}
if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
continue continue
} else if err != nil { } else if err != nil {
// If we reach here it's because our ethNode has // If we reach here it's because our ethNode has
@@ -282,19 +465,20 @@ func (t *TxManager) Run(ctx context.Context) {
// ethereum. This could be due to the ethNode // ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that // failure, or an invalid transaction (that
// can't be mined) // can't be mined)
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)}) log.Warnw("TxManager: forgeBatch send failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch send: %v", err)})
continue continue
} }
t.queue = append(t.queue, batchInfo) t.queue.Push(batchInfo)
waitDuration = t.cfg.TxManagerCheckInterval waitDuration = t.cfg.TxManagerCheckInterval
case <-time.After(waitDuration): case <-time.After(waitDuration):
if len(t.queue) == 0 { queuePosition, batchInfo := t.queue.Next()
if batchInfo == nil {
waitDuration = longWaitDuration waitDuration = longWaitDuration
continue continue
} }
current := next
next = (current + 1) % len(t.queue)
batchInfo := t.queue[current]
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil { if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
@@ -304,7 +488,8 @@ func (t *TxManager) Run(ctx context.Context) {
// if it was not mined, mined and successful or // if it was not mined, mined and successful or
// mined and failed. This could be due to the // mined and failed. This could be due to the
// ethNode failure. // ethNode failure.
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)}) t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
} }
confirm, err := t.handleReceipt(ctx, batchInfo) confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -312,32 +497,108 @@ func (t *TxManager) Run(ctx context.Context) {
continue continue
} else if err != nil { //nolint:staticcheck } else if err != nil { //nolint:staticcheck
// Transaction was rejected // Transaction was rejected
t.queue = append(t.queue[:current], t.queue[current+1:]...) if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
if len(t.queue) == 0 { continue
next = 0 } else if err != nil {
} else { log.Errorw("TxManager: removeBadBatchInfos", "err", err)
next = current % len(t.queue) continue
} }
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)}) t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
continue
} }
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks { now := time.Now()
log.Debugw("TxManager tx for RollupForgeBatch confirmed", if !t.cfg.EthNoReuseNonce && confirm == nil &&
"batch", batchInfo.BatchNum) now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
t.queue = append(t.queue[:current], t.queue[current+1:]...) log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
if len(t.queue) == 0 { "tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
next = 0 if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
} else { continue
next = current % len(t.queue) } else if err != nil {
// If we reach here it's because our ethNode has
// been unable to send the transaction to
// ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that
// can't be mined)
log.Warnw("TxManager: forgeBatch resend failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
continue
} }
} }
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
log.Debugw("TxManager: forgeBatch tx confirmed",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
t.queue.Remove(queuePosition)
}
} }
} }
} }
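As an example of the resend path (timings illustrative): with EthTxResendTimeout set to two minutes and EthNoReuseNonce left disabled, a forgeBatch transaction still unmined two minutes after its SendTimestamp is re-sent with the nonce taken from the original EthTx, so the replacement supersedes the stuck transaction once its gas price clears the node's replacement rules (the ErrReplaceUnderpriced branch above bumps it when needed).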
// nolint reason: this function will be used in the future func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
//nolint:unused next := 0
func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool { // batchNum := 0
for {
batchInfo := t.queue.At(next)
if batchInfo == nil {
break
}
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
return nil
} else if err != nil {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
next++
continue
}
confirm, err := t.handleReceipt(ctx, batchInfo)
if ctx.Err() != nil {
return nil
} else if err != nil {
// Transaction was rejected
if t.minPipelineNum <= batchInfo.PipelineNum {
t.minPipelineNum = batchInfo.PipelineNum + 1
}
t.queue.Remove(next)
continue
}
// If tx is pending but is from a cancelled pipeline, remove it
// from the queue
if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum {
// batchNum++
t.queue.Remove(next)
continue
}
}
next++
}
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
if err != nil {
return err
}
if !t.cfg.EthNoReuseNonce {
t.accNextNonce = accNonce
}
return nil
}
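For example, if the account's mined nonce is 7 and the discarded pipeline still had transactions pending with nonces 7 and 8, removeBadBatchInfos drops their BatchInfos from the queue and, with EthNoReuseNonce disabled, resets accNextNonce back to 7 so the next pipeline's forgeBatch transactions reuse those nonces and replace the stale ones.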
func (t *TxManager) canForgeAt(blockNum int64) bool {
return canForge(&t.consts.Auction, &t.vars.Auction, return canForge(&t.consts.Auction, &t.vars.Auction,
&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot, &t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
t.cfg.ForgerAddress, blockNum) t.cfg.ForgerAddress, blockNum)
} }
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
}
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
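For example, with ForgeL1L2BatchTimeout = 10 and the last L1 batch (sent or synchronized) at block 100, mustL1L2Batch returns true from block 109 onwards, which is the deadline that shouldSendRollupForgeBatch enforces before sending a non-L1 batch.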


@@ -0,0 +1,15 @@
package coordinator
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddPerc(t *testing.T) {
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}

1026
db/historydb/apiqueries.go Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -22,6 +22,7 @@ import (
) )
var historyDB *HistoryDB var historyDB *HistoryDB
var historyDBWithACC *HistoryDB
// In order to run the test you need to run a Posgres DB with // In order to run the test you need to run a Posgres DB with
// a database named "history" that is accessible by // a database named "history" that is accessible by
@@ -38,10 +39,12 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
historyDB = NewHistoryDB(db) historyDB = NewHistoryDB(db, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, apiConnCon)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -85,7 +88,7 @@ func TestBlocks(t *testing.T) {
blocks..., blocks...,
) )
// Get all blocks from DB // Get all blocks from DB
fetchedBlocks, err := historyDB.GetBlocks(fromBlock, toBlock) fetchedBlocks, err := historyDB.getBlocks(fromBlock, toBlock)
assert.Equal(t, len(blocks), len(fetchedBlocks)) assert.Equal(t, len(blocks), len(fetchedBlocks))
// Compare generated vs fetched blocks // Compare generated vs fetched blocks
assert.NoError(t, err) assert.NoError(t, err)
@@ -200,6 +203,10 @@ func TestBatches(t *testing.T) {
fetchedLastBatchNum, err := historyDB.GetLastBatchNum() fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum) assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
// Test GetLastL1TxsNum // Test GetLastL1TxsNum
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum() fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
assert.NoError(t, err) assert.NoError(t, err)
@@ -208,6 +215,12 @@ func TestBatches(t *testing.T) {
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum() fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum) assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
} }
func TestBids(t *testing.T) { func TestBids(t *testing.T) {
@@ -245,9 +258,8 @@ func TestTokens(t *testing.T) {
err := historyDB.AddTokens(tokens) err := historyDB.AddTokens(tokens)
assert.NoError(t, err) assert.NoError(t, err)
tokens = append([]common.Token{ethToken}, tokens...) tokens = append([]common.Token{ethToken}, tokens...)
limit := uint(10)
// Fetch tokens // Fetch tokens
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc) fetchedTokens, err := historyDB.GetTokensTest()
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched tokens vs generated tokens // Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger // All the tokens should have USDUpdate set by the DB trigger
@@ -267,7 +279,7 @@ func TestTokens(t *testing.T) {
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value)) assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
} }
// Fetch tokens // Fetch tokens
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc) fetchedTokens, err = historyDB.GetTokensTest()
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched tokens vs generated tokens // Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger // All the tokens should have USDUpdate set by the DB trigger
@@ -302,9 +314,8 @@ func TestTokensUTF8(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Work with nonUTFTokens as tokens one gets updated and non UTF-8 characters are lost // Work with nonUTFTokens as tokens one gets updated and non UTF-8 characters are lost
nonUTFTokens = append([]common.Token{ethToken}, nonUTFTokens...) nonUTFTokens = append([]common.Token{ethToken}, nonUTFTokens...)
limit := uint(10)
// Fetch tokens // Fetch tokens
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc) fetchedTokens, err := historyDB.GetTokensTest()
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched tokens vs generated tokens // Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger // All the tokens should have USDUpdate set by the DB trigger
@@ -324,7 +335,7 @@ func TestTokensUTF8(t *testing.T) {
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value)) assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
} }
// Fetch tokens // Fetch tokens
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc) fetchedTokens, err = historyDB.GetTokensTest()
assert.NoError(t, err) assert.NoError(t, err)
// Compare fetched tokens vs generated tokens // Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger // All the tokens should have USDUpdate set by the DB trigger
@@ -1087,9 +1098,8 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals) assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals)
} }
func TestGetMetrics(t *testing.T) { func TestGetMetricsAPI(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
set := ` set := `
Type: Blockchain Type: Blockchain
@@ -1146,7 +1156,7 @@ func TestGetMetrics(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDB.GetMetrics(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
@@ -1165,7 +1175,7 @@ func TestGetMetrics(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee) assert.Equal(t, float64(0), res.AvgTransactionFee)
} }
func TestGetMetricsMoreThan24Hours(t *testing.T) { func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
testUsersLen := 3 testUsersLen := 3
@@ -1226,7 +1236,7 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDB.GetMetrics(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001) assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
@@ -1245,15 +1255,15 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee) assert.Equal(t, float64(0), res.AvgTransactionFee)
} }
func TestGetMetricsEmpty(t *testing.T) { func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDB.GetMetrics(0) _, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err) assert.NoError(t, err)
} }
func TestGetAvgTxFeeEmpty(t *testing.T) { func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDB.GetAvgTxFee() _, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err) assert.NoError(t, err)
} }


@@ -425,12 +425,13 @@ func (k *KVDB) MakeCheckpoint() error {
} }
// if checkpoint BatchNum already exist in disk, delete it // if checkpoint BatchNum already exist in disk, delete it
if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(checkpointPath); err != nil { if err := os.RemoveAll(checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
// execute Checkpoint // execute Checkpoint
@@ -451,12 +452,25 @@ func (k *KVDB) MakeCheckpoint() error {
return nil return nil
} }
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// DeleteCheckpoint removes if exist the checkpoint of the given batchNum // DeleteCheckpoint removes if exist the checkpoint of the given batchNum
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error { func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum)) checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum)) return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
} else if err != nil {
return tracerr.Wrap(err)
} }
return os.RemoveAll(checkpointPath) return os.RemoveAll(checkpointPath)
@@ -520,6 +534,8 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
if _, err := os.Stat(source); os.IsNotExist(err) { if _, err := os.Stat(source); os.IsNotExist(err) {
// if kvdb does not have checkpoint at batchNum, return err // if kvdb does not have checkpoint at batchNum, return err
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source)) return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
} else if err != nil {
return tracerr.Wrap(err)
} }
// By locking we allow calling MakeCheckpointFromTo from multiple // By locking we allow calling MakeCheckpointFromTo from multiple
// places at the same time for the same stateDB. This allows the // places at the same time for the same stateDB. This allows the
@@ -533,12 +549,13 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
func pebbleMakeCheckpoint(source, dest string) error { func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint // Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); !os.IsNotExist(err) { if _, err := os.Stat(dest); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(dest); err != nil { if err := os.RemoveAll(dest); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
sto, err := pebble.NewPebbleStorage(source, false) sto, err := pebble.NewPebbleStorage(source, false)

85
db/l2db/apiqueries.go Normal file

@@ -0,0 +1,85 @@
package l2db
import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
return l2db.AddAccountCreationAuth(auth)
}
// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}
// AddTxAPI inserts a tx to the pool
func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}


@@ -25,17 +25,25 @@ type L2DB struct {
safetyPeriod common.BatchNum safetyPeriod common.BatchNum
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
apiConnCon *db.APIConnectionController
} }
// NewL2DB creates a L2DB. // NewL2DB creates a L2DB.
// Creating it requires a db connection, the safety period expressed in batches, // Creating it requires a db connection, the safety period expressed in batches,
// the maxTxs that the DB should accept and the TTL (time to live) for pending txs. // the maxTxs that the DB should accept and the TTL (time to live) for pending txs.
func NewL2DB(db *sqlx.DB, safetyPeriod common.BatchNum, maxTxs uint32, TTL time.Duration) *L2DB { func NewL2DB(
db *sqlx.DB,
safetyPeriod common.BatchNum,
maxTxs uint32,
TTL time.Duration,
apiConnCon *db.APIConnectionController,
) *L2DB {
return &L2DB{ return &L2DB{
db: db, db: db,
safetyPeriod: safetyPeriod, safetyPeriod: safetyPeriod,
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
apiConnCon: apiConnCon,
} }
} }
@@ -47,7 +55,6 @@ func (l2db *L2DB) DB() *sqlx.DB {
// AddAccountCreationAuth inserts an account creation authorization into the DB // AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
// return meddler.Insert(l2db.db, "account_creation_auth", auth)
_, err := l2db.db.Exec( _, err := l2db.db.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature) `INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`, VALUES ($1, $2, $3);`,
@@ -66,16 +73,6 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
)) ))
} }
// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}
// AddTx inserts a tx to the pool // AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error { func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow( row := l2db.db.QueryRow(
@@ -173,16 +170,6 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx)) return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
} }
// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
// selectPoolTxCommon select part of queries to get common.PoolL2Tx // selectPoolTxCommon select part of queries to get common.PoolL2Tx
const selectPoolTxCommon = `SELECT tx_pool.tx_id, from_idx, to_idx, tx_pool.to_eth_addr, const selectPoolTxCommon = `SELECT tx_pool.tx_id, from_idx, to_idx, tx_pool.to_eth_addr,
tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce, tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
@@ -202,16 +189,6 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
)) ))
} }
// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}
// GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee // GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) { func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx var txs []*common.PoolL2Tx
@@ -346,9 +323,10 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// The state of the affected txs can change form Forged -> Pending or from Invalid -> Pending // The state of the affected txs can change form Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error { func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.db.Exec( _, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1 `UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3) AND batch_num > $4`, WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
common.PoolL2TxStateForging,
common.PoolL2TxStateForged, common.PoolL2TxStateForged,
common.PoolL2TxStateInvalid, common.PoolL2TxStateInvalid,
lastValidBatch, lastValidBatch,


@@ -21,6 +21,7 @@ import (
) )
var l2DB *L2DB var l2DB *L2DB
var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB var historyDB *historydb.HistoryDB
var tc *til.Context var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD var tokens map[common.TokenID]historydb.TokenWithUSD
@@ -34,9 +35,11 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour) l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db) historyDB = historydb.NewHistoryDB(db, nil)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -267,7 +270,7 @@ func TestStartForging(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs { for _, id := range startForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -312,7 +315,7 @@ func TestDoneForging(t *testing.T) {
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs { for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -344,7 +347,7 @@ func TestInvalidate(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -385,7 +388,7 @@ func TestInvalidateOldNonces(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -460,13 +463,13 @@ func TestReorg(t *testing.T) {
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
@@ -537,13 +540,13 @@ func TestReorg2(t *testing.T) {
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err) assert.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }


@@ -498,11 +498,17 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
}, nil }, nil
} }
// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints. // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer { if fromSynchronizer {
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil { if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
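A sketch of a hypothetical caller (for instance a coordinator restoring a previous pipeline's state) that uses the new CheckpointExists to avoid resetting to a checkpoint that was never written:

exists, err := localStateDB.CheckpointExists(batchNum)
if err != nil {
	return tracerr.Wrap(err)
}
if exists {
	// Safe to reuse the local checkpoint instead of rebuilding the state.
	if err := localStateDB.Reset(batchNum, false); err != nil {
		return tracerr.Wrap(err)
	}
}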


@@ -1,16 +1,19 @@
package db package db
import ( import (
"context"
"database/sql" "database/sql"
"fmt" "fmt"
"math/big" "math/big"
"reflect" "reflect"
"strings" "strings"
"time"
"github.com/gobuffalo/packr/v2" "github.com/gobuffalo/packr/v2"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate" migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler" "github.com/russross/meddler"
) )
@@ -84,6 +87,32 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
return db, nil return db, nil
} }
// APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct {
smphr semaphore.Semaphore
timeout time.Duration
}
// NewAPICnnectionController initializes an APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{
smphr: semaphore.New(maxConnections),
timeout: timeout,
}
}
// Acquire reserves a SQL connection. If the connection is not acquired
// within the timeout, the function will return an error
func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
ctx, cancel := context.WithTimeout(context.Background(), acc.timeout) //nolint:govet
return cancel, acc.smphr.Acquire(ctx, 1)
}
// Release frees a SQL connection
func (acc *APIConnectionController) Release() {
acc.smphr.Release(1)
}
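Consumers (the history and L2 DB API-facing methods) wrap every query in Acquire/Release; a sketch with a hypothetical apiHandler type holding the controller:

func (h *apiHandler) queryAPI() error {
	cancel, err := h.apiConnCon.Acquire()
	defer cancel() // always release the timeout context
	if err != nil {
		return tracerr.Wrap(err) // timed out waiting for a free connection
	}
	defer h.apiConnCon.Release()
	// ... run the SQL query while holding the semaphore ...
	return nil
}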
// initMeddler registers tags to be used to read/write from SQL DBs using meddler // initMeddler registers tags to be used to read/write from SQL DBs using meddler
func initMeddler() { func initMeddler() {
meddler.Register("bigint", BigIntMeddler{}) meddler.Register("bigint", BigIntMeddler{})

4
go.mod

@@ -6,6 +6,7 @@ require (
github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml v0.3.1
github.com/dghubble/sling v1.3.0 github.com/dghubble/sling v1.3.0
github.com/ethereum/go-ethereum v1.9.25 github.com/ethereum/go-ethereum v1.9.25
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/getkin/kin-openapi v0.22.0 github.com/getkin/kin-openapi v0.22.0
github.com/gin-contrib/cors v1.3.1 github.com/gin-contrib/cors v1.3.1
github.com/gin-gonic/gin v1.5.0 github.com/gin-gonic/gin v1.5.0
@@ -17,12 +18,15 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0 github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0 github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/mapstructure v1.3.0 github.com/mitchellh/mapstructure v1.3.0
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/russross/meddler v1.0.0 github.com/russross/meddler v1.0.0
github.com/sirupsen/logrus v1.5.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
github.com/stretchr/testify v1.6.1 github.com/stretchr/testify v1.6.1
github.com/urfave/cli/v2 v2.2.0 github.com/urfave/cli/v2 v2.2.0
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0

13
go.sum

@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
 github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -84,6 +86,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
+github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
+github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -169,6 +173,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -415,6 +421,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
 github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
 github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
+github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
+github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -596,6 +605,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -614,6 +625,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
+github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=


@@ -2,6 +2,7 @@ package node
 import (
 "context"
+"errors"
 "fmt"
 "net/http"
 "sync"
@@ -83,8 +84,15 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 if err != nil {
 return nil, tracerr.Wrap(err)
 }
+var apiConnCon *dbUtils.APIConnectionController
+if cfg.API.Explorer || mode == ModeCoordinator {
+apiConnCon = dbUtils.NewAPICnnectionController(
+cfg.API.MaxSQLConnections,
+cfg.API.SQLConnectionTimeout.Duration,
+)
+}
-historyDB := historydb.NewHistoryDB(db)
+historyDB := historydb.NewHistoryDB(db, apiConnCon)
 ethClient, err := ethclient.Dial(cfg.Web3.URL)
 if err != nil {
@@ -197,6 +205,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 cfg.Coordinator.L2DB.SafetyPeriod,
 cfg.Coordinator.L2DB.MaxTxs,
 cfg.Coordinator.L2DB.TTL.Duration,
+apiConnCon,
 )
 // Unlock FeeAccount EthAddr in the keystore to generate the
@@ -293,6 +302,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
 EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
 EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
+EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
+EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
+MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
 TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
 DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
 Purger: coordinator.PurgerCfg{
@@ -479,11 +491,15 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
 if stats.Synced() {
 if err := n.nodeAPI.api.UpdateNetworkInfo(
 stats.Eth.LastBlock, stats.Sync.LastBlock,
-common.BatchNum(stats.Eth.LastBatch),
+common.BatchNum(stats.Eth.LastBatchNum),
 stats.Sync.Auction.CurrentSlot.SlotNum,
 ); err != nil {
 log.Errorw("API.UpdateNetworkInfo", "err", err)
 }
+} else {
+n.nodeAPI.api.UpdateNetworkInfoBlock(
+stats.Eth.LastBlock, stats.Sync.LastBlock,
+)
 }
 }
 }
@@ -565,7 +581,11 @@ func (n *Node) StartSynchronizer() {
 if n.ctx.Err() != nil {
 continue
 }
-log.Errorw("Synchronizer.Sync", "err", err)
+if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
+log.Warnw("Synchronizer.Sync", "err", err)
+} else {
+log.Errorw("Synchronizer.Sync", "err", err)
+}
 }
 }
 }
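The new Warnw/Errorw split in StartSynchronizer relies on errors.Is matching a sentinel error through the wrapping layers. A minimal, self-contained sketch of that pattern follows; the sentinel and messages are stand-ins for eth.ErrBlockHashMismatchEvent (which, from its name, is presumably raised when the block queried by hash was reorged away), and it assumes the wrapping used in the node preserves Unwrap.

package main

import (
    "errors"
    "fmt"
)

// errBlockHashMismatch stands in for eth.ErrBlockHashMismatchEvent.
var errBlockHashMismatch = errors.New("block hash mismatch in event query")

// syncOnce simulates a sync step that fails because the block changed
// between the header fetch and the event query.
func syncOnce() error {
    return fmt.Errorf("RollupEventsByBlock: %w", errBlockHashMismatch)
}

func main() {
    if err := syncOnce(); err != nil {
        if errors.Is(err, errBlockHashMismatch) {
            fmt.Println("warn:", err) // transient, retried on the next loop iteration
        } else {
            fmt.Println("error:", err)
        }
    }
}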


@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
 pass := os.Getenv("POSTGRES_PASS")
 db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 assert.NoError(t, err)
-historyDB := historydb.NewHistoryDB(db)
+historyDB := historydb.NewHistoryDB(db, nil)
 // Clean DB
 test.WipeDB(historyDB.DB())
 // Populate DB
@@ -46,8 +46,7 @@ func TestPriceUpdater(t *testing.T) {
 // Update prices
 pu.UpdatePrices(context.Background())
 // Check that prices have been updated
-limit := uint(10)
-fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, historydb.OrderAsc)
+fetchedTokens, err := historyDB.GetTokensTest()
 require.NoError(t, err)
 // TokenID 0 (ETH) is always on the DB
 assert.Equal(t, 2, len(fetchedTokens))


@@ -25,12 +25,12 @@ type Stats struct {
 Updated time.Time
 FirstBlockNum int64
 LastBlock common.Block
-LastBatch int64
+LastBatchNum int64
 }
 Sync struct {
 Updated time.Time
 LastBlock common.Block
-LastBatch int64
+LastBatch common.Batch
 // LastL1BatchBlock is the last ethereum block in which an
 // l1Batch was forged
 LastL1BatchBlock int64
@@ -77,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
 }
 // UpdateSync updates the synchronizer stats
-func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
+func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
 lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
 now := time.Now()
 s.rw.Lock()
 s.Sync.LastBlock = *lastBlock
 if lastBatch != nil {
-s.Sync.LastBatch = int64(*lastBatch)
+s.Sync.LastBatch = *lastBatch
 }
 if lastL1BatchBlock != nil {
 s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -105,16 +105,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
 lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
 if err != nil {
-return tracerr.Wrap(err)
+return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
 }
-lastBatch, err := ethClient.RollupLastForgedBatch()
+lastBatchNum, err := ethClient.RollupLastForgedBatch()
 if err != nil {
-return tracerr.Wrap(err)
+return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
 }
 s.rw.Lock()
 s.Eth.Updated = now
 s.Eth.LastBlock = *lastBlock
-s.Eth.LastBatch = lastBatch
+s.Eth.LastBatchNum = lastBatchNum
 s.rw.Unlock()
 return nil
 }
@@ -139,6 +139,10 @@ func (s *StatsHolder) CopyStats() *Stats {
 sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
 common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
 }
+if s.Sync.LastBatch.StateRoot != nil {
+sCopy.Sync.LastBatch.StateRoot =
+common.CopyBigInt(s.Sync.LastBatch.StateRoot)
+}
 s.rw.RUnlock()
 return &sCopy
 }
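The new StateRoot branch in CopyStats is needed because *big.Int fields alias when a struct is copied by value. A small standalone illustration (assuming common.CopyBigInt is essentially new(big.Int).Set):

package main

import (
    "fmt"
    "math/big"
)

type batchStats struct {
    StateRoot *big.Int
}

func main() {
    src := batchStats{StateRoot: big.NewInt(1234)}

    shallow := src // copies the pointer: both structs share one big.Int
    deep := src
    deep.StateRoot = new(big.Int).Set(src.StateRoot) // what CopyBigInt boils down to

    src.StateRoot.SetInt64(9999) // later mutation by the synchronizer

    fmt.Println(shallow.StateRoot) // 9999: the "copy" changed too
    fmt.Println(deep.StateRoot)    // 1234: an independent snapshot
}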
@@ -152,9 +156,9 @@ func (s *StatsHolder) blocksPerc() float64 {
 float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
 }
-func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
+func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
 return float64(batchNum) * 100.0 /
-float64(s.Eth.LastBatch)
+float64(s.Eth.LastBatchNum)
 }
 // StartBlockNums sets the first block used to start tracking the smart
@@ -329,23 +333,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
 return nil
 }
-// firstBatchBlockNum is the blockNum of first batch in that block, if any
-func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
-slot := common.Slot{
-SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
-ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
-}
+// updateCurrentSlot updates the slot with information of the current slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+// hasBatch is true when the last synced block contained at least one batch.
+func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
 // We want the next block because the current one is already mined
 blockNum := s.stats.Sync.LastBlock.Num + 1
 slotNum := s.consts.Auction.SlotNum(blockNum)
+firstBatchBlockNum := s.stats.Sync.LastBlock.Num
 if reset {
+// Using this query only to know if there
 dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
 if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
+return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
 } else if tracerr.Unwrap(err) == sql.ErrNoRows {
-firstBatchBlockNum = nil
+hasBatch = false
 } else {
-firstBatchBlockNum = &dbFirstBatchBlockNum
+hasBatch = true
+firstBatchBlockNum = dbFirstBatchBlockNum
 }
 slot.ForgerCommitment = false
 } else if slotNum > slot.SlotNum {
@@ -356,11 +362,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
 slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 // If Synced, update the current coordinator
 if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-if err := s.setSlotCoordinator(&slot); err != nil {
+if err := s.setSlotCoordinator(slot); err != nil {
-return nil, tracerr.Wrap(err)
+return tracerr.Wrap(err)
 }
-if firstBatchBlockNum != nil &&
+if hasBatch &&
-s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
+s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
 int64(s.vars.Auction.SlotDeadline) {
 slot.ForgerCommitment = true
 }
@@ -369,57 +375,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
 // BEGIN SANITY CHECK
 canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
 if err != nil {
-return nil, tracerr.Wrap(err)
+return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
 }
 if !canForge {
-return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 "differs from smart contract: %+v", slot))
 }
 // END SANITY CHECK
 }
-return &slot, nil
+return nil
 }
-func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
+// updateNextSlot updates the slot with information of the next slot.
+// The information about which coordinator is allowed to forge is only updated
+// when we are Synced.
+func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
 // We want the next block because the current one is already mined
 blockNum := s.stats.Sync.LastBlock.Num + 1
 slotNum := s.consts.Auction.SlotNum(blockNum) + 1
-slot := common.Slot{
-SlotNum: slotNum,
-ForgerCommitment: false,
-}
+slot.SlotNum = slotNum
+slot.ForgerCommitment = false
 slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 // If Synced, update the current coordinator
 if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-if err := s.setSlotCoordinator(&slot); err != nil {
+if err := s.setSlotCoordinator(slot); err != nil {
-return nil, tracerr.Wrap(err)
+return tracerr.Wrap(err)
 }
 // TODO: Remove this SANITY CHECK once this code is tested enough
 // BEGIN SANITY CHECK
 canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
 if err != nil {
-return nil, tracerr.Wrap(err)
+return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
 }
 if !canForge {
-return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 "differs from smart contract: %+v", slot))
 }
 // END SANITY CHECK
 }
-return &slot, nil
+return nil
 }
-func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
-current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
-if err != nil {
+// updateCurrentNextSlotIfSync updates the current and next slot. Information
+// about forger address that is allowed to forge is only updated if we are
+// Synced.
+func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
+current := s.stats.Sync.Auction.CurrentSlot
+next := s.stats.Sync.Auction.NextSlot
+if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
 return tracerr.Wrap(err)
 }
-next, err := s.getNextSlot()
-if err != nil {
+if err := s.updateNextSlot(&next); err != nil {
 return tracerr.Wrap(err)
 }
-s.stats.UpdateCurrentNextSlot(current, next)
+s.stats.UpdateCurrentNextSlot(&current, &next)
 return nil
 }
@@ -458,9 +468,9 @@ func (s *Synchronizer) init() error {
 "ethLastBlock", s.stats.Eth.LastBlock,
 )
 log.Infow("Sync init batch",
-"syncLastBatch", s.stats.Sync.LastBatch,
+"syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
-"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
+"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
-"ethLastBatch", s.stats.Eth.LastBatch,
+"ethLastBatch", s.stats.Eth.LastBatchNum,
 )
 return nil
 }
@@ -521,7 +531,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 if tracerr.Unwrap(err) == ethereum.NotFound {
 return nil, nil, nil
 } else if err != nil {
-return nil, nil, tracerr.Wrap(err)
+return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
 }
 log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
 ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -627,14 +637,14 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 }
 }
 s.stats.UpdateSync(ethBlock,
-&rollupData.Batches[batchesLen-1].Batch.BatchNum,
+&rollupData.Batches[batchesLen-1].Batch,
 lastL1BatchBlock, lastForgeL1TxsNum)
 }
-var firstBatchBlockNum *int64
+hasBatch := false
 if len(rollupData.Batches) > 0 {
-firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
+hasBatch = true
 }
-if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
+if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
 return nil, nil, tracerr.Wrap(err)
 }
@@ -646,8 +656,8 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 for _, batchData := range rollupData.Batches {
 log.Debugw("Synced batch",
 "syncLastBatch", batchData.Batch.BatchNum,
-"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
+"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
-"ethLastBatch", s.stats.Eth.LastBatch,
+"ethLastBatch", s.stats.Eth.LastBatchNum,
 )
 }
@@ -700,15 +710,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
 consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
 rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
 if err != nil {
-return nil, nil, tracerr.Wrap(err)
+return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
 }
 auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
 if err != nil {
-return nil, nil, tracerr.Wrap(err)
+return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
 }
 wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
 if err != nil {
-return nil, nil, tracerr.Wrap(err)
+return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
 }
 rollupVars := rollupInit.RollupVariables()
 auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
@@ -753,15 +763,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 s.vars.WDelayer = *wDelayer
 }
-batchNum, err := s.historyDB.GetLastBatchNum()
+batch, err := s.historyDB.GetLastBatch()
 if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
 return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
 }
 if tracerr.Unwrap(err) == sql.ErrNoRows {
-batchNum = 0
+batch = &common.Batch{}
 }
-err = s.stateDB.Reset(batchNum)
+err = s.stateDB.Reset(batch.BatchNum)
 if err != nil {
 return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
 }
@@ -783,9 +793,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 lastForgeL1TxsNum = &n
 }
-s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
+s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
-if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
+if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
 return tracerr.Wrap(err)
 }
 return nil
@@ -802,7 +812,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 // the expected one.
 rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
 if err != nil {
-return nil, tracerr.Wrap(err)
+return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
 }
 // No events in this block
 if rollupEvents == nil {
@@ -919,9 +929,15 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 return nil, tracerr.Wrap(err)
 }
 if s.stateDB.CurrentBatch() != batchNum {
-return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
+return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
+"evtForgeBatch.BatchNum = (%v)",
 s.stateDB.CurrentBatch(), batchNum))
 }
+if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
+return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
+"forgeBatchArgs.NewStRoot (%v)",
+s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
+}
 // Transform processed PoolL2 txs to L2 and store in BatchData
 l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
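The added root comparison gives the synchronizer an early, local consistency check: the state it rebuilt from the forged transactions must hash to the same root that was published on L1. A minimal sketch of the comparison itself (the function and values are hypothetical; only the big.Int Cmp pattern mirrors the code above):

package main

import (
    "fmt"
    "math/big"
)

// verifyStateRoot mirrors the new sanity check: the locally recomputed
// Merkle root must equal the NewStRoot argument of the ForgeBatch call.
func verifyStateRoot(localRoot, forgedRoot *big.Int) error {
    if localRoot.Cmp(forgedRoot) != 0 {
        return fmt.Errorf("state root mismatch: local %v != forged %v", localRoot, forgedRoot)
    }
    return nil
}

func main() {
    local := big.NewInt(42)
    forged := big.NewInt(42)
    fmt.Println(verifyStateRoot(local, forged)) // <nil>: local state agrees with L1
}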
@@ -1106,7 +1122,7 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
 // Get auction events in the block
 auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
 if err != nil {
-return nil, tracerr.Wrap(err)
+return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
 }
 // No events in this block
 if auctionEvents == nil {
@@ -1203,7 +1219,7 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
 // Get wDelayer events in the block
 wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
 if err != nil {
-return nil, tracerr.Wrap(err)
+return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
 }
 // No events in this block
 if wDelayerEvents == nil {


@@ -314,7 +314,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
 pass := os.Getenv("POSTGRES_PASS")
 db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 require.NoError(t, err)
-historyDB := historydb.NewHistoryDB(db)
+historyDB := historydb.NewHistoryDB(db, nil)
 // Clear DB
 test.WipeDB(historyDB.DB())


@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
 })
 }
-hdb := historydb.NewHistoryDB(db)
+hdb := historydb.NewHistoryDB(db, nil)
 assert.NoError(t, hdb.AddBlock(&common.Block{
 Num: 1,
 }))
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
 pass := os.Getenv("POSTGRES_PASS")
 db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 require.NoError(t, err)
-l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
+l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
 dir, err := ioutil.TempDir("", "tmpSyncDB")
 require.NoError(t, err)


@@ -89,12 +89,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
 // Reset tells the TxSelector to get its internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
+func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-err := txsel.localAccountsDB.Reset(batchNum, true)
+return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
-if err != nil {
-return tracerr.Wrap(err)
-}
-return nil
 }
 func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {


@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 pass := os.Getenv("POSTGRES_PASS")
 db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 require.NoError(t, err)
-l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
+l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
 dir, err := ioutil.TempDir("", "tmpdb")
 require.NoError(t, err)
@@ -106,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
 })
 }
-hdb := historydb.NewHistoryDB(db)
+hdb := historydb.NewHistoryDB(db, nil)
 assert.NoError(t, hdb.AddBlock(&common.Block{
 Num: 1,
 }))