mirror of
https://github.com/arnaucube/hermez-node.git
synced 2026-02-07 11:26:44 +01:00
Compare commits: feature/fl ... feature/up (10 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | b38fea17d6 |  |
|  | 0ffd69ad2c |  |
|  | aca106a2ee |  |
|  | e737aebd28 |  |
|  | 104b277de0 |  |
|  | 39eb715b98 |  |
|  | d284baf8c4 |  |
|  | 33634a00b1 |  |
|  | deede9541b |  |
|  | e2006403d7 |  |
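Taken together, the hunks below appear to show feature/up relative to feature/fl: the 40-bit Float40 amount encoding (common/float40.go and its tests) is dropped in favor of the earlier 16-bit Float16, the API's SQL connection controller and its timeout handling are removed, and the *API-suffixed DB accessors are renamed back (e.g. GetTxsAPI to GetHistoryTxs).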
@@ -26,7 +26,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
        return
    }
    // Insert to DB
-    if err := a.l2.AddAccountCreationAuthAPI(commonAuth); err != nil {
+    if err := a.l2.AddAccountCreationAuth(commonAuth); err != nil {
        retSQLErr(err, c)
        return
    }
@@ -11,7 +11,6 @@ import (
    "net/http"
    "os"
    "strconv"
-    "sync"
    "testing"
    "time"

@@ -28,7 +27,6 @@ import (
    "github.com/hermeznetwork/hermez-node/test/til"
    "github.com/hermeznetwork/hermez-node/test/txsets"
    "github.com/hermeznetwork/tracerr"
-    "github.com/stretchr/testify/require"
)

// Pendinger is an interface that allows getting last returned item ID and PendingItems to be used for building fromItem
@@ -201,8 +199,7 @@ func TestMain(m *testing.M) {
    if err != nil {
        panic(err)
    }
-    apiConnCon := db.NewAPICnnectionController(1, time.Second)
-    hdb := historydb.NewHistoryDB(database, apiConnCon)
+    hdb := historydb.NewHistoryDB(database)
    if err != nil {
        panic(err)
    }
@@ -221,7 +218,7 @@ func TestMain(m *testing.M) {
        panic(err)
    }
    // L2DB
-    l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
+    l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour)
    test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
    // Config (smart contract constants)
    chainID := uint16(0)
@@ -577,82 +574,6 @@ func TestMain(m *testing.M) {
    os.Exit(result)
}

-func TestTimeout(t *testing.T) {
-    pass := os.Getenv("POSTGRES_PASS")
-    databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
-    require.NoError(t, err)
-    apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
-    hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
-    require.NoError(t, err)
-    // L2DB
-    l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
-
-    // API
-    apiGinTO := gin.Default()
-    finishWait := make(chan interface{})
-    startWait := make(chan interface{})
-    apiGinTO.GET("/wait", func(c *gin.Context) {
-        cancel, err := apiConnConTO.Acquire()
-        defer cancel()
-        require.NoError(t, err)
-        defer apiConnConTO.Release()
-        startWait <- nil
-        <-finishWait
-    })
-    // Start server
-    serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
-    go func() {
-        if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
-            require.NoError(t, err)
-        }
-    }()
-    _config := getConfigTest(0)
-    _, err = NewAPI(
-        true,
-        true,
-        apiGinTO,
-        hdbTO,
-        nil,
-        l2DBTO,
-        &_config,
-    )
-    require.NoError(t, err)
-
-    client := &http.Client{}
-    httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
-    require.NoError(t, err)
-    httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
-    require.NoError(t, err)
-    // Request that will get timed out
-    var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        // Request that will make the API busy
-        _, err = client.Do(httpReqWait)
-        require.NoError(t, err)
-        wg.Done()
-    }()
-    <-startWait
-    resp, err := client.Do(httpReq)
-    require.NoError(t, err)
-    require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
-    defer resp.Body.Close() //nolint
-    body, err := ioutil.ReadAll(resp.Body)
-    require.NoError(t, err)
-    // Unmarshal body into return struct
-    msg := &errorMsg{}
-    err = json.Unmarshal(body, msg)
-    require.NoError(t, err)
-    // Check that the error was the expected down
-    require.Equal(t, errSQLTimeout, msg.Message)
-    finishWait <- nil
-
-    // Stop server
-    wg.Wait()
-    require.NoError(t, serverTO.Shutdown(context.Background()))
-    require.NoError(t, databaseTO.Close())
-}
-
 func doGoodReqPaginated(
    path, order string,
    iterStruct Pendinger,
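The deleted TestTimeout exercised a one-slot connection limiter: the /wait handler held the only slot while a second request timed out and got 503 with errSQLTimeout. A generic sketch of that limiter pattern under stated assumptions (this is not the node's actual APICnnectionController implementation):

// Illustrative semaphore with timed Acquire, in the spirit of the
// connection controller the deleted test exercised.
package main

import (
	"errors"
	"time"
)

var errTimeout = errors.New("context deadline exceeded")

type connLimiter struct {
	slots   chan struct{}
	timeout time.Duration
}

func newConnLimiter(n int, timeout time.Duration) *connLimiter {
	return &connLimiter{slots: make(chan struct{}, n), timeout: timeout}
}

// Acquire blocks for a slot, failing once the timeout elapses.
func (l *connLimiter) Acquire() error {
	select {
	case l.slots <- struct{}{}:
		return nil
	case <-time.After(l.timeout):
		return errTimeout
	}
}

func (l *connLimiter) Release() { <-l.slots }

func main() {
	l := newConnLimiter(1, 100*time.Millisecond)
	if err := l.Acquire(); err != nil {
		panic(err)
	}
	// A second Acquire would now block and fail after 100ms.
	l.Release()
}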
@@ -108,7 +108,7 @@ func (a *API) getFullBatch(c *gin.Context) {
    }
    // Fetch txs forged in the batch from historyDB
    maxTxsPerBatch := uint(2048) //nolint:gomnd
-    txs, _, err := a.h.GetTxsAPI(
+    txs, _, err := a.h.GetHistoryTxs(
        nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
    )
    if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -30,12 +30,6 @@ const (

    // Error for duplicated key
    errDuplicatedKey = "Item already exists"
-
-    // Error for timeout due to SQL connection
-    errSQLTimeout = "The node is under heavy preasure, please try again later"
-
-    // Error message returned when context reaches timeout
-    errCtxTimeout = "context deadline exceeded"
)

var (
@@ -45,19 +39,15 @@ var (

func retSQLErr(err error, c *gin.Context) {
    log.Warnw("HTTP API SQL request error", "err", err)
-    errMsg := tracerr.Unwrap(err).Error()
-    if errMsg == errCtxTimeout {
-        c.JSON(http.StatusServiceUnavailable, errorMsg{
-            Message: errSQLTimeout,
-        })
-    } else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
+    if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
        // https://www.postgresql.org/docs/current/errcodes-appendix.html
        if sqlErr.Code == "23505" {
            c.JSON(http.StatusInternalServerError, errorMsg{
                Message: errDuplicatedKey,
            })
        }
-    } else if tracerr.Unwrap(err) == sql.ErrNoRows {
+    }
+    if tracerr.Unwrap(err) == sql.ErrNoRows {
        c.JSON(http.StatusNotFound, errorMsg{
            Message: err.Error(),
        })
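For context on the surviving branch above: PostgreSQL signals a unique-key violation with SQLSTATE 23505, which lib/pq exposes as pq.Error.Code. A self-contained sketch of that mapping (retErr and errorMsg are illustrative names, not the node's):

// Illustrative only: map a Postgres unique-violation to an API error,
// mirroring the retSQLErr logic above.
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/lib/pq"
)

type errorMsg struct {
	Message string `json:"message"`
}

func retErr(err error, c *gin.Context) {
	// pq.Error carries the SQLSTATE code; 23505 is unique_violation.
	if sqlErr, ok := err.(*pq.Error); ok && sqlErr.Code == "23505" {
		c.JSON(http.StatusInternalServerError, errorMsg{Message: "Item already exists"})
		return
	}
	c.JSON(http.StatusInternalServerError, errorMsg{Message: err.Error()})
}

func main() {
	r := gin.New()
	r.GET("/demo", func(c *gin.Context) {
		retErr(&pq.Error{Code: "23505"}, c)
	})
	_ = r // r.Run(":8080") to serve
}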
api/slots.go
@@ -97,12 +97,12 @@ func (a *API) getSlot(c *gin.Context) {
        retBadReq(err, c)
        return
    }
-    currentBlock, err := a.h.GetLastBlockAPI()
+    currentBlock, err := a.h.GetLastBlock()
    if err != nil {
        retBadReq(err, c)
        return
    }
-    auctionVars, err := a.h.GetAuctionVarsAPI()
+    auctionVars, err := a.h.GetAuctionVars()
    if err != nil {
        retBadReq(err, c)
        return
@@ -200,12 +200,12 @@ func (a *API) getSlots(c *gin.Context) {
        return
    }

-    currentBlock, err := a.h.GetLastBlockAPI()
+    currentBlock, err := a.h.GetLastBlock()
    if err != nil {
        retBadReq(err, c)
        return
    }
-    auctionVars, err := a.h.GetAuctionVarsAPI()
+    auctionVars, err := a.h.GetAuctionVars()
    if err != nil {
        retBadReq(err, c)
        return
@@ -220,13 +220,13 @@ func (a *API) getSlots(c *gin.Context) {
        retBadReq(errors.New("It is necessary to add maxSlotNum filter"), c)
        return
    } else if *finishedAuction {
-        currentBlock, err := a.h.GetLastBlockAPI()
+        currentBlock, err := a.h.GetLastBlock()
        if err != nil {
            retBadReq(err, c)
            return
        }
        currentSlot := a.getCurrentSlot(currentBlock.Num)
-        auctionVars, err := a.h.GetAuctionVarsAPI()
+        auctionVars, err := a.h.GetAuctionVars()
        if err != nil {
            retBadReq(err, c)
            return
@@ -141,7 +141,7 @@ func (a *API) UpdateNetworkInfo(
    a.status.Network.NextForgers = nextForgers

    // Update buckets withdrawals
-    bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
+    bucketsUpdate, err := a.h.GetBucketUpdates()
    if tracerr.Unwrap(err) == sql.ErrNoRows {
        bucketsUpdate = nil
    } else if err != nil {
@@ -201,7 +201,7 @@ func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot
        }}
    } else {
        // Get all the relevant updates from the DB
-        minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
+        minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNum(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
        if err != nil {
            return nil, tracerr.Wrap(err)
        }
@@ -279,7 +279,7 @@ func (a *API) UpdateMetrics() error {
    }
    batchNum := a.status.Network.LastBatch.BatchNum
    a.status.RUnlock()
-    metrics, err := a.h.GetMetricsAPI(batchNum)
+    metrics, err := a.h.GetMetrics(batchNum)
    if err != nil {
        return tracerr.Wrap(err)
    }
@@ -293,7 +293,7 @@ func (a *API) UpdateMetrics() error {

// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
-    feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
+    feeExistingAccount, err := a.h.GetAvgTxFee()
    if err != nil {
        return tracerr.Wrap(err)
    }
@@ -2916,7 +2916,7 @@ components:
        example: 101
      l1UserTotalBytes:
        type: integer
-        description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
+        description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
        example: 72
      maxL1UserTx:
        type: integer
@@ -22,7 +22,7 @@ func (a *API) getToken(c *gin.Context) {
    }
    tokenID := common.TokenID(*tokenIDUint)
    // Fetch token from historyDB
-    token, err := a.h.GetTokenAPI(tokenID)
+    token, err := a.h.GetToken(tokenID)
    if err != nil {
        retSQLErr(err, c)
        return
@@ -45,7 +45,7 @@ func (a *API) getTokens(c *gin.Context) {
        return
    }
    // Fetch exits from historyDB
-    tokens, pendingItems, err := a.h.GetTokensAPI(
+    tokens, pendingItems, err := a.h.GetTokens(
        tokenIDs, symbols, name, fromItem, limit, order,
    )
    if err != nil {
@@ -34,7 +34,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
    }

    // Fetch txs from historyDB
-    txs, pendingItems, err := a.h.GetTxsAPI(
+    txs, pendingItems, err := a.h.GetHistoryTxs(
        addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
    )
    if err != nil {
@@ -61,7 +61,7 @@ func (a *API) getHistoryTx(c *gin.Context) {
        return
    }
    // Fetch tx from historyDB
-    tx, err := a.h.GetTxAPI(txID)
+    tx, err := a.h.GetHistoryTx(txID)
    if err != nil {
        retSQLErr(err, c)
        return
@@ -28,7 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
        return
    }
    // Insert to DB
-    if err := a.l2.AddTxAPI(writeTx); err != nil {
+    if err := a.l2.AddTx(writeTx); err != nil {
        retSQLErr(err, c)
        return
    }
@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-    return bb.localStateDB.Reset(batchNum, fromSynchronizer)
+    return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
}

// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
@@ -3,8 +3,6 @@ Address = "localhost:8086"
Explorer = true
UpdateMetricsInterval = "10s"
UpdateRecommendedFeeInterval = "10s"
-MaxSQLConnections = 100
-SQLConnectionTimeout = "2s"

[PriceUpdater]
Interval = "10s"
@@ -41,12 +39,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token"

[Coordinator]
-# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
-ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10
-L1BatchTimeoutPerc = 0.999
+L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s"
@@ -85,8 +86,11 @@ ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms"
Attempts = 4
AttemptsDelay = "500ms"
+TxResendTimeout = "2m"
+NoReuseNonce = false
CallGasLimit = 300000
GasPriceDiv = 100
+MaxGasPrice = "5000000000"

[Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore"
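The three settings added to [Coordinator.EthClient] are strings that parse into richer Go types; a hedged sketch of how such values are typically consumed (the node's real config structs are not shown in this diff):

// Illustrative parsing of the kinds of values added above.
package main

import (
	"fmt"
	"math/big"
	"time"
)

func main() {
	txResendTimeout, err := time.ParseDuration("2m") // TxResendTimeout
	if err != nil {
		panic(err)
	}
	maxGasPrice, ok := new(big.Int).SetString("5000000000", 10) // MaxGasPrice, in wei
	if !ok {
		panic("invalid MaxGasPrice")
	}
	fmt.Println(txResendTimeout, maxGasPrice) // 2m0s 5000000000
}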
@@ -27,6 +27,24 @@ type Batch struct {
    TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}

+// NewEmptyBatch creates a new empty batch
+func NewEmptyBatch() *Batch {
+    return &Batch{
+        BatchNum:           0,
+        EthBlockNum:        0,
+        ForgerAddr:         ethCommon.Address{},
+        CollectedFees:      make(map[TokenID]*big.Int),
+        FeeIdxsCoordinator: make([]Idx, 0),
+        StateRoot:          big.NewInt(0),
+        NumAccounts:        0,
+        LastIdx:            0,
+        ExitRoot:           big.NewInt(0),
+        ForgeL1TxsNum:      nil,
+        SlotNum:            0,
+        TotalFeesUSD:       nil,
+    }
+}
+
// BatchNum identifies a batch
type BatchNum int64

@@ -75,3 +93,23 @@ func NewBatchData() *BatchData {
        Batch: Batch{},
    }
}
+
+// BatchSync is a subset of Batch that contains fileds needed for the
+// synchronizer and coordinator
+// type BatchSync struct {
+//     BatchNum    BatchNum          `meddler:"batch_num"`
+//     EthBlockNum int64             `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
+//     ForgerAddr  ethCommon.Address `meddler:"forger_addr"`
+//     StateRoot   *big.Int          `meddler:"state_root,bigint"`
+//     SlotNum     int64             `meddler:"slot_num"` // Slot in which the batch is forged
+// }
+//
+// func NewBatchSync() *BatchSync {
+//     return &BatchSync{
+//         BatchNum:    0,
+//         EthBlockNum: 0,
+//         ForgerAddr:  ethCommon.Address,
+//         StateRoot:   big.NewInt(0),
+//         SlotNum:     0,
+//     }
+// }
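The point of a constructor like NewEmptyBatch is that Batch's zero value carries nil *big.Int and map fields. A minimal illustration with a stand-in struct (not the node's actual type):

// Illustrative only: why the added constructor initializes pointer and map fields.
package main

import (
	"fmt"
	"math/big"
)

type batch struct {
	StateRoot     *big.Int
	CollectedFees map[uint32]*big.Int
}

func newEmptyBatch() *batch {
	return &batch{StateRoot: big.NewInt(0), CollectedFees: make(map[uint32]*big.Int)}
}

func main() {
	b := newEmptyBatch()
	b.CollectedFees[0] = big.NewInt(10) // safe; writing to a nil map would panic
	fmt.Println(b.StateRoot, b.CollectedFees[0])
}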
@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
    if blockNum >= c.GenesisBlockNum {
        return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
    }
-    return -1
+    // This result will be negative
+    return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}

// SlotBlocks returns the first and the last block numbers included in that slot
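Behavior note on this hunk: the old code returned a flat -1 for any pre-genesis block; the new code divides unconditionally, and Go's integer division truncates toward zero, so blocks just before genesis map to slot 0 and earlier ones to distinct negative slots:

package main

import "fmt"

func main() {
	genesis, blocksPerSlot := int64(100), int64(40)
	slotNum := func(blockNum int64) int64 {
		return (blockNum - genesis) / blocksPerSlot // truncates toward zero
	}
	fmt.Println(slotNum(180), slotNum(99), slotNum(20)) // 2 0 -2
}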
@@ -24,8 +24,8 @@ const (
    // RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
    RollupConstL1CoordinatorTotalBytes = 101
    // RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
-    // [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
-    RollupConstL1UserTotalBytes = 78
+    // [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
+    RollupConstL1UserTotalBytes = 72
    // RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
    RollupConstMaxL1UserTx = 128
    // RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
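The 78 -> 72 change is pure arithmetic: swapping the two 5-byte float40 fields for 2-byte float16 fields shaves 6 bytes. A trivial check:

package main

import "fmt"

func main() {
	// Field sizes in bytes, from the constant's doc comment above.
	float16Fields := []int{20, 32, 6, 2, 2, 4, 6} // fromEthAddr, fromBjj, fromIdx, depositAmountFloat16, amountFloat16, tokenId, toIdx
	float40Fields := []int{20, 32, 6, 5, 5, 4, 6} // same layout with 5-byte float40 amounts
	sum := func(xs []int) int {
		n := 0
		for _, x := range xs {
			n += x
		}
		return n
	}
	fmt.Println(sum(float16Fields), sum(float40Fields)) // 72 78
}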
@@ -30,7 +30,6 @@ func (f16 Float16) Bytes() []byte {

// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
-    // WARNING b[:2] for a b where len(b)<2 can break
    f16 := Float16(binary.BigEndian.Uint16(b[:2]))
    return &f16
}
@@ -8,7 +8,7 @@ import (
    "github.com/stretchr/testify/assert"
)

-func TestConversionsFloat16(t *testing.T) {
+func TestConversions(t *testing.T) {
    testVector := map[Float16]string{
        0x307B: "123000000",
        0x1DC6: "454500",
@@ -32,14 +32,14 @@ func TestConversionsFloat16(t *testing.T) {
        bi.SetString(testVector[test], 10)

        fl, err := NewFloat16(bi)
-        assert.NoError(t, err)
+        assert.Equal(t, nil, err)

        fx2 := fl.BigInt()
        assert.Equal(t, fx2.String(), testVector[test])
    }
}

-func TestFloorFix2FloatFloat16(t *testing.T) {
+func TestFloorFix2Float(t *testing.T) {
    testVector := map[string]Float16{
        "87999990000000000": 0x776f,
        "87950000000000001": 0x776f,
@@ -57,10 +57,10 @@ func TestFloorFix2FloatFloat16(t *testing.T) {
    }
}

-func TestConversionLossesFloat16(t *testing.T) {
+func TestConversionLosses(t *testing.T) {
    a := big.NewInt(1000)
    b, err := NewFloat16(a)
-    assert.NoError(t, err)
+    assert.Equal(t, nil, err)
    c := b.BigInt()
    assert.Equal(t, c, a)
@@ -1,102 +0,0 @@
-// Package common Float40 provides methods to work with Hermez custom half
-// float precision, 40 bits, codification internally called Float40 has been
-// adopted to encode large integers. This is done in order to save bits when L2
-// transactions are published.
-//nolint:gomnd
-package common
-
-import (
-    "bytes"
-    "encoding/binary"
-    "errors"
-    "math/big"
-
-    "github.com/hermeznetwork/tracerr"
-)
-
-const (
-    // maxFloat40Value is the maximum value that the Float40 can have
-    // (40 bits: maxFloat40Value=2**40-1)
-    maxFloat40Value = 0xffffffffff
-)
-
-var (
-    // ErrFloat40Overflow is used when a given nonce overflows the maximum
-    // capacity of the Float40 (2**40-1)
-    ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
-    // ErrFloat40E31 is used when the e > 31 when trying to convert a
-    // *big.Int to Float40
-    ErrFloat40E31 = errors.New("Float40 error, e > 31")
-    // ErrFloat40NotEnoughPrecission is used when the given *big.Int can
-    // not be represented as Float40 due not enough precission
-    ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
-)
-
-// Float40 represents a float in a 64 bit format
-type Float40 uint64
-
-// Bytes return a byte array of length 5 with the Float40 value encoded in
-// BigEndian
-func (f40 Float40) Bytes() ([]byte, error) {
-    if f40 > maxFloat40Value {
-        return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
-    }
-
-    var f40Bytes [8]byte
-    binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
-    var b [5]byte
-    copy(b[:], f40Bytes[3:])
-    return b[:], nil
-}
-
-// Float40FromBytes returns a Float40 from a byte array of 5 bytes in Bigendian
-// representation.
-func Float40FromBytes(b []byte) Float40 {
-    var f40Bytes [8]byte
-    copy(f40Bytes[3:], b[:])
-    f40 := binary.BigEndian.Uint64(f40Bytes[:])
-    return Float40(f40)
-}
-
-// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
-// [   e    |   m     ]
-// [ 5 bits | 35 bits ]
-func (f40 Float40) BigInt() (*big.Int, error) {
-    // take the 5 used bytes (FF * 5)
-    var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
-    f40Bytes, err := f40.Bytes()
-    if err != nil {
-        return nil, err
-    }
-
-    e := f40Bytes[0] & 0xF8 >> 3      // take first 5 bits
-    m := f40Uint64 & 0x07_FF_FF_FF_FF // take the others 35 bits
-
-    exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
-    r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
-    return r, nil
-}
-
-// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
-// of loss during the encoding.
-func NewFloat40(f *big.Int) (Float40, error) {
-    m := f
-    e := big.NewInt(0)
-    zero := big.NewInt(0)
-    ten := big.NewInt(10)
-    thres := big.NewInt(0x08_00_00_00_00)
-    for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
-        !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
-        m = new(big.Int).Div(m, ten)
-        e = new(big.Int).Add(e, big.NewInt(1))
-    }
-    if e.Int64() > 31 {
-        return 0, ErrFloat40E31
-    }
-    if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
-        return 0, ErrFloat40NotEnoughPrecission
-    }
-    r := new(big.Int).Add(m,
-        new(big.Int).Mul(e, thres))
-    return Float40(r.Uint64()), nil
-}
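For reference, the deleted package encodes a value v as v = m * 10^e, with e in the top 5 bits and m in the low 35 bits of the 40-bit word (so f40 = e<<35 | m, and 0x800000000 = 2^35). A self-contained sketch reproducing one of the deleted test vectors:

package main

import "fmt"

func main() {
	const shift uint64 = 1 << 35 // 0x800000000
	f40 := 6*shift + 123         // the "123000000" vector from the deleted tests
	e := f40 >> 35
	m := f40 & (shift - 1)
	v := m
	for i := uint64(0); i < e; i++ {
		v *= 10
	}
	fmt.Println(e, m, v) // 6 123 123000000
}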
@@ -1,95 +0,0 @@
-package common
-
-import (
-    "math/big"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-)
-
-func TestConversionsFloat40(t *testing.T) {
-    testVector := map[Float40]string{
-        6*0x800000000 + 123:    "123000000",
-        2*0x800000000 + 4545:   "454500",
-        30*0x800000000 + 10235: "10235000000000000000000000000000000",
-        0x000000000:            "0",
-        0x800000000:            "0",
-        0x0001:                 "1",
-        0x0401:                 "1025",
-        0x800000000 + 1:        "10",
-        0xFFFFFFFFFF:           "343597383670000000000000000000000000000000",
-    }
-
-    for test := range testVector {
-        fix, err := test.BigInt()
-        require.NoError(t, err)
-        assert.Equal(t, fix.String(), testVector[test])
-
-        bi, ok := new(big.Int).SetString(testVector[test], 10)
-        require.True(t, ok)
-
-        fl, err := NewFloat40(bi)
-        assert.NoError(t, err)
-
-        fx2, err := fl.BigInt()
-        require.NoError(t, err)
-        assert.Equal(t, fx2.String(), testVector[test])
-    }
-}
-
-func TestExpectError(t *testing.T) {
-    testVector := map[string]error{
-        "9922334455000000000000000000000000000000":   nil,
-        "9922334455000000000000000000000000000001":   ErrFloat40NotEnoughPrecission,
-        "9922334454999999999999999999999999999999":   ErrFloat40NotEnoughPrecission,
-        "42949672950000000000000000000000000000000":  nil,
-        "99223344556573838487575":                    ErrFloat40NotEnoughPrecission,
-        "992233445500000000000000000000000000000000": ErrFloat40E31,
-        "343597383670000000000000000000000000000000": nil,
-        "343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
-        "343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
-        "343597383700000000000000000000000000000000": ErrFloat40E31,
-    }
-    for test := range testVector {
-        bi, ok := new(big.Int).SetString(test, 10)
-        require.True(t, ok)
-        _, err := NewFloat40(bi)
-        assert.Equal(t, testVector[test], err)
-    }
-}
-
-func BenchmarkFloat40(b *testing.B) {
-    newBigInt := func(s string) *big.Int {
-        bigInt, ok := new(big.Int).SetString(s, 10)
-        if !ok {
-            panic("Can not convert string to *big.Int")
-        }
-        return bigInt
-    }
-    type pair struct {
-        Float40 Float40
-        BigInt  *big.Int
-    }
-    testVector := []pair{
-        {6*0x800000000 + 123, newBigInt("123000000")},
-        {2*0x800000000 + 4545, newBigInt("454500")},
-        {30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
-        {0x000000000, newBigInt("0")},
-        {0x800000000, newBigInt("0")},
-        {0x0001, newBigInt("1")},
-        {0x0401, newBigInt("1025")},
-        {0x800000000 + 1, newBigInt("10")},
-        {0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
-    }
-    b.Run("NewFloat40()", func(b *testing.B) {
-        for i := 0; i < b.N; i++ {
-            _, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
-        }
-    })
-    b.Run("Float40.BigInt()", func(b *testing.B) {
-        for i := 0; i < b.N; i++ {
-            _, _ = testVector[i%len(testVector)].Float40.BigInt()
-        }
-    })
-}
@@ -11,6 +11,13 @@ import (
    "github.com/iden3/go-iden3-crypto/babyjub"
)

+const (
+    // L1UserTxBytesLen is the length of the byte array that represents the L1Tx
+    L1UserTxBytesLen = 72
+    // L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
+    L1CoordinatorTxBytesLen = 101
+)
+
// L1Tx is a struct that represents a L1 tx
type L1Tx struct {
    // Stored in DB: mandatory fileds
@@ -172,38 +179,45 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits  ] empty (userFee)           // 1 byte
// [ 40 bits ] empty (nonce)             // 5 bytes
// [ 32 bits ] tokenID                   // 4 bytes
+// [ 16 bits ] amountFloat16             // 2 bytes
// [ 48 bits ] toIdx                     // 6 bytes
// [ 48 bits ] fromIdx                   // 6 bytes
// [ 16 bits ] chainId                   // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes
-// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
+// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
-    var b [29]byte
+    amountFloat16, err := NewFloat16(tx.Amount)
+    if err != nil {
+        return nil, tracerr.Wrap(err)
+    }
+
+    var b [31]byte
    // b[0:7] empty: no ToBJJSign, no fee, no nonce
    copy(b[7:11], tx.TokenID.Bytes())
+    copy(b[11:13], amountFloat16.Bytes())
    toIdxBytes, err := tx.ToIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[11:17], toIdxBytes[:])
+    copy(b[13:19], toIdxBytes[:])
    fromIdxBytes, err := tx.FromIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[17:23], fromIdxBytes[:])
-    binary.BigEndian.PutUint16(b[23:25], chainID)
-    copy(b[25:29], SignatureConstantBytes[:])
+    copy(b[19:25], fromIdxBytes[:])
+    binary.BigEndian.PutUint16(b[25:27], chainID)
+    copy(b[27:31], SignatureConstantBytes[:])

    bi := new(big.Int).SetBytes(b[:])
    return bi, nil
}

// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat40 | Fee ]
+// [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
    idxLen := nLevels / 8 //nolint:gomnd

-    b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
+    b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd

    fromIdxBytes, err := tx.FromIdx.Bytes()
    if err != nil {
@@ -217,17 +231,13 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
    copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

    if tx.EffectiveAmount != nil {
-        amountFloat40, err := NewFloat40(tx.EffectiveAmount)
+        amountFloat16, err := NewFloat16(tx.EffectiveAmount)
        if err != nil {
            return nil, tracerr.Wrap(err)
        }
-        amountFloat40Bytes, err := amountFloat40.Bytes()
-        if err != nil {
-            return nil, tracerr.Wrap(err)
-        }
-        copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
+        copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
    }
-    // fee = 0 (as is L1Tx)
+    // fee = 0 (as is L1Tx) b[10:11]
    return b[:], nil
}

@@ -237,7 +247,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {

    fromIdxBytes := b[0:idxLen]
    toIdxBytes := b[idxLen : idxLen*2]
-    amountBytes := b[idxLen*2 : idxLen*2+5]
+    amountBytes := b[idxLen*2 : idxLen*2+2]

    l1tx := L1Tx{}
    fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -250,8 +260,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
        return nil, tracerr.Wrap(err)
    }
    l1tx.ToIdx = toIdx
-    l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
-    return &l1tx, err
+    l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
+    return &l1tx, nil
}

// BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -259,7 +269,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) {
-    var b [RollupConstL1UserTotalBytes]byte
+    var b [L1UserTxBytesLen]byte
    copy(b[0:20], tx.FromEthAddr.Bytes())
    if tx.FromBJJ != EmptyBJJComp {
        pkCompL := tx.FromBJJ
@@ -271,33 +281,22 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
        return nil, tracerr.Wrap(err)
    }
    copy(b[52:58], fromIdxBytes[:])

-    depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
+    depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
+    copy(b[58:60], depositAmountFloat16.Bytes())
+    amountFloat16, err := NewFloat16(tx.Amount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[58:63], depositAmountFloat40Bytes)
-
-    amountFloat40, err := NewFloat40(tx.Amount)
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    copy(b[63:68], amountFloat40Bytes)
-
-    copy(b[68:72], tx.TokenID.Bytes())
+    copy(b[60:62], amountFloat16.Bytes())
+    copy(b[62:66], tx.TokenID.Bytes())
    toIdxBytes, err := tx.ToIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[72:78], toIdxBytes[:])
+    copy(b[66:72], toIdxBytes[:])
    return b[:], nil
}

@@ -314,7 +313,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
    if tx.UserOrigin {
        return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
    }
-    var b [RollupConstL1CoordinatorTotalBytes]byte
+    var b [L1CoordinatorTxBytesLen]byte
    v := compressedSignatureBytes[64]
    s := compressedSignatureBytes[32:64]
    r := compressedSignatureBytes[0:32]
@@ -330,7 +329,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err

// L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
-    if len(b) != RollupConstL1UserTotalBytes {
+    if len(b) != L1UserTxBytesLen {
        return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
    }

@@ -348,19 +347,13 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
        return nil, tracerr.Wrap(err)
    }
    tx.FromIdx = fromIdx
-    tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
+    tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
+    tx.Amount = Float16FromBytes(b[60:62]).BigInt()
+    tx.TokenID, err = TokenIDFromBytes(b[62:66])
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    tx.TokenID, err = TokenIDFromBytes(b[68:72])
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    tx.ToIdx, err = IdxFromBytes(b[72:78])
+    tx.ToIdx, err = IdxFromBytes(b[66:72])
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
@@ -375,7 +368,7 @@ func signHash(data []byte) []byte {

// L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
-    if len(b) != RollupConstL1CoordinatorTotalBytes {
+    if len(b) != L1CoordinatorTxBytesLen {
        return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
    }
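The 241-bit total in the updated comment is just the sum of the listed field widths plus the 1-bit toBJJSign that sits above this excerpt (an assumption here, since that comment line is not shown). A quick check in Go:

package main

import "fmt"

func main() {
	// toBJJSign, userFee, nonce, tokenID, amountFloat16, toIdx, fromIdx, chainID, signatureConstant
	widths := []int{1, 8, 40, 32, 16, 48, 48, 16, 32}
	total := 0
	for _, w := range widths {
		total += w
	}
	fmt.Println(total, "bits ->", (total+7)/8, "bytes") // 241 bits -> 31 bytes
}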
@@ -50,110 +50,64 @@ func TestNewL1CoordinatorTx(t *testing.T) {
}

func TestL1TxCompressedData(t *testing.T) {
-    // test vectors values generated from javascript implementation (using
-    // PoolL2Tx values)
-    amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
-    require.True(t, ok)
    tx := L1Tx{
-        FromIdx: (1 << 48) - 1,
-        ToIdx:   (1 << 48) - 1,
-        Amount:  amount,
-        TokenID: (1 << 32) - 1,
+        FromIdx: 2,
+        ToIdx:   3,
+        Amount:  big.NewInt(4),
+        TokenID: 5,
    }
-    txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
+    chainID := uint16(0)
+    txCompressedData, err := tx.TxCompressedData(chainID)
    assert.NoError(t, err)
-    expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
-    assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
-
-    tx = L1Tx{
-        FromIdx: 0,
-        ToIdx:   0,
-        Amount:  big.NewInt(0),
-        TokenID: 0,
-    }
-    txCompressedData, err = tx.TxCompressedData(uint16(0))
-    assert.NoError(t, err)
-    expectedStr = "c60be60f"
-    assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
-
-    amount, ok = new(big.Int).SetString("63000000000000000", 10)
-    require.True(t, ok)
-    tx = L1Tx{
-        FromIdx: 324,
-        ToIdx:   256,
-        Amount:  amount,
-        TokenID: 123,
-    }
-    txCompressedData, err = tx.TxCompressedData(uint16(1))
-    assert.NoError(t, err)
-    expectedStr = "7b0000000001000000000001440001c60be60f"
-    assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
-
-    tx = L1Tx{
-        FromIdx: 1,
-        ToIdx:   2,
-        TokenID: 3,
-    }
-    txCompressedData, err = tx.TxCompressedData(uint16(0))
-    assert.NoError(t, err)
-    expectedStr = "030000000000020000000000010000c60be60f"
-    assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+    // test vector value generated from javascript implementation
+    expectedStr := "7307597389635308713748674793997299267459594577423"
+    assert.Equal(t, expectedStr, txCompressedData.String())
+    assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
}

func TestBytesDataAvailability(t *testing.T) {
    // test vectors values generated from javascript implementation
-    amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
-    require.True(t, ok)
    tx := L1Tx{
-        ToIdx:           (1 << 16) - 1,
-        FromIdx:         (1 << 16) - 1,
-        EffectiveAmount: amount,
+        FromIdx: 2,
+        ToIdx:   3,
+        Amount:  big.NewInt(4),
+        TokenID: 5,
    }
-    txCompressedData, err := tx.BytesDataAvailability(16)
+    txCompressedData, err := tx.BytesDataAvailability(32)
    assert.NoError(t, err)
-    assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
-    l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
-    require.NoError(t, err)
-    assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
-    assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
-    assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
+    assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))

    tx = L1Tx{
-        ToIdx:           (1 << 32) - 1,
-        FromIdx:         (1 << 32) - 1,
-        EffectiveAmount: amount,
+        FromIdx:         2,
+        ToIdx:           3,
+        EffectiveAmount: big.NewInt(4),
+        TokenID:         5,
    }
    txCompressedData, err = tx.BytesDataAvailability(32)
    assert.NoError(t, err)
-    assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
-    l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
+    assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
}

func TestL1TxFromDataAvailability(t *testing.T) {
    tx := L1Tx{
        FromIdx: 2,
        ToIdx:   3,
        Amount:  big.NewInt(4),
    }
    txCompressedData, err := tx.BytesDataAvailability(32)
    assert.NoError(t, err)
    l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
    require.NoError(t, err)
    assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
    assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
    assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)

    tx = L1Tx{
-        ToIdx:           0,
-        FromIdx:         0,
-        EffectiveAmount: big.NewInt(0),
+        FromIdx:         2,
+        ToIdx:           3,
+        EffectiveAmount: big.NewInt(4),
    }
    txCompressedData, err = tx.BytesDataAvailability(32)
    assert.NoError(t, err)
-    assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
    l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
    require.NoError(t, err)
    assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
    assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
    assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)

-    tx = L1Tx{
-        ToIdx:           635,
-        FromIdx:         296,
-        EffectiveAmount: big.NewInt(1000000000000000000),
-    }
-    txCompressedData, err = tx.BytesDataAvailability(32)
-    assert.NoError(t, err)
-    assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
-    l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
-    require.NoError(t, err)
-    assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -218,10 +172,12 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
        UserOrigin: true,
    }

+    expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
+    require.NoError(t, err)
+
    encodedData, err := l1Tx.BytesUser()
    require.NoError(t, err)
-    expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
-    assert.Equal(t, expected, hex.EncodeToString(encodedData))
+    assert.Equal(t, expected, encodedData)
}

func TestL1CoordinatorTxByteParsers(t *testing.T) {
@@ -89,15 +89,11 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
    // TokenID
    b = append(b, tx.TokenID.Bytes()[:]...)
    // Amount
-    amountFloat40, err := NewFloat40(tx.Amount)
+    amountFloat16, err := NewFloat16(tx.Amount)
    if err != nil {
        return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return txID, tracerr.Wrap(err)
-    }
-    b = append(b, amountFloat40Bytes...)
+    b = append(b, amountFloat16.Bytes()...)
    // Nonce
    nonceBytes, err := tx.Nonce.Bytes()
    if err != nil {
@@ -174,11 +170,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
}

// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat40 | Fee ]
+// [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
    idxLen := nLevels / 8 //nolint:gomnd

-    b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
+    b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd

    fromIdxBytes, err := tx.FromIdx.Bytes()
    if err != nil {
@@ -192,16 +188,13 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
    }
    copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

-    amountFloat40, err := NewFloat40(tx.Amount)
+    amountFloat16, err := NewFloat16(tx.Amount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
-    b[idxLen*2+5] = byte(tx.Fee)
+    copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
+    b[idxLen*2+2] = byte(tx.Fee)

    return b[:], nil
}
@@ -226,10 +219,7 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
        return nil, tracerr.Wrap(err)
    }

-    tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+5]).BigInt()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    tx.Fee = FeeSelector(b[idxLen*2+5])
+    tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
+    tx.Fee = FeeSelector(b[idxLen*2+2])
    return tx, nil
}
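The buffer size expression above fixes the Data Availability record length: an index pair, the amount, and one fee byte. A small sketch comparing the float16 and float40 variants of the formula; the byte counts match the expected hex strings in the tests (e.g. 22 hex chars = 11 bytes for nLevels = 32):

package main

import "fmt"

func main() {
	for _, nLevels := range []uint32{16, 32} {
		float16Len := ((nLevels * 2) + 16 + 8) / 8 // idx pair + 2-byte amount + fee
		float40Len := ((nLevels * 2) + 40 + 8) / 8 // idx pair + 5-byte amount + fee
		fmt.Println(nLevels, float16Len, float40Len)
	}
	// Output:
	// 16 7 10
	// 32 11 14
}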
@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err := NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())
+    assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())
+    assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())
+    assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())
+    assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())
+    assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())
+    assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())

    l2Tx = &L2Tx{
        FromIdx: 4444,
@@ -90,85 +90,25 @@ func TestNewL2Tx(t *testing.T) {
    }
    l2Tx, err = NewL2Tx(l2Tx)
    assert.NoError(t, err)
-    assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
+    assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
}

func TestL2TxByteParsers(t *testing.T) {
-    // test vectors values generated from javascript implementation
-    amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
-    require.True(t, ok)
+    amount := new(big.Int)
+    amount.SetString("79000000", 10)
    l2Tx := &L2Tx{
-        ToIdx:   (1 << 16) - 1,
-        FromIdx: (1 << 16) - 1,
-        Amount:  amount,
-        Fee:     (1 << 8) - 1,
-    }
-    expected := "ffffffffffffffffffff"
-    encodedData, err := l2Tx.BytesDataAvailability(16)
-    require.NoError(t, err)
-    assert.Equal(t, expected, hex.EncodeToString(encodedData))
-
-    decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
-    require.NoError(t, err)
-    assert.Equal(t, l2Tx, decodedData)
-
-    l2Tx = &L2Tx{
-        ToIdx:   (1 << 32) - 1,
-        FromIdx: (1 << 32) - 1,
-        Amount:  amount,
-        Fee:     (1 << 8) - 1,
-    }
-    expected = "ffffffffffffffffffffffffffff"
-    encodedData, err = l2Tx.BytesDataAvailability(32)
-    require.NoError(t, err)
-    assert.Equal(t, expected, hex.EncodeToString(encodedData))
-
-    decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
-    require.NoError(t, err)
-    assert.Equal(t, l2Tx, decodedData)
-
-    l2Tx = &L2Tx{
-        ToIdx:   0,
-        FromIdx: 0,
-        Amount:  big.NewInt(0),
-        Fee:     0,
-    }
-    expected = "0000000000000000000000000000"
-    encodedData, err = l2Tx.BytesDataAvailability(32)
-    require.NoError(t, err)
-    assert.Equal(t, expected, hex.EncodeToString(encodedData))
-
-    decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
-    require.NoError(t, err)
-    assert.Equal(t, l2Tx, decodedData)
-
-    l2Tx = &L2Tx{
-        ToIdx:   0,
-        FromIdx: 1061,
-        Amount:  big.NewInt(420000000000),
-        Fee:     127,
-    }
-    expected = "000004250000000010fa56ea007f"
-    encodedData, err = l2Tx.BytesDataAvailability(32)
-    require.NoError(t, err)
-    assert.Equal(t, expected, hex.EncodeToString(encodedData))
-
-    decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
-    require.NoError(t, err)
-    assert.Equal(t, l2Tx, decodedData)
-
-    l2Tx = &L2Tx{
        ToIdx:   256,
-        Amount:  amount,
        FromIdx: 257,
+        Amount:  big.NewInt(79000000),
        Fee:     201,
    }
-    expected = "00000101000001000004b571c0c9"
-    encodedData, err = l2Tx.BytesDataAvailability(32)
+    // Data from the compatibility test
+    expected := "00000101000001002b16c9"
+    encodedData, err := l2Tx.BytesDataAvailability(32)
    require.NoError(t, err)
    assert.Equal(t, expected, hex.EncodeToString(encodedData))

-    decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+    decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
    require.NoError(t, err)
    assert.Equal(t, l2Tx, decodedData)
}
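The amount bytes "2b16" in the expected string decode back to 79000000. The Float16 layout is not spelled out in this diff; from the test vectors (e.g. 0x1DC6 -> 454500) it appears to be v = (m + half/2) * 10^e with [5 bits e | 1 bit half | 10 bits m], which this sketch assumes:

package main

import "fmt"

func main() {
	f16 := uint64(0x2b16)
	e := f16 >> 11          // 5
	half := (f16 >> 10) & 1 // 0
	m := f16 & 0x3ff        // 790
	v := m
	for i := uint64(0); i < e; i++ {
		v *= 10
	}
	if half == 1 {
		// add 0.5 * 10^e (integral whenever half is set with e >= 1)
		add := uint64(5)
		for i := uint64(1); i < e; i++ {
			add *= 10
		}
		v += add
	}
	fmt.Println(v) // 79000000
}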
@@ -36,7 +36,7 @@ type PoolL2Tx struct {
    ToEthAddr ethCommon.Address     `meddler:"to_eth_addr,zeroisnull"`
    ToBJJ     babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
    TokenID   TokenID               `meddler:"token_id"`
-    Amount    *big.Int              `meddler:"amount,bigint"` // TODO: change to float40
+    Amount    *big.Int              `meddler:"amount,bigint"` // TODO: change to float16
    Fee       FeeSelector           `meddler:"fee"`
    Nonce     Nonce                 `meddler:"nonce"` // effective 40 bits used
    State     PoolL2TxState         `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
    RqToEthAddr ethCommon.Address     `meddler:"rq_to_eth_addr,zeroisnull"`
    RqToBJJ     babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
    RqTokenID   TokenID               `meddler:"rq_token_id,zeroisnull"`
-    RqAmount    *big.Int              `meddler:"rq_amount,bigintnull"` // TODO: change to float40
+    RqAmount    *big.Int              `meddler:"rq_amount,bigintnull"` // TODO: change to float16
    RqFee       FeeSelector           `meddler:"rq_fee,zeroisnull"`
    RqNonce     Nonce                 `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
    AbsoluteFee float64               `meddler:"fee_usd,zeroisnull"`
@@ -122,13 +122,18 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits  ] userFee           // 1 byte
// [ 40 bits ] nonce             // 5 bytes
// [ 32 bits ] tokenID           // 4 bytes
+// [ 16 bits ] amountFloat16     // 2 bytes
// [ 48 bits ] toIdx             // 6 bytes
// [ 48 bits ] fromIdx           // 6 bytes
// [ 16 bits ] chainId           // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
-    var b [29]byte
+    amountFloat16, err := NewFloat16(tx.Amount)
+    if err != nil {
+        return nil, tracerr.Wrap(err)
+    }
+    var b [31]byte

    toBJJSign := byte(0)
    pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -144,18 +149,19 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
    }
    copy(b[2:7], nonceBytes[:])
    copy(b[7:11], tx.TokenID.Bytes())
+    copy(b[11:13], amountFloat16.Bytes())
    toIdxBytes, err := tx.ToIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[11:17], toIdxBytes[:])
+    copy(b[13:19], toIdxBytes[:])
    fromIdxBytes, err := tx.FromIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[17:23], fromIdxBytes[:])
-    binary.BigEndian.PutUint16(b[23:25], chainID)
-    copy(b[25:29], SignatureConstantBytes[:])
+    copy(b[19:25], fromIdxBytes[:])
+    binary.BigEndian.PutUint16(b[25:27], chainID)
+    copy(b[27:31], SignatureConstantBytes[:])

    bi := new(big.Int).SetBytes(b[:])
    return bi, nil
@@ -164,9 +170,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int {
-    var b [29]byte
-    binary.BigEndian.PutUint16(b[23:25], chainID)
-    copy(b[25:29], SignatureConstantBytes[:])
+    var b [31]byte
+    binary.BigEndian.PutUint16(b[25:27], chainID)
+    copy(b[27:31], SignatureConstantBytes[:])
    bi := new(big.Int).SetBytes(b[:])
    return bi
}
@@ -176,7 +182,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits  ] userFee       // 1 byte
// [ 40 bits ] nonce         // 5 bytes
// [ 32 bits ] tokenID       // 4 bytes
-// [ 40 bits ] amountFloat40 // 5 bytes
+// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx         // 6 bytes
// [ 48 bits ] fromIdx       // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -184,16 +190,11 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
    if tx.Amount == nil {
        tx.Amount = big.NewInt(0)
    }
-    amountFloat40, err := NewFloat40(tx.Amount)
+    amountFloat16, err := NewFloat16(tx.Amount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-
-    var b [28]byte
+    var b [25]byte
    toBJJSign := byte(0)
    if tx.ToBJJ != EmptyBJJComp {
        sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -209,17 +210,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
    }
    copy(b[2:7], nonceBytes[:])
    copy(b[7:11], tx.TokenID.Bytes())
-    copy(b[11:16], amountFloat40Bytes)
+    copy(b[11:13], amountFloat16.Bytes())
    toIdxBytes, err := tx.ToIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[16:22], toIdxBytes[:])
+    copy(b[13:19], toIdxBytes[:])
    fromIdxBytes, err := tx.FromIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[22:28], fromIdxBytes[:])
+    copy(b[19:25], fromIdxBytes[:])

    bi := new(big.Int).SetBytes(b[:])
    return bi, nil
@@ -235,7 +236,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits  ] rqUserFee       // 1 byte
// [ 40 bits ] rqNonce         // 5 bytes
// [ 32 bits ] rqTokenID       // 4 bytes
-// [ 40 bits ] rqAmountFloat40 // 5 bytes
+// [ 16 bits ] rqAmountFloat16 // 2 bytes
// [ 48 bits ] rqToIdx         // 6 bytes
// [ 48 bits ] rqFromIdx       // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -243,16 +244,11 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
    if tx.RqAmount == nil {
        tx.RqAmount = big.NewInt(0)
    }
-    amountFloat40, err := NewFloat40(tx.RqAmount)
+    amountFloat16, err := NewFloat16(tx.RqAmount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-
-    var b [28]byte
+    var b [25]byte
    rqToBJJSign := byte(0)
    if tx.RqToBJJ != EmptyBJJComp {
        sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -268,17 +264,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
    }
    copy(b[2:7], nonceBytes[:])
    copy(b[7:11], tx.RqTokenID.Bytes())
-    copy(b[11:16], amountFloat40Bytes)
+    copy(b[11:13], amountFloat16.Bytes())
    toIdxBytes, err := tx.RqToIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[16:22], toIdxBytes[:])
+    copy(b[13:19], toIdxBytes[:])
    fromIdxBytes, err := tx.RqFromIdx.Bytes()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-    copy(b[22:28], fromIdxBytes[:])
+    copy(b[19:25], fromIdxBytes[:])

    bi := new(big.Int).SetBytes(b[:])
    return bi, nil
@@ -291,22 +287,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
-
-    // e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
-    var e1B [25]byte
-    amountFloat40, err := NewFloat40(tx.Amount)
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    amountFloat40Bytes, err := amountFloat40.Bytes()
-    if err != nil {
-        return nil, tracerr.Wrap(err)
-    }
-    copy(e1B[0:5], amountFloat40Bytes)
    toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
-    copy(e1B[5:25], toEthAddr.Bytes())
-    e1 := new(big.Int).SetBytes(e1B[:])

    rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)

    _, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -318,7 +299,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {

    _, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)

-    return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
+    return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
}

// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
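With every field zero, both TxCompressedData variants reduce to the trailing 4-byte signature constant, which is why "c60be60f" keeps appearing as an expected value in the tests below. A sketch, taking the constant's value from those test vectors:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

func main() {
	// Assumed from the test vectors: the 4-byte signature constant.
	sigConst := []byte{0xc6, 0x0b, 0xe6, 0x0f}
	var b [31]byte
	binary.BigEndian.PutUint16(b[25:27], 0) // chainID = 0
	copy(b[27:31], sigConst)
	bi := new(big.Int).SetBytes(b[:])
	fmt.Printf("%x\n", bi.Bytes()) // c60be60f — leading zero bytes drop out
}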
@@ -21,104 +21,80 @@ func TestNewPoolL2Tx(t *testing.T) {
	}
	poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
	assert.NoError(t, err)
	assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
	assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
}

func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
	// test vectors values generated from javascript implementation
	var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
	_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
func TestTxCompressedData(t *testing.T) {
	chainID := uint16(0)
	var sk babyjub.PrivateKey
	_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
	assert.NoError(t, err)

	var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
	_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
	assert.NoError(t, err)

	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
	require.True(t, ok)
	tx := PoolL2Tx{
		FromIdx: (1 << 48) - 1,
		ToIdx:   (1 << 48) - 1,
		Amount:  amount,
		TokenID: (1 << 32) - 1,
		Nonce:   (1 << 40) - 1,
		Fee:     (1 << 3) - 1,
		ToBJJ:   skPositive.Public().Compress(),
	}
	txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
	require.NoError(t, err)
	expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

	txCompressedDataV2, err := tx.TxCompressedDataV2()
	require.NoError(t, err)
	expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))

	tx = PoolL2Tx{
		FromIdx: 0,
		ToIdx:   0,
		Amount:  big.NewInt(0),
		TokenID: 0,
		Nonce:   0,
		Fee:     0,
		ToBJJ:   skNegative.Public().Compress(),
	}
	txCompressedData, err = tx.TxCompressedData(uint16(0))
	require.NoError(t, err)
	expectedStr = "c60be60f"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

	txCompressedDataV2, err = tx.TxCompressedDataV2()
	require.NoError(t, err)
	assert.Equal(t, "0", txCompressedDataV2.String())

	amount, ok = new(big.Int).SetString("63000000000000000", 10)
	require.True(t, ok)
	tx = PoolL2Tx{
		FromIdx: 324,
		ToIdx:   256,
		Amount:  amount,
		TokenID: 123,
		Nonce:   76,
		Fee:     214,
		ToBJJ:   skNegative.Public().Compress(),
	}
	txCompressedData, err = tx.TxCompressedData(uint16(1))
	require.NoError(t, err)
	expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

	txCompressedDataV2, err = tx.TxCompressedDataV2()
	require.NoError(t, err)
	expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))

	tx = PoolL2Tx{
		FromIdx: 1,
		ToIdx:   2,
		TokenID: 3,
		Nonce:   4,
		Fee:     5,
		ToBJJ:   skNegative.Public().Compress(),
	}
	txCompressedData, err = tx.TxCompressedData(uint16(0))
	require.NoError(t, err)
	expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

	tx = PoolL2Tx{
		FromIdx: 2,
		ToIdx:   3,
		TokenID: 4,
		Nonce:   5,
		Fee:     6,
		ToBJJ:   skPositive.Public().Compress(),
		Amount:  big.NewInt(4),
		TokenID: 5,
		Nonce:   6,
		ToBJJ:   sk.Public().Compress(),
	}
	txCompressedData, err = tx.TxCompressedData(uint16(0))
	require.NoError(t, err)
	expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
	txCompressedData, err := tx.TxCompressedData(chainID)
	assert.NoError(t, err)
	// test vector value generated from javascript implementation
	expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
	assert.Equal(t, expectedStr, txCompressedData.String())
	assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
	// using a different chainID
	txCompressedData, err = tx.TxCompressedData(uint16(100))
	assert.NoError(t, err)
	expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
	assert.Equal(t, expectedStr, txCompressedData.String())
	assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
	txCompressedData, err = tx.TxCompressedData(uint16(65535))
	assert.NoError(t, err)
	expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
	assert.Equal(t, expectedStr, txCompressedData.String())
	assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))

	tx = PoolL2Tx{
		RqFromIdx: 7,
		RqToIdx:   8,
		RqAmount:  big.NewInt(9),
		RqTokenID: 10,
		RqNonce:   11,
		RqFee:     12,
		RqToBJJ:   sk.Public().Compress(),
	}
	rqTxCompressedData, err := tx.RqTxCompressedDataV2()
	assert.NoError(t, err)
	// test vector value generated from javascript implementation
	expectedStr = "6571340879233176732837827812956721483162819083004853354503"
	assert.Equal(t, expectedStr, rqTxCompressedData.String())
	assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}

func TestTxCompressedDataV2(t *testing.T) {
	var sk babyjub.PrivateKey
	_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
	assert.NoError(t, err)
	tx := PoolL2Tx{
		FromIdx: 7,
		ToIdx:   8,
		Amount:  big.NewInt(9),
		TokenID: 10,
		Nonce:   11,
		Fee:     12,
		ToBJJ:   sk.Public().Compress(),
	}
	txCompressedData, err := tx.TxCompressedDataV2()
	assert.NoError(t, err)
	// test vector value generated from javascript implementation
	expectedStr := "6571340879233176732837827812956721483162819083004853354503"
	assert.Equal(t, expectedStr, txCompressedData.String())
	expected, ok := new(big.Int).SetString(expectedStr, 10)
	assert.True(t, ok)

	assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
	assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}

func TestRqTxCompressedDataV2(t *testing.T) {
@@ -137,16 +113,19 @@ func TestRqTxCompressedDataV2(t *testing.T) {
	txCompressedData, err := tx.RqTxCompressedDataV2()
	assert.NoError(t, err)
	// test vector value generated from javascript implementation
	expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
	expectedStr := "6571340879233176732837827812956721483162819083004853354503"
	assert.Equal(t, expectedStr, txCompressedData.String())
	expected, ok := new(big.Int).SetString(expectedStr, 10)
	assert.True(t, ok)
	assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
	assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
	assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}

func TestHashToSign(t *testing.T) {
	chainID := uint16(0)
	var sk babyjub.PrivateKey
	_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
	assert.NoError(t, err)
	tx := PoolL2Tx{
		FromIdx: 2,
		ToIdx:   3,
@@ -157,7 +136,7 @@ func TestHashToSign(t *testing.T) {
	}
	toSign, err := tx.HashToSign(chainID)
	assert.NoError(t, err)
	assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
	assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
}

func TestVerifyTxSignature(t *testing.T) {
@@ -177,7 +156,7 @@ func TestVerifyTxSignature(t *testing.T) {
	}
	toSign, err := tx.HashToSign(chainID)
	assert.NoError(t, err)
	assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
	assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())

	sig := sk.SignPoseidon(toSign)
	tx.Signature = sig.Compress()

@@ -112,7 +112,7 @@ type ZKInputs struct {
	// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
	// account (fromIdx==0)
	NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
	// DepositAmountF encoded as float40
	// DepositAmountF encoded as float16
	DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
	// FromEthAddr
	FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]

@@ -3,6 +3,7 @@ package config
import (
	"fmt"
	"io/ioutil"
	"math/big"
	"time"

	"github.com/BurntSushi/toml"
@@ -51,6 +52,27 @@ type Coordinator struct {
	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
	// timeout that will trigger a schedule to forge an L1Batch
	L1BatchTimeoutPerc float64 `validate:"required"`
	// StartSlotBlocksDelay is the number of blocks of delay to wait before
	// starting the pipeline when we reach a slot in which we can forge.
	StartSlotBlocksDelay int64
	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
	// the forger address is checked to be allowed to forge (apart from
	// checking the next block), used to decide when to stop scheduling new
	// batches (by stopping the pipeline).
	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
	// is 5, even though at block 11 we canForge, the pipeline will be
	// stopped if we can't forge at block 15.
	// This value should be the expected number of blocks it takes between
	// scheduling a batch and having it mined.
	ScheduleBatchBlocksAheadCheck int64
	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
	// which the coordinator is also checked to be allowed to forge, apart
	// from the next block; used to decide when to stop sending batches to
	// the smart contract.
	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
	// 5, even though at block 11 we canForge, the batch will be discarded
	// if we can't forge at block 15.
	SendBatchBlocksMarginCheck int64
	// ProofServerPollInterval is the waiting interval between polling the
	// ProofServer while waiting for a particular status
	ProofServerPollInterval Duration `validate:"required"`
@@ -101,6 +123,9 @@ type Coordinator struct {
	// calls, except for methods where a particular gas limit is
	// hardcoded because it's known to be a big value
	CallGasLimit uint64 `validate:"required"`
	// MaxGasPrice is the maximum gas price allowed for ethereum
	// transactions
	MaxGasPrice *big.Int `validate:"required"`
	// GasPriceDiv is the gas price division
	GasPriceDiv uint64 `validate:"required"`
	// CheckLoopInterval is the waiting interval between receipt
@@ -112,6 +137,13 @@ type Coordinator struct {
	// AttemptsDelay is the delay between attempts to do an eth client
	// RPC call
	AttemptsDelay Duration `validate:"required"`
	// TxResendTimeout is the timeout after which a non-mined
	// ethereum transaction will be resent (reusing the nonce) with
	// a newly calculated gas price
	TxResendTimeout Duration `validate:"required"`
	// NoReuseNonce disables reusing nonces of pending transactions for
	// new replacement transactions
	NoReuseNonce bool
	// Keystore is the ethereum keystore where private keys are kept
	Keystore struct {
		// Path to the keystore
@@ -204,14 +236,9 @@ type Node struct {
	// UpdateMetricsInterval is the interval between updates of the
	// API metrics
	UpdateMetricsInterval Duration
	// UpdateRecommendedFeeInterval is the interval between updates of the
	// UpdateMetricsInterval is the interval between updates of the
	// recommended fees
	UpdateRecommendedFeeInterval Duration
	// Maximum concurrent connections allowed between API and SQL
	MaxSQLConnections int `validate:"required"`
	// SQLConnectionTimeout is the maximum amount of time that an API request
	// can wait to establish a SQL connection
	SQLConnectionTimeout Duration
	} `validate:"required"`
	Debug struct {
		// APIAddress is the address where the debugAPI will listen if

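The 10/11/15 example in the comments above reduces to a small guard. A hedged sketch of that decision, with canForgeAt standing in for the auction check (keepGoing is a hypothetical name, not part of the node):

package main

import "fmt"

// keepGoing reports whether scheduling/sending should continue: the forger
// must be allowed at the next block and, when blocksAhead is non-zero, also
// blocksAhead blocks later.
func keepGoing(canForgeAt func(int64) bool, nextBlock, blocksAhead int64) bool {
	if !canForgeAt(nextBlock) {
		return false
	}
	return blocksAhead == 0 || canForgeAt(nextBlock+blocksAhead)
}

func main() {
	slotEnd := int64(14) // we can forge up to block 14, not at block 15
	canForgeAt := func(blockNum int64) bool { return blockNum <= slotEnd }
	fmt.Println(keepGoing(canForgeAt, 11, 0)) // true: only the next block is checked
	fmt.Println(keepGoing(canForgeAt, 11, 4)) // false: block 15 is outside the slot
}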
@@ -47,6 +47,8 @@ type Debug struct {
	MineBlockNum int64
	// SendBlockNum is the blockNum when the batch was sent to ethereum
	SendBlockNum int64
	// ResendNum is the number of times the tx has been resent
	ResendNum int
	// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
	// was scheduled
	LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
	// StartToSendDelay is the delay between starting a batch and sending
	// it to ethereum, in seconds
	StartToSendDelay float64
	// StartToMineDelay is the delay between starting a batch and having
	// it mined, in seconds
	StartToMineDelay float64
	// SendToMineDelay is the delay between sending a batch tx and having
	// it mined, in seconds
	SendToMineDelay float64
}

// BatchInfo contains the Batch information
type BatchInfo struct {
	PipelineNum int
	BatchNum    common.BatchNum
	ServerProof prover.Client
	ZKInputs    *common.ZKInputs
@@ -82,9 +91,15 @@ type BatchInfo struct {
	CoordIdxs      []common.Idx
	ForgeBatchArgs *eth.RollupForgeBatchArgs
	// FeesInfo
	EthTx   *types.Transaction
	Receipt *types.Receipt
	Debug   Debug
	EthTx *types.Transaction
	// SendTimestamp is the time when the batch was sent to ethereum
	SendTimestamp time.Time
	Receipt       *types.Receipt
	// Fail is true if:
	// - The receipt status is failed
	// - A previous parent batch is failed
	Fail  bool
	Debug Debug
}

// DebugStore is a debug function to store the BatchInfo as a json text file in

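The delay fields are simple timestamp differences in seconds; a minimal illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	send := start.Add(3 * time.Second)  // when the batch tx was sent
	mine := start.Add(15 * time.Second) // block timestamp when it was mined
	fmt.Println("StartToSendDelay:", send.Sub(start).Seconds()) // 3
	fmt.Println("SendToMineDelay:", mine.Sub(send).Seconds())   // 12
	fmt.Println("StartToMineDelay:", mine.Sub(start).Seconds()) // 15
}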
@@ -3,8 +3,8 @@ package coordinator
import (
	"context"
	"fmt"
	"math/big"
	"os"
	"strings"
	"sync"
	"time"

@@ -42,6 +42,29 @@ type Config struct {
	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
	// timeout that will trigger a schedule to forge an L1Batch
	L1BatchTimeoutPerc float64
	// StartSlotBlocksDelay is the number of blocks of delay to wait before
	// starting the pipeline when we reach a slot in which we can forge.
	StartSlotBlocksDelay int64
	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
	// the forger address is checked to be allowed to forge (apart from
	// checking the next block), used to decide when to stop scheduling new
	// batches (by stopping the pipeline).
	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
	// is 5, even though at block 11 we canForge, the pipeline will be
	// stopped if we can't forge at block 15.
	// This value should be the expected number of blocks it takes between
	// scheduling a batch and having it mined.
	ScheduleBatchBlocksAheadCheck int64
	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
	// which the coordinator is also checked to be allowed to forge, apart
	// from the next block; used to decide when to stop sending batches to
	// the smart contract.
	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
	// 5, even though at block 11 we canForge, the batch will be discarded
	// if we can't forge at block 15.
	// This value should be the expected number of blocks it takes between
	// sending a batch and having it mined.
	SendBatchBlocksMarginCheck int64
	// EthClientAttempts is the number of attempts to do an eth client RPC
	// call before giving up
	EthClientAttempts int
@@ -54,13 +77,25 @@ type Config struct {
	// EthClientAttemptsDelay is the delay between attempts to do an eth client
	// RPC call
	EthClientAttemptsDelay time.Duration
	// EthTxResendTimeout is the timeout after which a non-mined ethereum
	// transaction will be resent (reusing the nonce) with a newly
	// calculated gas price
	EthTxResendTimeout time.Duration
	// EthNoReuseNonce disables reusing nonces of pending transactions for
	// new replacement transactions
	EthNoReuseNonce bool
	// MaxGasPrice is the maximum gas price allowed for ethereum
	// transactions
	MaxGasPrice *big.Int
	// TxManagerCheckInterval is the waiting interval between receipt
	// checks of ethereum transactions in the TxManager
	TxManagerCheckInterval time.Duration
	// DebugBatchPath if set, specifies the path where batchInfo is stored
	// in JSON in every step/update of the pipeline
	DebugBatchPath string
	Purger         PurgerCfg
	DebugBatchPath string
	Purger         PurgerCfg
	// VerifierIdx is the index of the verifier contract registered in the
	// smart contract
	VerifierIdx       uint8
	TxProcessorConfig txprocessor.Config
}
@@ -74,15 +109,22 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
	}
}

type fromBatch struct {
	BatchNum   common.BatchNum
	ForgerAddr ethCommon.Address
	StateRoot  *big.Int
}

// Coordinator implements the Coordinator type
type Coordinator struct {
	// State
	pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
	provers          []prover.Client
	consts           synchronizer.SCConsts
	vars             synchronizer.SCVariables
	stats            synchronizer.Stats
	started          bool
	pipelineNum       int       // Pipeline sequential number. The first pipeline is 1
	pipelineFromBatch fromBatch // batch from which we started the pipeline
	provers           []prover.Client
	consts            synchronizer.SCConsts
	vars              synchronizer.SCVariables
	stats             synchronizer.Stats
	started           bool

	cfg Config

@@ -96,7 +138,8 @@ type Coordinator struct {
	wg     sync.WaitGroup
	cancel context.CancelFunc

	pipeline *Pipeline
	pipeline              *Pipeline
	lastNonFailedBatchNum common.BatchNum

	purger    *Purger
	txManager *TxManager
@@ -139,10 +182,15 @@ func NewCoordinator(cfg Config,

	ctx, cancel := context.WithCancel(context.Background())
	c := Coordinator{
		pipelineBatchNum: -1,
		provers:          serverProofs,
		consts:           *scConsts,
		vars:             *initSCVars,
		pipelineNum: 0,
		pipelineFromBatch: fromBatch{
			BatchNum:   0,
			ForgerAddr: ethCommon.Address{},
			StateRoot:  big.NewInt(0),
		},
		provers: serverProofs,
		consts:  *scConsts,
		vars:    *initSCVars,

		cfg: cfg,

@@ -183,8 +231,9 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
}

func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
	return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
		c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
	c.pipelineNum++
	return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
		c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
}

// MsgSyncBlock indicates an update to the Synchronizer stats
@@ -205,6 +254,9 @@ type MsgSyncReorg struct {
// MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct {
	Reason string
	// FailedBatchNum indicates the first batchNum that failed in the
	// pipeline. If FailedBatchNum is 0, it should be ignored.
	FailedBatchNum common.BatchNum
}

// SendMsg is a thread safe method to pass a message to the Coordinator
@@ -215,27 +267,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
	}
}

func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
	if update.Rollup != nil {
		vars.Rollup = *update.Rollup
	}
	if update.Auction != nil {
		vars.Auction = *update.Auction
	}
	if update.WDelayer != nil {
		vars.WDelayer = *update.WDelayer
	}
}

func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
	if vars.Rollup != nil {
		c.vars.Rollup = *vars.Rollup
	}
	if vars.Auction != nil {
		c.vars.Auction = *vars.Auction
	}
	if vars.WDelayer != nil {
		c.vars.WDelayer = *vars.WDelayer
	}
	updateSCVars(&c.vars, vars)
}

func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
	currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
	if blockNum < auctionConstants.GenesisBlockNum {
		log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
			"genesis", auctionConstants.GenesisBlockNum)
		return false
	}
	var slot *common.Slot
	if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
		slot = currentSlot
	} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
		slot = nextSlot
	} else {
		log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
		log.Warnw("canForge: requested blockNum is outside current and next slot",
			"blockNum", blockNum, "currentSlot", currentSlot,
			"nextSlot", nextSlot,
		)
@@ -244,16 +305,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
	anyoneForge := false
	if !slot.ForgerCommitment &&
		auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
		log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
		log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
			"block", blockNum)
		anyoneForge = true
	}
	if slot.Forger == addr || anyoneForge {
		return true
	}
	log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
	return false
}

func (c *Coordinator) canForgeAt(blockNum int64) bool {
	return canForge(&c.consts.Auction, &c.vars.Auction,
		&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
		c.cfg.ForgerAddress, blockNum)
}

func (c *Coordinator) canForge() bool {
	blockNum := c.stats.Eth.LastBlock.Num + 1
	return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -262,12 +330,24 @@ func (c *Coordinator) canForge() bool {
}

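One rule in canForge worth restating: past the slot deadline, if the slot's winner has not committed a batch, anyone may forge. As a standalone predicate (illustrative only, names are hypothetical):

package main

import "fmt"

// anyoneCanForge mirrors the deadline rule in canForge above: once the block
// offset inside the slot reaches slotDeadline and the slot's forger has not
// committed, any coordinator may forge.
func anyoneCanForge(relativeBlock, slotDeadline int64, forgerCommitment bool) bool {
	return !forgerCommitment && relativeBlock >= slotDeadline
}

func main() {
	fmt.Println(anyoneCanForge(50, 40, false)) // true: deadline passed, no commitment
	fmt.Println(anyoneCanForge(50, 40, true))  // false: the winner already forged
}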
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
	canForge := c.canForge()
	nextBlock := c.stats.Eth.LastBlock.Num + 1
	canForge := c.canForgeAt(nextBlock)
	if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
		canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
	}
	if c.pipeline == nil {
		if canForge {
		relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
		if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
			log.Debugw("Coordinator: delaying pipeline start due to "+
				"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
				relativeBlock, c.cfg.StartSlotBlocksDelay)
		} else if canForge {
			log.Infow("Coordinator: forging state begin", "block",
				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
			batchNum := common.BatchNum(stats.Sync.LastBatch)
			batchNum := stats.Sync.LastBatch.BatchNum
			if c.lastNonFailedBatchNum > batchNum {
				batchNum = c.lastNonFailedBatchNum
			}
			var err error
			if c.pipeline, err = c.newPipeline(ctx); err != nil {
				return tracerr.Wrap(err)
@@ -276,7 +356,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
				c.pipeline = nil
				return tracerr.Wrap(err)
			}
			c.pipelineBatchNum = batchNum
			// c.pipelineBatchNum = batchNum
		}
	} else {
		if !canForge {
@@ -293,18 +373,17 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
		// 	return err
		// }
		// }
		if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
			if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
				return tracerr.Wrap(err)
			}
		}
		_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
			stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
		if err != nil {
		// if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)) {
		// 	if err := c.txSelector.Reset(stats.Sync.LastBatch.BatchNum); err != nil {
		// 		return tracerr.Wrap(err)
		// 	}
		// }
		if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
			stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
			return tracerr.Wrap(err)
		}
		_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
		if err != nil {
		if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
			int64(stats.Sync.LastBatch.BatchNum)); err != nil {
			return tracerr.Wrap(err)
		}
	}
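The StartSlotBlocksDelay branch above boils down to: even when forging is allowed, don't start a pipeline during the first blocks of a slot. A compact restatement, assuming RelativeBlock returns the block offset within the current slot (shouldStartPipeline is a hypothetical name):

package main

import "fmt"

// shouldStartPipeline condenses the start condition in syncStats above.
func shouldStartPipeline(canForge bool, relativeBlock, startSlotBlocksDelay int64) bool {
	return canForge && relativeBlock >= startSlotBlocksDelay
}

func main() {
	fmt.Println(shouldStartPipeline(true, 1, 2)) // false: too early in the slot
	fmt.Println(shouldStartPipeline(true, 2, 2)) // true
}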
@@ -331,33 +410,42 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
	if c.pipeline != nil {
		c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
	}
	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
		// There's been a reorg and the batch from which the pipeline
		// was started was in a block that was discarded. The batch
		// may not be in the main chain, so we stop the pipeline as a
		// precaution (it will be started again once the node is in
		// sync).
		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
			"sync.LastBatch", c.stats.Sync.LastBatch,
			"c.pipelineBatchNum", c.pipelineBatchNum)
		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
	if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
		c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0 {
		// There's been a reorg and the batch state root from which the
		// pipeline was started has changed (probably because it was in
		// a block that was discarded), and it was sent by a different
		// coordinator than us. That batch may never be in the main
		// chain, so we stop the pipeline (it will be started again
		// once the node is in sync).
		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
			"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
			"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
			"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
		c.txManager.DiscardPipeline(ctx, c.pipelineNum)
		if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
			return tracerr.Wrap(err)
		}
	}
	return nil
}

func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
// the next pipeline will start from the last state of the synchronizer,
// otherwise, it will start from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
	batchNum := c.stats.Sync.LastBatch.BatchNum
	if failedBatchNum != 0 {
		batchNum = failedBatchNum - 1
	}
	if err := c.l2DB.Reorg(batchNum); err != nil {
		return tracerr.Wrap(err)
	}
	if c.pipeline != nil {
		c.pipeline.Stop(c.ctx)
		c.pipeline = nil
	}
	if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
		return tracerr.Wrap(err)
	}
	if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
		// TODO: Check that we are in a slot in which we can't forge
	}
	c.lastNonFailedBatchNum = batchNum
	return nil
}

@@ -373,7 +461,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
	}
	case MsgStopPipeline:
		log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
		if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
		if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
			return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
		}
	default:

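The failedBatchNum convention (0 means no failed batch) fixes where the next pipeline restarts; restated as a standalone function (BatchNum stands in for common.BatchNum, restartFrom is a hypothetical helper):

package main

import "fmt"

type BatchNum int64 // stands in for common.BatchNum

// restartFrom returns the batch the next pipeline starts from: just before
// the failed batch if one is reported, else the last synchronized batch.
func restartFrom(lastSynced, failedBatchNum BatchNum) BatchNum {
	if failedBatchNum != 0 {
		return failedBatchNum - 1
	}
	return lastSynced
}

func main() {
	fmt.Println(restartFrom(42, 0))  // 42: no failure, resume from sync state
	fmt.Println(restartFrom(42, 40)) // 39: batch 40 failed, restart before it
}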
@@ -2,6 +2,7 @@ package coordinator

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math/big"
@@ -11,6 +12,7 @@ import (
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/hermeznetwork/hermez-node/batchbuilder"
	"github.com/hermeznetwork/hermez-node/common"
	dbUtils "github.com/hermeznetwork/hermez-node/db"
@@ -105,8 +107,8 @@ func newTestModules(t *testing.T) modules {
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	test.WipeDB(db)
	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
	historyDB := historydb.NewHistoryDB(db, nil)
	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
	historyDB := historydb.NewHistoryDB(db)

	txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
	require.NoError(t, err)
@@ -261,8 +263,8 @@ func TestCoordinatorFlow(t *testing.T) {
	var stats synchronizer.Stats
	stats.Eth.LastBlock = *ethClient.CtlLastBlock()
	stats.Sync.LastBlock = stats.Eth.LastBlock
	stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
	stats.Sync.LastBatch = stats.Eth.LastBatch
	stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
	stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
	canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
	require.NoError(t, err)
	var slot common.Slot
@@ -279,7 +281,7 @@ func TestCoordinatorFlow(t *testing.T) {
	// Copy stateDB to synchronizer if there was a new batch
	source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
	dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
	if stats.Sync.LastBatch != 0 {
	if stats.Sync.LastBatch.BatchNum != 0 {
		if _, err := os.Stat(dest); os.IsNotExist(err) {
			log.Infow("Making pebble checkpoint for sync",
				"source", source, "dest", dest)
@@ -566,3 +568,8 @@ func TestCoordinatorStress(t *testing.T) {
// TODO: Test forgeBatch
// TODO: Test waitServerProof
// TODO: Test handleReorg

func TestFoo(t *testing.T) {
	a := tracerr.Wrap(fmt.Errorf("AAA: %w", core.ErrNonceTooLow))
	fmt.Println(errors.Is(a, core.ErrNonceTooLow))
}

@@ -2,6 +2,7 @@ package coordinator

import (
	"context"
	"database/sql"
	"fmt"
	"math/big"
	"sync"
@@ -24,19 +25,30 @@ type statsVars struct {
	Vars  synchronizer.SCVariablesPtr
}

type state struct {
	batchNum                     common.BatchNum
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum            int64
}

// Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct {
	num    int
	cfg    Config
	consts synchronizer.SCConsts

	// state
	batchNum                     common.BatchNum
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum            int64
	started                      bool
	state state
	// batchNum                     common.BatchNum
	// lastScheduledL1BatchBlockNum int64
	// lastForgeL1TxsNum            int64
	started       bool
	rw            sync.RWMutex
	errAtBatchNum common.BatchNum

	proversPool *ProversPool
	provers     []prover.Client
	coord       *Coordinator
	txManager   *TxManager
	historyDB   *historydb.HistoryDB
	l2DB        *l2db.L2DB
@@ -53,14 +65,28 @@ type Pipeline struct {
	cancel context.CancelFunc
}

func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
	p.rw.Lock()
	defer p.rw.Unlock()
	p.errAtBatchNum = batchNum
}

func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
	p.rw.RLock()
	defer p.rw.RUnlock()
	return p.errAtBatchNum
}

// NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context,
	cfg Config,
	num int, // Pipeline sequential number
	historyDB *historydb.HistoryDB,
	l2DB *l2db.L2DB,
	txSelector *txselector.TxSelector,
	batchBuilder *batchbuilder.BatchBuilder,
	purger *Purger,
	coord *Coordinator,
	txManager *TxManager,
	provers []prover.Client,
	scConsts *synchronizer.SCConsts,
@@ -79,6 +105,7 @@ func NewPipeline(ctx context.Context,
		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
	}
	return &Pipeline{
		num:       num,
		cfg:       cfg,
		historyDB: historyDB,
		l2DB:      l2DB,
@@ -87,6 +114,7 @@ func NewPipeline(ctx context.Context,
		provers:     provers,
		proversPool: proversPool,
		purger:      purger,
		coord:       coord,
		txManager:   txManager,
		consts:      *scConsts,
		statsVarsCh: make(chan statsVars, queueLen),
@@ -104,33 +132,67 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum,
	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
	p.batchNum = batchNum
	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
	p.state = state{
		batchNum:                     batchNum,
		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
		lastScheduledL1BatchBlockNum: 0,
	}
	p.stats = *stats
	p.vars = *vars
	p.lastScheduledL1BatchBlockNum = 0

	err := p.txSelector.Reset(p.batchNum)
	// Reset the StateDB in TxSelector and BatchBuilder from the
	// synchronizer only if the checkpoint we reset from either:
	// a. Doesn't exist in the TxSelector/BatchBuilder
	// b. The batch has already been synced by the synchronizer and has a
	// different MTRoot than the BatchBuilder
	// Otherwise, reset from the local checkpoint.

	// First attempt to reset from local checkpoint if such checkpoint exists
	existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
	if err != nil {
		return tracerr.Wrap(err)
	}
	err = p.batchBuilder.Reset(p.batchNum, true)
	fromSynchronizerTxSelector := !existsTxSelector
	if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
		return tracerr.Wrap(err)
	}
	existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
	if err != nil {
		return tracerr.Wrap(err)
	}
	fromSynchronizerBatchBuilder := !existsBatchBuilder
	if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
		return tracerr.Wrap(err)
	}

	// After reset, check that if the batch exists in the historyDB, the
	// stateRoot matches with the local one, if not, force a reset from
	// synchronizer
	batch, err := p.historyDB.GetBatch(p.state.batchNum)
	if tracerr.Unwrap(err) == sql.ErrNoRows {
		// nothing to do
	} else if err != nil {
		return tracerr.Wrap(err)
	} else {
		localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
		if batch.StateRoot.Cmp(localStateRoot) != 0 {
			log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
				"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
			// StateRoot from synchronizer doesn't match StateRoot
			// from batchBuilder, force a reset from synchronizer
			if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
				return tracerr.Wrap(err)
			}
			if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
				return tracerr.Wrap(err)
			}
		}
	}
	return nil
}

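The reset policy above (cases a and b) can be read as a single predicate: fall back to the synchronizer's state when the local checkpoint is missing, or when its merkle root disagrees with the root recorded for that batch. A sketch under those assumptions (resetFromSynchronizer is a hypothetical condensation, not the repo's API):

package main

import (
	"fmt"
	"math/big"
)

// resetFromSynchronizer condenses the decision in Pipeline.reset above.
// syncedRoot is nil when the batch is not in the historyDB yet.
func resetFromSynchronizer(checkpointExists bool, localRoot, syncedRoot *big.Int) bool {
	if !checkpointExists {
		return true // case a: no local checkpoint to reuse
	}
	return syncedRoot != nil && localRoot.Cmp(syncedRoot) != 0 // case b
}

func main() {
	fmt.Println(resetFromSynchronizer(false, big.NewInt(7), nil))          // true
	fmt.Println(resetFromSynchronizer(true, big.NewInt(7), big.NewInt(7))) // false: reuse local
	fmt.Println(resetFromSynchronizer(true, big.NewInt(7), big.NewInt(9))) // true: roots differ
}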
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
	if vars.Rollup != nil {
		p.vars.Rollup = *vars.Rollup
	}
	if vars.Auction != nil {
		p.vars.Auction = *vars.Auction
	}
	if vars.WDelayer != nil {
		p.vars.WDelayer = *vars.WDelayer
	}
	updateSCVars(&p.vars, vars)
}

// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
@@ -143,7 +205,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
	} else if err != nil {
		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
		} else {
			log.Errorw("forgeBatch", "err", err)
@@ -199,15 +261,32 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
			p.stats = statsVars.Stats
			p.syncSCVars(statsVars.Vars)
		case <-time.After(waitDuration):
			batchNum = p.batchNum + 1
			// Once errAtBatchNum != 0, we stop forging
			// batches because there's been an error and we
			// wait for the pipeline to be stopped.
			if p.getErrAtBatchNum() != 0 {
				waitDuration = p.cfg.ForgeRetryInterval
				continue
			}
			batchNum = p.state.batchNum + 1
			batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
			if p.ctx.Err() != nil {
				continue
			} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
				waitDuration = p.cfg.ForgeRetryInterval
				continue
			} else if err != nil {
				waitDuration = p.cfg.SyncRetryInterval
				p.setErrAtBatchNum(batchNum)
				waitDuration = p.cfg.ForgeRetryInterval
				p.coord.SendMsg(p.ctx, MsgStopPipeline{
					Reason: fmt.Sprintf(
						"Pipeline.handleForgeBatch: %v", err),
					FailedBatchNum: batchNum,
				})
				continue
			}
			p.batchNum = batchNum

			p.state.batchNum = batchNum
			select {
			case batchChSentServerProof <- batchInfo:
			case <-p.ctx.Done():
@@ -225,16 +304,28 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
				p.wg.Done()
				return
			case batchInfo := <-batchChSentServerProof:
				// Once errAtBatchNum != 0, we stop forging
				// batches because there's been an error and we
				// wait for the pipeline to be stopped.
				if p.getErrAtBatchNum() != 0 {
					continue
				}
				err := p.waitServerProof(p.ctx, batchInfo)
				// We are done with this serverProof, add it back to the pool
				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
				batchInfo.ServerProof = nil
				if p.ctx.Err() != nil {
					continue
				} else if err != nil {
					log.Errorw("waitServerProof", "err", err)
					p.setErrAtBatchNum(batchInfo.BatchNum)
					p.coord.SendMsg(p.ctx, MsgStopPipeline{
						Reason: fmt.Sprintf(
							"Pipeline.waitServerProof: %v", err),
						FailedBatchNum: batchInfo.BatchNum,
					})
					continue
				}
				// We are done with this serverProof, add it back to the pool
				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
				// batchInfo.ServerProof = nil
				p.txManager.AddBatch(p.ctx, batchInfo)
			}
		}
@@ -284,8 +375,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
	if err != nil {
		return nil, tracerr.Wrap(err)
	}

	batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
	// Structure to accumulate data and metadata of the batch
	batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
	batchInfo.Debug.StartTimestamp = time.Now()
	batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1

@@ -300,22 +391,19 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
	var auths [][]byte
	var coordIdxs []common.Idx

	// TODO: If there are no txs and we are behind the timeout, skip
	// forging a batch and return a particular error that can be handled
	// in the loop where handleForgeBatch is called to retry after an
	// interval

	// 1. Decide if we forge L2Tx or L1+L2Tx
	if p.shouldL1L2Batch(batchInfo) {
		batchInfo.L1Batch = true
		defer func() {
			// If there's no error, update the parameters related
			// to the last L1Batch forged
			if err == nil {
				p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
				p.lastForgeL1TxsNum++
			}
		}()
		if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
		if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
			return nil, tracerr.Wrap(errLastL1BatchNotSynced)
		}
		// 2a: L1+L2 txs
		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
		if err != nil {
			return nil, tracerr.Wrap(err)
		}
@@ -324,6 +412,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
		if err != nil {
			return nil, tracerr.Wrap(err)
		}

		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
		p.state.lastForgeL1TxsNum++
	} else {
		// 2b: only L2 txs
		coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
@@ -399,12 +490,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
	// Take the lastL1BatchBlockNum as the biggest between the last
	// scheduled one, and the synchronized one.
	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
	}
	// Set Debug information
	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
	batchInfo.Debug.L1BatchBlockScheduleDeadline =

@@ -77,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
	//
	// Scheduled L1Batch
	//
	pipeline.lastScheduledL1BatchBlockNum = startBlock
	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
	stats.Sync.LastL1BatchBlock = startBlock - 10

	// We are one block before the timeout range * 0.5
@@ -172,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
	// users with positive balances
	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
	syncStats := sync.Stats()
	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
	batchNum := syncStats.Sync.LastBatch.BatchNum
	syncSCVars := sync.SCVars()

	pipeline, err := coord.newPipeline(ctx)

@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	test.WipeDB(db)
	return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
	return l2db.NewL2DB(db, 10, 100, 24*time.Hour)
}

func newStateDB(t *testing.T) *statedb.LocalStateDB {

@@ -2,6 +2,7 @@ package coordinator

import (
	"context"
	"errors"
	"fmt"
	"math/big"
	"time"
@@ -9,6 +10,7 @@ import (
	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/l2db"
@@ -35,12 +37,22 @@ type TxManager struct {
	vars        synchronizer.SCVariables
	statsVarsCh chan statsVars

	queue []*BatchInfo
	discardPipelineCh chan int // int refers to the pipelineNum

	minPipelineNum int
	queue          Queue
	// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
	lastSuccessBatch common.BatchNum
	lastPendingBatch common.BatchNum
	lastSuccessNonce uint64
	lastPendingNonce uint64
	// lastPendingBatch common.BatchNum
	// accNonce is the account nonce in the last mined block (due to mined txs)
	accNonce uint64
	// accNextNonce is the nonce that we should use to send the next tx.
	// In some cases this will be a reused nonce of an already pending tx.
	accNextNonce uint64
	// accPendingNonce is the pending nonce of the account due to pending txs
	// accPendingNonce uint64

	lastSentL1BatchBlockNum int64
}

// NewTxManager creates a new TxManager
@@ -54,26 +66,27 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
	accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
	if err != nil {
		return nil, err
	}
	lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
	if err != nil {
		return nil, err
	}
	if lastSuccessNonce != lastPendingNonce {
		return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
			lastSuccessNonce, lastPendingNonce))
	}
	log.Infow("TxManager started", "nonce", lastSuccessNonce)
	// accPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
	// if err != nil {
	// 	return nil, err
	// }
	// if accNonce != accPendingNonce {
	// 	return nil, tracerr.Wrap(fmt.Errorf("currentNonce (%v) != accPendingNonce (%v)",
	// 		accNonce, accPendingNonce))
	// }
	log.Infow("TxManager started", "nonce", accNonce)
	return &TxManager{
		cfg:         *cfg,
		ethClient:   ethClient,
		l2DB:        l2DB,
		coord:       coord,
		batchCh:     make(chan *BatchInfo, queueLen),
		statsVarsCh: make(chan statsVars, queueLen),
		cfg:               *cfg,
		ethClient:         ethClient,
		l2DB:              l2DB,
		coord:             coord,
		batchCh:           make(chan *BatchInfo, queueLen),
		statsVarsCh:       make(chan statsVars, queueLen),
		discardPipelineCh: make(chan int, queueLen),
		account: accounts.Account{
			Address: *address,
		},
@@ -82,8 +95,11 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac

		vars: *initSCVars,

		lastSuccessNonce: lastSuccessNonce,
		lastPendingNonce: lastPendingNonce,
		minPipelineNum: 0,
		queue:          NewQueue(),
		accNonce:       accNonce,
		accNextNonce:   accNonce,
		// accPendingNonce: accPendingNonce,
	}, nil
}

@@ -104,16 +120,17 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
	}
}

// DiscardPipeline is a thread safe method to notify about a discarded pipeline
// due to a reorg
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
	select {
	case t.discardPipelineCh <- pipelineNum:
	case <-ctx.Done():
	}
}

func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
	if vars.Rollup != nil {
		t.vars.Rollup = *vars.Rollup
	}
	if vars.Auction != nil {
		t.vars.Auction = *vars.Auction
	}
	if vars.WDelayer != nil {
		t.vars.WDelayer = *vars.WDelayer
	}
	updateSCVars(&t.vars, vars)
}

// NewAuth generates a new auth object for an ethereum transaction
@@ -123,6 +140,7 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
		return nil, tracerr.Wrap(err)
	}
	inc := new(big.Int).Set(gasPrice)
	// TODO: Replace this by a value of percentage
	const gasPriceDiv = 100
	inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
	gasPrice.Add(gasPrice, inc)
@@ -141,29 +159,75 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
	return auth, nil
}

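The accNonce/accNextNonce split above separates the nonce confirmed on chain from the nonce to use for the next send, and resends reuse the nonce of the transaction they replace. A simplified model of that selection (pickNonce is hypothetical, not part of the diff):

package main

import "fmt"

// pickNonce models the selection in sendRollupForgeBatch below: a resend
// keeps the nonce of the tx it replaces; a fresh send takes accNextNonce,
// which the real code only advances after the send succeeds.
func pickNonce(accNextNonce uint64, resend bool, replacedNonce uint64) uint64 {
	if resend {
		return replacedNonce
	}
	return accNextNonce
}

func main() {
	next := uint64(7)
	fmt.Println(pickNonce(next, false, 0)) // 7: new tx consumes the next nonce
	fmt.Println(pickNonce(next, true, 5))  // 5: replacement reuses nonce 5
}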
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
	// TODO: Check if we can forge in the next blockNum, abort if we can't
	batchInfo.Debug.Status = StatusSent
	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
	batchInfo.Debug.SendTimestamp = time.Now()
	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
		batchInfo.Debug.StartTimestamp).Seconds()
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
	nextBlock := t.stats.Eth.LastBlock.Num + 1
	if !t.canForgeAt(nextBlock) {
		return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
	}
	if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
		return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
	}
	margin := t.cfg.SendBatchBlocksMarginCheck
	if margin != 0 {
		if !t.canForgeAt(nextBlock + margin) {
			return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
				margin, nextBlock))
		}
		if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
			return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
				margin, nextBlock))
		}
	}
	return nil
}

func addPerc(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Set(v)
	r.Mul(r, big.NewInt(p))
	// nolint reason: to calculate percentages we divide by 100
	r.Div(r, big.NewInt(100)) //nolint:gomnd
	return r.Add(v, r)
}

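addPerc returns v increased by p percent (v + v*p/100); the retry loop below uses it to bump the gas price by 10% on underpriced errors. A self-contained usage example:

package main

import (
	"fmt"
	"math/big"
)

// Same arithmetic as addPerc above, duplicated here so the example runs alone.
func addPerc(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Set(v)
	r.Mul(r, big.NewInt(p))
	r.Div(r, big.NewInt(100))
	return r.Add(v, r)
}

func main() {
	gasPrice := big.NewInt(100_000_000_000) // 100 gwei
	fmt.Println(addPerc(gasPrice, 10))      // 110000000000: +10%
}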
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
	var ethTx *types.Transaction
	var err error
	auth, err := t.NewAuth(ctx)
	if err != nil {
		return tracerr.Wrap(err)
	}
	auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
	t.lastPendingNonce++
	auth.Nonce = big.NewInt(int64(t.accNextNonce))
	if resend {
		auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
	}
	for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
		if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
			return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
				auth.GasPrice, t.cfg.MaxGasPrice))
		}
		// RollupForgeBatch() calls ethclient.SendTransaction()
		ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
		if err != nil {
			// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
			// 	log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
			// 		"block", t.stats.Eth.LastBlock.Num+1)
			// 	return tracerr.Wrap(err)
			// }
			if errors.Is(err, core.ErrNonceTooLow) {
				log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
					"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
				auth.Nonce.Add(auth.Nonce, big.NewInt(1))
				attempt--
			} else if errors.Is(err, core.ErrNonceTooHigh) {
				log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
					"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
				auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
				attempt--
			} else if errors.Is(err, core.ErrUnderpriced) {
				log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
					"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
				auth.GasPrice = addPerc(auth.GasPrice, 10)
				attempt--
			} else if errors.Is(err, core.ErrReplaceUnderpriced) {
				log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
					"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
				auth.GasPrice = addPerc(auth.GasPrice, 10)
				attempt--
			} else if err != nil {
				log.Errorw("TxManager ethClient.RollupForgeBatch",
					"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
					"batchNum", batchInfo.BatchNum)
@@ -179,10 +243,30 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
	if err != nil {
		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
	}
	if !resend {
		t.accNextNonce = auth.Nonce.Uint64() + 1
	}
	batchInfo.EthTx = ethTx
	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
	now := time.Now()
	batchInfo.SendTimestamp = now

	if resend {
		batchInfo.Debug.ResendNum++
	}
	batchInfo.Debug.Status = StatusSent
	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
	batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
		batchInfo.Debug.StartTimestamp).Seconds()
	t.cfg.debugBatchStore(batchInfo)
	t.lastPendingBatch = batchInfo.BatchNum

	// t.lastPendingBatch = batchInfo.BatchNum
	if !resend {
		if batchInfo.L1Batch {
			t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
		}
	}
	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
		return tracerr.Wrap(err)
	}
@@ -225,13 +309,19 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
	receipt := batchInfo.Receipt
	if receipt != nil {
		if batchInfo.EthTx.Nonce()+1 > t.accNonce {
			t.accNonce = batchInfo.EthTx.Nonce() + 1
		}
		if receipt.Status == types.ReceiptStatusFailed {
			batchInfo.Debug.Status = StatusFailed
			t.cfg.debugBatchStore(batchInfo)
			_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
				"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
				"err", err)
			if batchInfo.BatchNum <= t.lastSuccessBatch {
				t.lastSuccessBatch = batchInfo.BatchNum - 1
			}
			return nil, tracerr.Wrap(fmt.Errorf(
				"ethereum transaction receipt status is failed: %w", err))
		} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -239,6 +329,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
			batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
			batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
				batchInfo.Debug.StartBlockNum
			if batchInfo.Debug.StartToMineDelay == 0 {
				if block, err := t.ethClient.EthBlockByNumber(ctx,
					receipt.BlockNumber.Int64()); err != nil {
					log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
				} else {
					batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
						batchInfo.Debug.SendTimestamp).Seconds()
					batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
						batchInfo.Debug.StartTimestamp).Seconds()
				}
			}
			t.cfg.debugBatchStore(batchInfo)
			if batchInfo.BatchNum > t.lastSuccessBatch {
				t.lastSuccessBatch = batchInfo.BatchNum
@@ -250,9 +351,72 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
	return nil, nil
}

// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)

// Queue of BatchInfos
type Queue struct {
	list []*BatchInfo
	// nonceByBatchNum map[common.BatchNum]uint64
	next int
}

// NewQueue returns a new queue
func NewQueue() Queue {
	return Queue{
		list: make([]*BatchInfo, 0),
		// nonceByBatchNum: make(map[common.BatchNum]uint64),
		next: 0,
	}
}

// Len is the length of the queue
func (q *Queue) Len() int {
	return len(q.list)
}

// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
	if position >= len(q.list) {
		return nil
	}
	return q.list[position]
}

// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
	if len(q.list) == 0 {
		return 0, nil
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
	// batchInfo := q.list[position]
	// delete(q.nonceByBatchNum, batchInfo.BatchNum)
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
	q.list = append(q.list, batchInfo)
	// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}

// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// 	nonce, ok := q.nonceByBatchNum[batchNum]
// 	return nonce, ok
// }

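A reduced, runnable model of the Queue above, showing the ring-cursor behavior of Next and how Remove keeps the cursor in range after a deletion; BatchInfo is stubbed here for illustration, the method bodies are taken from the diff.

package main

import "fmt"

type BatchInfo struct{ BatchNum int }

// Queue as in the diff above, reduced to its ring-cursor behavior.
type Queue struct {
	list []*BatchInfo
	next int
}

func (q *Queue) Push(b *BatchInfo) { q.list = append(q.list, b) }

func (q *Queue) Next() (int, *BatchInfo) {
	if len(q.list) == 0 {
		return 0, nil
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

func (q *Queue) Remove(position int) {
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

func main() {
	q := &Queue{}
	q.Push(&BatchInfo{1})
	q.Push(&BatchInfo{2})
	pos, bi := q.Next()
	fmt.Println(pos, bi.BatchNum) // 0 1
	q.Remove(pos)                 // the modulo keeps the cursor in range
	_, bi = q.Next()
	fmt.Println(bi.BatchNum) // 2
}

The cursor design means every pending batch gets polled in turn by the check loop below, instead of the old hand-rolled `next`/`current` index arithmetic over a plain slice.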
// Run the TxManager
func (t *TxManager) Run(ctx context.Context) {
	next := 0
	waitDuration := longWaitDuration

	var statsVars statsVars
@@ -263,7 +427,7 @@ func (t *TxManager) Run(ctx context.Context) {
	t.stats = statsVars.Stats
	t.syncSCVars(statsVars.Vars)
	log.Infow("TxManager: received initial statsVars",
		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)

	for {
		select {
@@ -273,8 +437,27 @@ func (t *TxManager) Run(ctx context.Context) {
		case statsVars := <-t.statsVarsCh:
			t.stats = statsVars.Stats
			t.syncSCVars(statsVars.Vars)
		case pipelineNum := <-t.discardPipelineCh:
			t.minPipelineNum = pipelineNum + 1
			if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
				continue
			} else if err != nil {
				log.Errorw("TxManager: removeBadBatchInfos", "err", err)
				continue
			}
		case batchInfo := <-t.batchCh:
			if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
			if batchInfo.PipelineNum < t.minPipelineNum {
				log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
					"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
			}
			if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
				log.Warnw("TxManager: shouldSend", "err", err,
					"batch", batchInfo.BatchNum)
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
				continue
			}
			if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
				continue
			} else if err != nil {
				// If we reach here it's because our ethNode has
@@ -282,19 +465,20 @@ func (t *TxManager) Run(ctx context.Context) {
				// ethereum. This could be due to the ethNode
				// failure, or an invalid transaction (that
				// can't be mined)
				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
				log.Warnw("TxManager: forgeBatch send failed", "err", err,
					"batch", batchInfo.BatchNum)
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch send: %v", err)})
				continue
			}
			t.queue = append(t.queue, batchInfo)
			t.queue.Push(batchInfo)
			waitDuration = t.cfg.TxManagerCheckInterval
		case <-time.After(waitDuration):
			if len(t.queue) == 0 {
			queuePosition, batchInfo := t.queue.Next()
			if batchInfo == nil {
				waitDuration = longWaitDuration
				continue
			}
			current := next
			next = (current + 1) % len(t.queue)
			batchInfo := t.queue[current]
			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
				continue
			} else if err != nil { //nolint:staticcheck
@@ -304,7 +488,8 @@ func (t *TxManager) Run(ctx context.Context) {
				// if it was not mined, mined and successful or
				// mined and failed. This could be due to the
				// ethNode failure.
				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
			}

			confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -312,32 +497,108 @@ func (t *TxManager) Run(ctx context.Context) {
				continue
			} else if err != nil { //nolint:staticcheck
				// Transaction was rejected
				t.queue = append(t.queue[:current], t.queue[current+1:]...)
				if len(t.queue) == 0 {
					next = 0
				} else {
					next = current % len(t.queue)
				if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
					continue
				} else if err != nil {
					log.Errorw("TxManager: removeBadBatchInfos", "err", err)
					continue
				}
				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
				continue
			}
			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
				log.Debugw("TxManager tx for RollupForgeBatch confirmed",
					"batch", batchInfo.BatchNum)
				t.queue = append(t.queue[:current], t.queue[current+1:]...)
				if len(t.queue) == 0 {
					next = 0
				} else {
					next = current % len(t.queue)
			now := time.Now()
			if !t.cfg.EthNoReuseNonce && confirm == nil &&
				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
				log.Infow("TxManager: forgeBatch tx not mined before timeout, resending",
					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
					continue
				} else if err != nil {
					// If we reach here it's because our ethNode has
					// been unable to send the transaction to
					// ethereum. This could be due to the ethNode
					// failure, or an invalid transaction (that
					// can't be mined)
					log.Warnw("TxManager: forgeBatch resend failed", "err", err,
						"batch", batchInfo.BatchNum)
					t.coord.SendMsg(ctx, MsgStopPipeline{
						Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
					continue
				}
			}

			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
				log.Debugw("TxManager: forgeBatch tx confirmed",
					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
				t.queue.Remove(queuePosition)
			}
		}
	}
}

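The resend branch above fires only for a pending (unmined) tx older than EthTxResendTimeout, and only when nonce reuse is enabled. A hedged, self-contained model of that predicate; the type and function names here are illustrative, not from the node:

package main

import (
	"fmt"
	"time"
)

type pendingTx struct {
	sent    time.Time
	confirm *int64 // nil until a receipt is seen
}

// shouldResend mirrors the condition checked in TxManager.Run above.
func shouldResend(tx pendingTx, noReuseNonce bool, timeout time.Duration, now time.Time) bool {
	return !noReuseNonce && tx.confirm == nil && now.Sub(tx.sent) > timeout
}

func main() {
	now := time.Now()
	tx := pendingTx{sent: now.Add(-2 * time.Minute)}
	fmt.Println(shouldResend(tx, false, time.Minute, now)) // true: old and unmined
	one := int64(1)
	tx.confirm = &one
	fmt.Println(shouldResend(tx, false, time.Minute, now)) // false: already mined
}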
// nolint reason: this function will be used in the future
//nolint:unused
func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
	next := 0
	// batchNum := 0
	for {
		batchInfo := t.queue.At(next)
		if batchInfo == nil {
			break
		}
		if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
			return nil
		} else if err != nil {
			// Our ethNode is giving an error different
			// than "not found" when getting the receipt
			// for the transaction, so we can't figure out
			// if it was not mined, mined and successful or
			// mined and failed. This could be due to the
			// ethNode failure.
			next++
			continue
		}
		confirm, err := t.handleReceipt(ctx, batchInfo)
		if ctx.Err() != nil {
			return nil
		} else if err != nil {
			// Transaction was rejected
			if t.minPipelineNum <= batchInfo.PipelineNum {
				t.minPipelineNum = batchInfo.PipelineNum + 1
			}
			t.queue.Remove(next)
			continue
		}
		// If tx is pending but is from a cancelled pipeline, remove it
		// from the queue
		if confirm == nil {
			if batchInfo.PipelineNum < t.minPipelineNum {
				// batchNum++
				t.queue.Remove(next)
				continue
			}
		}
		next++
	}
	accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
	if err != nil {
		return err
	}
	if !t.cfg.EthNoReuseNonce {
		t.accNextNonce = accNonce
	}
	return nil
}

func (t *TxManager) canForgeAt(blockNum int64) bool {
	return canForge(&t.consts.Auction, &t.vars.Auction,
		&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
		&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
		t.cfg.ForgerAddress, blockNum)
}

func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
	lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
	if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
		lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
	}
	return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}

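A worked example of the mustL1L2Batch rule above, with assumed numbers: with ForgeL1L2BatchTimeout = 10 and the last L1 batch forged at block 100, an L1L2 batch becomes mandatory from block 109 onward.

package main

import "fmt"

// mustL1L2 restates the comparison above; all values are illustrative.
func mustL1L2(blockNum, lastL1BatchBlockNum, forgeL1L2BatchTimeout int64) bool {
	return blockNum-lastL1BatchBlockNum >= forgeL1L2BatchTimeout-1
}

func main() {
	fmt.Println(mustL1L2(108, 100, 10)) // false: 8 < 9
	fmt.Println(mustL1L2(109, 100, 10)) // true: 9 >= 9
}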
coordinator/txmanager_test.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package coordinator

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAddPerc(t *testing.T) {
	assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
	assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
	assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
	assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}
File diff suppressed because it is too large
@@ -22,7 +22,6 @@ import (
)

var historyDB *HistoryDB
var historyDBWithACC *HistoryDB

// In order to run the test you need to run a Postgres DB with
// a database named "history" that is accessible by
@@ -39,12 +38,10 @@ func TestMain(m *testing.M) {
	if err != nil {
		panic(err)
	}
	historyDB = NewHistoryDB(db, nil)
	historyDB = NewHistoryDB(db)
	if err != nil {
		panic(err)
	}
	apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
	historyDBWithACC = NewHistoryDB(db, apiConnCon)
	// Run tests
	result := m.Run()
	// Close DB
@@ -88,7 +85,7 @@ func TestBlocks(t *testing.T) {
		blocks...,
	)
	// Get all blocks from DB
	fetchedBlocks, err := historyDB.getBlocks(fromBlock, toBlock)
	fetchedBlocks, err := historyDB.GetBlocks(fromBlock, toBlock)
	assert.Equal(t, len(blocks), len(fetchedBlocks))
	// Compare generated vs fetched blocks
	assert.NoError(t, err)
@@ -203,6 +200,10 @@ func TestBatches(t *testing.T) {
	fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
	assert.NoError(t, err)
	assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
	// Test GetLastBatch
	fetchedLastBatch, err := historyDB.GetLastBatch()
	assert.NoError(t, err)
	assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
	// Test GetLastL1TxsNum
	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
	assert.NoError(t, err)
@@ -211,6 +212,12 @@ func TestBatches(t *testing.T) {
	fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
	assert.NoError(t, err)
	assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
	// Test GetBatch
	fetchedBatch, err := historyDB.GetBatch(1)
	require.NoError(t, err)
	assert.Equal(t, &batches[0], fetchedBatch)
	_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
	assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}

func TestBids(t *testing.T) {
@@ -248,8 +255,9 @@ func TestTokens(t *testing.T) {
	err := historyDB.AddTokens(tokens)
	assert.NoError(t, err)
	tokens = append([]common.Token{ethToken}, tokens...)
	limit := uint(10)
	// Fetch tokens
	fetchedTokens, err := historyDB.GetTokensTest()
	fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
	assert.NoError(t, err)
	// Compare fetched tokens vs generated tokens
	// All the tokens should have USDUpdate set by the DB trigger
@@ -269,7 +277,7 @@ func TestTokens(t *testing.T) {
		assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
	}
	// Fetch tokens
	fetchedTokens, err = historyDB.GetTokensTest()
	fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
	assert.NoError(t, err)
	// Compare fetched tokens vs generated tokens
	// All the tokens should have USDUpdate set by the DB trigger
@@ -304,8 +312,9 @@ func TestTokensUTF8(t *testing.T) {
	assert.NoError(t, err)
	// Work with nonUTFTokens, as the tokens one gets updated and non UTF-8 characters are lost
	nonUTFTokens = append([]common.Token{ethToken}, nonUTFTokens...)
	limit := uint(10)
	// Fetch tokens
	fetchedTokens, err := historyDB.GetTokensTest()
	fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
	assert.NoError(t, err)
	// Compare fetched tokens vs generated tokens
	// All the tokens should have USDUpdate set by the DB trigger
@@ -325,7 +334,7 @@ func TestTokensUTF8(t *testing.T) {
		assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
	}
	// Fetch tokens
	fetchedTokens, err = historyDB.GetTokensTest()
	fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
	assert.NoError(t, err)
	// Compare fetched tokens vs generated tokens
	// All the tokens should have USDUpdate set by the DB trigger
@@ -611,10 +620,10 @@ func TestTxs(t *testing.T) {
	assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)

	// Tx ID
	assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
	assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
	assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
	assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())
	assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
	assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
	assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
	assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())

	// Tx From and To IDx
	assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -1088,8 +1097,9 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
	assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals)
}

func TestGetMetricsAPI(t *testing.T) {
func TestGetMetrics(t *testing.T) {
	test.WipeDB(historyDB.DB())

	set := `
	Type: Blockchain

@@ -1146,7 +1156,7 @@ func TestGetMetricsAPI(t *testing.T) {
		assert.NoError(t, err)
	}

	res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
	res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
	assert.NoError(t, err)

	assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
@@ -1165,7 +1175,7 @@ func TestGetMetricsAPI(t *testing.T) {
	assert.Equal(t, float64(0), res.AvgTransactionFee)
}

func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
func TestGetMetricsMoreThan24Hours(t *testing.T) {
	test.WipeDB(historyDB.DB())

	testUsersLen := 3
@@ -1226,7 +1236,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
		assert.NoError(t, err)
	}

	res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
	res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
	assert.NoError(t, err)

	assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
@@ -1245,15 +1255,15 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
	assert.Equal(t, float64(0), res.AvgTransactionFee)
}

func TestGetMetricsAPIEmpty(t *testing.T) {
func TestGetMetricsEmpty(t *testing.T) {
	test.WipeDB(historyDB.DB())
	_, err := historyDBWithACC.GetMetricsAPI(0)
	_, err := historyDB.GetMetrics(0)
	assert.NoError(t, err)
}

func TestGetAvgTxFeeEmpty(t *testing.T) {
	test.WipeDB(historyDB.DB())
	_, err := historyDBWithACC.GetAvgTxFeeAPI()
	_, err := historyDB.GetAvgTxFee()
	assert.NoError(t, err)
}

@@ -425,12 +425,13 @@ func (k *KVDB) MakeCheckpoint() error {
	}

	// if checkpoint BatchNum already exists on disk, delete it
	if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
	} else if err != nil {
		return tracerr.Wrap(err)
	} else {
		if err := os.RemoveAll(checkpointPath); err != nil {
			return tracerr.Wrap(err)
		}
	} else if err != nil && !os.IsNotExist(err) {
		return tracerr.Wrap(err)
	}

	// execute Checkpoint
@@ -451,12 +452,25 @@ func (k *KVDB) MakeCheckpoint() error {
	return nil
}

// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
	source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
	if _, err := os.Stat(source); os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	return true, nil
}

// DeleteCheckpoint removes the checkpoint of the given batchNum, if it exists
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))

	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
		return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
	} else if err != nil {
		return tracerr.Wrap(err)
	}

	return os.RemoveAll(checkpointPath)
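The checkpoint changes above converge on a single three-way os.Stat idiom: missing, stat error, exists. A standalone sketch of that idiom, with an assumed path in main:

package main

import (
	"fmt"
	"os"
)

// exists reports whether path exists, distinguishing "missing" from a
// real stat failure (e.g. permission denied), as the KVDB code does.
func exists(path string) (bool, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	ok, err := exists("/tmp")
	fmt.Println(ok, err)
}

The old `!os.IsNotExist(err)` form conflated "exists" with "stat failed", which is why the diff rewrites each call site into the explicit three branches.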
@@ -520,6 +534,8 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
	if _, err := os.Stat(source); os.IsNotExist(err) {
		// if kvdb does not have checkpoint at batchNum, return err
		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
	} else if err != nil {
		return tracerr.Wrap(err)
	}
	// By locking we allow calling MakeCheckpointFromTo from multiple
	// places at the same time for the same stateDB. This allows the
@@ -533,12 +549,13 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e

func pebbleMakeCheckpoint(source, dest string) error {
	// Remove dest folder (if it exists) before doing the checkpoint
	if _, err := os.Stat(dest); !os.IsNotExist(err) {
	if _, err := os.Stat(dest); os.IsNotExist(err) {
	} else if err != nil {
		return tracerr.Wrap(err)
	} else {
		if err := os.RemoveAll(dest); err != nil {
			return tracerr.Wrap(err)
		}
	} else if err != nil && !os.IsNotExist(err) {
		return tracerr.Wrap(err)
	}

	sto, err := pebble.NewPebbleStorage(source, false)

@@ -1,85 +0,0 @@
package l2db

import (
	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/tracerr"
	"github.com/russross/meddler"
)

// AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
	cancel, err := l2db.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return tracerr.Wrap(err)
	}
	defer l2db.apiConnCon.Release()
	return l2db.AddAccountCreationAuth(auth)
}

// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
	cancel, err := l2db.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	defer l2db.apiConnCon.Release()
	auth := new(AccountCreationAuthAPI)
	return auth, tracerr.Wrap(meddler.QueryRow(
		l2db.db, auth,
		"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
		addr,
	))
}

// AddTxAPI inserts a tx to the pool
func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
	cancel, err := l2db.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return tracerr.Wrap(err)
	}
	defer l2db.apiConnCon.Release()
	row := l2db.db.QueryRow(
		"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
		common.PoolL2TxStatePending,
	)
	var totalTxs uint32
	if err := row.Scan(&totalTxs); err != nil {
		return tracerr.Wrap(err)
	}
	if totalTxs >= l2db.maxTxs {
		return tracerr.New(
			"The pool is at full capacity. More transactions are not accepted currently",
		)
	}
	return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}

// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `

// GetTxAPI returns the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
	cancel, err := l2db.apiConnCon.Acquire()
	defer cancel()
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	defer l2db.apiConnCon.Release()
	tx := new(PoolTxAPI)
	return tx, tracerr.Wrap(meddler.QueryRow(
		l2db.db, tx,
		selectPoolTxAPI+"WHERE tx_id = $1;",
		txID,
	))
}
@@ -25,25 +25,17 @@ type L2DB struct {
	safetyPeriod common.BatchNum
	ttl          time.Duration
	maxTxs       uint32 // limit of txs that are accepted in the pool
	apiConnCon   *db.APIConnectionController
}

// NewL2DB creates a L2DB.
// To create it, a db connection is needed, the safety period expressed in batches,
// the maxTxs that the DB should have and the TTL (time to live) for pending txs.
func NewL2DB(
	db *sqlx.DB,
	safetyPeriod common.BatchNum,
	maxTxs uint32,
	TTL time.Duration,
	apiConnCon *db.APIConnectionController,
) *L2DB {
func NewL2DB(db *sqlx.DB, safetyPeriod common.BatchNum, maxTxs uint32, TTL time.Duration) *L2DB {
	return &L2DB{
		db:           db,
		safetyPeriod: safetyPeriod,
		ttl:          TTL,
		maxTxs:       maxTxs,
		apiConnCon:   apiConnCon,
	}
}

@@ -55,6 +47,7 @@ func (l2db *L2DB) DB() *sqlx.DB {

// AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
	// return meddler.Insert(l2db.db, "account_creation_auth", auth)
	_, err := l2db.db.Exec(
		`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
		VALUES ($1, $2, $3);`,
@@ -73,6 +66,16 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
	))
}

// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
	auth := new(AccountCreationAuthAPI)
	return auth, tracerr.Wrap(meddler.QueryRow(
		l2db.db, auth,
		"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
		addr,
	))
}

// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
	row := l2db.db.QueryRow(
@@ -170,6 +173,16 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
	return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
}

// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `

// selectPoolTxCommon select part of queries to get common.PoolL2Tx
const selectPoolTxCommon = `SELECT tx_pool.tx_id, from_idx, to_idx, tx_pool.to_eth_addr,
tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
@@ -189,6 +202,16 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
	))
}

// GetTxAPI returns the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
	tx := new(PoolTxAPI)
	return tx, tracerr.Wrap(meddler.QueryRow(
		l2db.db, tx,
		selectPoolTxAPI+"WHERE tx_id = $1;",
		txID,
	))
}

// GetPendingTxs returns all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
	var txs []*common.PoolL2Tx
@@ -323,10 +346,9 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
	_, err := l2db.db.Exec(
		`UPDATE tx_pool SET batch_num = NULL, state = $1
		WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
		`UPDATE tx_pool SET batch_num = NULL, state = $1
		WHERE (state = $2 OR state = $3) AND batch_num > $4`,
		common.PoolL2TxStatePending,
		common.PoolL2TxStateForging,
		common.PoolL2TxStateForged,
		common.PoolL2TxStateInvalid,
		lastValidBatch,

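Reading the updated WHERE clause above, a reorg past a tx's batch_num appears to return Forging/Forged txs to Pending. A toy model of that per-tx transition; the abbreviated state strings are illustrative stand-ins for the l2db constants named in the diff:

package main

import "fmt"

const (
	statePending = "pend"
	stateForging = "fing"
	stateForged  = "fged"
)

// reorgState applies the UPDATE above to a single tx.
func reorgState(state string, batchNum, lastValidBatch int) string {
	if (state == stateForging || state == stateForged) && batchNum > lastValidBatch {
		return statePending // its batch was rolled back
	}
	return state
}

func main() {
	fmt.Println(reorgState(stateForged, 12, 10)) // pend
	fmt.Println(reorgState(stateForged, 9, 10))  // fged
}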
@@ -21,7 +21,6 @@ import (
)

var l2DB *L2DB
var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB
var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD
@@ -35,11 +34,9 @@ func TestMain(m *testing.M) {
	if err != nil {
		panic(err)
	}
	l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
	apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
	l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
	l2DB = NewL2DB(db, 10, 1000, 24*time.Hour)
	test.WipeDB(l2DB.DB())
	historyDB = historydb.NewHistoryDB(db, nil)
	historyDB = historydb.NewHistoryDB(db)
	// Run tests
	result := m.Run()
	// Close DB
@@ -270,7 +267,7 @@ func TestStartForging(t *testing.T) {
	assert.NoError(t, err)
	// Fetch txs and check that they've been updated correctly
	for _, id := range startForgingTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -315,7 +312,7 @@ func TestDoneForging(t *testing.T) {

	// Fetch txs and check that they've been updated correctly
	for _, id := range doneForgingTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -347,7 +344,7 @@ func TestInvalidate(t *testing.T) {
	assert.NoError(t, err)
	// Fetch txs and check that they've been updated correctly
	for _, id := range invalidTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -388,7 +385,7 @@ func TestInvalidateOldNonces(t *testing.T) {
	assert.NoError(t, err)
	// Fetch txs and check that they've been updated correctly
	for _, id := range invalidTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		require.NoError(t, err)
		assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
		assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -463,13 +460,13 @@ func TestReorg(t *testing.T) {
	err = l2DB.Reorg(lastValidBatch)
	assert.NoError(t, err)
	for _, id := range reorgedTxIDs {
		tx, err := l2DBWithACC.GetTxAPI(id)
		tx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Nil(t, tx.BatchNum)
		assert.Equal(t, common.PoolL2TxStatePending, tx.State)
	}
	for _, id := range nonReorgedTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
	}
@@ -540,13 +537,13 @@ func TestReorg2(t *testing.T) {
	err = l2DB.Reorg(lastValidBatch)
	assert.NoError(t, err)
	for _, id := range reorgedTxIDs {
		tx, err := l2DBWithACC.GetTxAPI(id)
		tx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Nil(t, tx.BatchNum)
		assert.Equal(t, common.PoolL2TxStatePending, tx.State)
	}
	for _, id := range nonReorgedTxIDs {
		fetchedTx, err := l2DBWithACC.GetTxAPI(id)
		fetchedTx, err := l2DB.GetTxAPI(id)
		assert.NoError(t, err)
		assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
	}

@@ -498,11 +498,17 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
	}, nil
}

// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
	return l.db.CheckpointExists(batchNum)
}

// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, it gets the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
	if fromSynchronizer {
		log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
		if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
			return tracerr.Wrap(err)
		}

db/utils.go (29 lines)
@@ -1,19 +1,16 @@
package db

import (
	"context"
	"database/sql"
	"fmt"
	"math/big"
	"reflect"
	"strings"
	"time"

	"github.com/gobuffalo/packr/v2"
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
	"github.com/marusama/semaphore/v2"
	migrate "github.com/rubenv/sql-migrate"
	"github.com/russross/meddler"
)
@@ -87,32 +84,6 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
	return db, nil
}

// APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct {
	smphr   semaphore.Semaphore
	timeout time.Duration
}

// NewAPICnnectionController initializes an APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
	return &APIConnectionController{
		smphr:   semaphore.New(maxConnections),
		timeout: timeout,
	}
}

// Acquire reserves a SQL connection. If the connection is not acquired
// within the timeout, the function will return an error
func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), acc.timeout) //nolint:govet
	return cancel, acc.smphr.Acquire(ctx, 1)
}

// Release frees a SQL connection
func (acc *APIConnectionController) Release() {
	acc.smphr.Release(1)
}

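A runnable sketch of the acquire/release discipline this (now removed) controller imposed, matching its use in the deleted L2DB API wrappers earlier in this diff: cancel is deferred before checking the error, and Release is deferred only on success. The type is rebuilt here verbatim from the lines above; only main is illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/marusama/semaphore/v2"
)

// APIConnectionController limits concurrent SQL connections, as above.
type APIConnectionController struct {
	smphr   semaphore.Semaphore
	timeout time.Duration
}

func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), acc.timeout)
	return cancel, acc.smphr.Acquire(ctx, 1)
}

func (acc *APIConnectionController) Release() { acc.smphr.Release(1) }

func main() {
	acc := &APIConnectionController{smphr: semaphore.New(1), timeout: time.Second}
	cancel, err := acc.Acquire()
	defer cancel()
	if err != nil {
		fmt.Println("timed out waiting for a free API connection:", err)
		return
	}
	defer acc.Release()
	fmt.Println("connection slot held; run the query here")
}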
// initMeddler registers tags to be used to read/write from SQL DBs using meddler
func initMeddler() {
	meddler.Register("bigint", BigIntMeddler{})

@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
	}
	fromIdxBig := big.NewInt(fromIdx)
	toIdxBig := big.NewInt(toIdx)
	depositAmountF, err := common.NewFloat40(depositAmount)
	depositAmountF, err := common.NewFloat16(depositAmount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	amountF, err := common.NewFloat40(amount)
	amountF, err := common.NewFloat16(amount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
	}
	fromIdxBig := big.NewInt(fromIdx)
	toIdxBig := big.NewInt(toIdx)
	depositAmountF, err := common.NewFloat40(depositAmount)
	depositAmountF, err := common.NewFloat16(depositAmount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	amountF, err := common.NewFloat40(amount)
	amountF, err := common.NewFloat16(amount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -941,7 +941,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
	nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
	lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
	numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
	numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
	numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
	l1UserTxsData := []byte{}
@@ -968,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
		rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
	}
	for i := 0; i < numTxsL1Coord; i++ {
		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
		var signature []byte
		v := bytesL1Coordinator[0]
		s := bytesL1Coordinator[1:33]

@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
	args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
	l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
	require.NoError(t, err)
	numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
	numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
	for i := 0; i < numTxsL1; i++ {
		bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
		bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
		var signature []byte
		v := bytesL1Coordinator[0]
		s := bytesL1Coordinator[1:33]

go.mod (4 lines)
@@ -6,6 +6,7 @@ require (
	github.com/BurntSushi/toml v0.3.1
	github.com/dghubble/sling v1.3.0
	github.com/ethereum/go-ethereum v1.9.25
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
	github.com/getkin/kin-openapi v0.22.0
	github.com/gin-contrib/cors v1.3.1
	github.com/gin-gonic/gin v1.5.0
@@ -17,13 +18,14 @@ require (
	github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
	github.com/joho/godotenv v1.3.0
	github.com/lib/pq v1.8.0
	github.com/marusama/semaphore/v2 v2.4.1
	github.com/mattn/go-sqlite3 v2.0.3+incompatible
	github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
	github.com/mitchellh/copystructure v1.0.0
	github.com/mitchellh/mapstructure v1.3.0
	github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
	github.com/russross/meddler v1.0.0
	github.com/sirupsen/logrus v1.5.0 // indirect
	github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
	github.com/stretchr/testify v1.6.1
	github.com/urfave/cli/v2 v2.2.0
	go.uber.org/zap v1.16.0

go.sum (13 lines)
@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -84,6 +86,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -169,6 +173,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -415,9 +421,6 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -599,6 +602,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -617,6 +622,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=

node/node.go (26 lines)
@@ -2,6 +2,7 @@ package node

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"sync"
@@ -83,15 +84,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	var apiConnCon *dbUtils.APIConnectionController
	if cfg.API.Explorer || mode == ModeCoordinator {
		apiConnCon = dbUtils.NewAPICnnectionController(
			cfg.API.MaxSQLConnections,
			cfg.API.SQLConnectionTimeout.Duration,
		)
	}

	historyDB := historydb.NewHistoryDB(db, apiConnCon)
	historyDB := historydb.NewHistoryDB(db)

	ethClient, err := ethclient.Dial(cfg.Web3.URL)
	if err != nil {
@@ -204,7 +198,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		cfg.Coordinator.L2DB.SafetyPeriod,
		cfg.Coordinator.L2DB.MaxTxs,
		cfg.Coordinator.L2DB.TTL.Duration,
		apiConnCon,
	)

	// Unlock FeeAccount EthAddr in the keystore to generate the
@@ -301,6 +294,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		SyncRetryInterval:      cfg.Coordinator.SyncRetryInterval.Duration,
		EthClientAttempts:      cfg.Coordinator.EthClient.Attempts,
		EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
		EthNoReuseNonce:        cfg.Coordinator.EthClient.NoReuseNonce,
		EthTxResendTimeout:     cfg.Coordinator.EthClient.TxResendTimeout.Duration,
		MaxGasPrice:            cfg.Coordinator.EthClient.MaxGasPrice,
		TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
		DebugBatchPath:         cfg.Coordinator.Debug.BatchPath,
		Purger: coordinator.PurgerCfg{
@@ -487,11 +483,15 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
	if stats.Synced() {
		if err := n.nodeAPI.api.UpdateNetworkInfo(
			stats.Eth.LastBlock, stats.Sync.LastBlock,
			common.BatchNum(stats.Eth.LastBatch),
			common.BatchNum(stats.Eth.LastBatchNum),
			stats.Sync.Auction.CurrentSlot.SlotNum,
		); err != nil {
			log.Errorw("API.UpdateNetworkInfo", "err", err)
		}
	} else {
		n.nodeAPI.api.UpdateNetworkInfoBlock(
			stats.Eth.LastBlock, stats.Sync.LastBlock,
		)
	}
}
@@ -573,7 +573,11 @@ func (n *Node) StartSynchronizer() {
			if n.ctx.Err() != nil {
				continue
			}
			log.Errorw("Synchronizer.Sync", "err", err)
			if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
				log.Warnw("Synchronizer.Sync", "err", err)
			} else {
				log.Errorw("Synchronizer.Sync", "err", err)
			}
		}
	}
}

@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
	pass := os.Getenv("POSTGRES_PASS")
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	assert.NoError(t, err)
	historyDB := historydb.NewHistoryDB(db, nil)
	historyDB := historydb.NewHistoryDB(db)
	// Clean DB
	test.WipeDB(historyDB.DB())
	// Populate DB
@@ -46,7 +46,8 @@ func TestPriceUpdater(t *testing.T) {
	// Update prices
	pu.UpdatePrices(context.Background())
	// Check that prices have been updated
	fetchedTokens, err := historyDB.GetTokensTest()
	limit := uint(10)
	fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, historydb.OrderAsc)
	require.NoError(t, err)
	// TokenID 0 (ETH) is always on the DB
	assert.Equal(t, 2, len(fetchedTokens))

@@ -25,12 +25,12 @@ type Stats struct {
		Updated       time.Time
		FirstBlockNum int64
		LastBlock     common.Block
		LastBatch     int64
		LastBatchNum  int64
	}
	Sync struct {
		Updated   time.Time
		LastBlock common.Block
		LastBatch int64
		LastBatch common.Batch
		// LastL1BatchBlock is the last ethereum block in which an
		// l1Batch was forged
		LastL1BatchBlock int64
@@ -77,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
}

// UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
	lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
	now := time.Now()
	s.rw.Lock()
	s.Sync.LastBlock = *lastBlock
	if lastBatch != nil {
		s.Sync.LastBatch = int64(*lastBatch)
		s.Sync.LastBatch = *lastBatch
	}
	if lastL1BatchBlock != nil {
		s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -107,14 +107,14 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
	if err != nil {
		return tracerr.Wrap(err)
	}
	lastBatch, err := ethClient.RollupLastForgedBatch()
	lastBatchNum, err := ethClient.RollupLastForgedBatch()
	if err != nil {
		return tracerr.Wrap(err)
	}
	s.rw.Lock()
	s.Eth.Updated = now
	s.Eth.LastBlock = *lastBlock
	s.Eth.LastBatch = lastBatch
	s.Eth.LastBatchNum = lastBatchNum
	s.rw.Unlock()
	return nil
}
@@ -139,6 +139,10 @@ func (s *StatsHolder) CopyStats() *Stats {
		sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
			common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
	}
	if s.Sync.LastBatch.StateRoot != nil {
		sCopy.Sync.LastBatch.StateRoot =
			common.CopyBigInt(s.Sync.LastBatch.StateRoot)
	}
	s.rw.RUnlock()
	return &sCopy
}
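Why CopyStats gains the StateRoot branch above: Sync.LastBatch is now a full common.Batch whose StateRoot is a *big.Int, so a plain struct copy would share the pointer. A minimal, runnable illustration; the Batch type here is a local stand-in for common.Batch:

package main

import (
	"fmt"
	"math/big"
)

type Batch struct{ StateRoot *big.Int }

func main() {
	src := Batch{StateRoot: big.NewInt(7)}
	shallow := src                // copies the pointer, not the value
	shallow.StateRoot.SetInt64(9) // mutates src too
	fmt.Println(src.StateRoot)    // 9

	deep := src
	deep.StateRoot = new(big.Int).Set(src.StateRoot) // the deep copy CopyStats performs
	deep.StateRoot.SetInt64(1)
	fmt.Println(src.StateRoot) // still 9
}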
@@ -152,9 +156,9 @@ func (s *StatsHolder) blocksPerc() float64 {
		float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
}

func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
	return float64(batchNum) * 100.0 /
		float64(s.Eth.LastBatch)
		float64(s.Eth.LastBatchNum)
}

// StartBlockNums sets the first block used to start tracking the smart
|
||||
@@ -329,23 +333,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
    return nil
}

// firstBatchBlockNum is the blockNum of first batch in that block, if any
func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
    slot := common.Slot{
        SlotNum:          s.stats.Sync.Auction.CurrentSlot.SlotNum,
        ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
    }
// updateCurrentSlot updates the slot with information of the current slot.
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
// hasBatch is true when the last synced block contained at least one batch.
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
    // We want the next block because the current one is already mined
    blockNum := s.stats.Sync.LastBlock.Num + 1
    slotNum := s.consts.Auction.SlotNum(blockNum)
    firstBatchBlockNum := s.stats.Sync.LastBlock.Num
    if reset {
        // Using this query only to know if there
        dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
        if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
            return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
            return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
        } else if tracerr.Unwrap(err) == sql.ErrNoRows {
            firstBatchBlockNum = nil
            hasBatch = false
        } else {
            firstBatchBlockNum = &dbFirstBatchBlockNum
            hasBatch = true
            firstBatchBlockNum = dbFirstBatchBlockNum
        }
        slot.ForgerCommitment = false
    } else if slotNum > slot.SlotNum {
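The shape of the refactor: getCurrentSlot allocated and returned a fresh slot, while updateCurrentSlot mutates a caller-owned slot in place and returns only an error. A minimal sketch of the two call shapes, with illustrative types:

    package main

    import "fmt"

    type slot struct{ num int64 }

    // Old shape: build and return a fresh value (plus error).
    func getSlot(base int64) (*slot, error) {
        return &slot{num: base + 1}, nil
    }

    // New shape: update the caller's value in place, return only error.
    func updateSlot(s *slot, base int64) error {
        s.num = base + 1
        return nil
    }

    func main() {
        s1, _ := getSlot(10)
        s2 := slot{}
        _ = updateSlot(&s2, 10)
        fmt.Println(s1.num, s2.num) // both 11
    }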
@@ -356,11 +362,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
    slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
    // If Synced, update the current coordinator
    if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
        if err := s.setSlotCoordinator(&slot); err != nil {
            return nil, tracerr.Wrap(err)
        if err := s.setSlotCoordinator(slot); err != nil {
            return tracerr.Wrap(err)
        }
        if firstBatchBlockNum != nil &&
            s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
        if hasBatch &&
            s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
            int64(s.vars.Auction.SlotDeadline) {
            slot.ForgerCommitment = true
        }
@@ -369,57 +375,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
        // BEGIN SANITY CHECK
        canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
        if err != nil {
            return nil, tracerr.Wrap(err)
            return tracerr.Wrap(err)
        }
        if !canForge {
            return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
            return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
                "differs from smart contract: %+v", slot))
        }
        // END SANITY CHECK
    }
    return &slot, nil
    return nil
}

func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
// updateNextSlot updates the slot with information of the next slot.
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
    // We want the next block because the current one is already mined
    blockNum := s.stats.Sync.LastBlock.Num + 1
    slotNum := s.consts.Auction.SlotNum(blockNum) + 1
    slot := common.Slot{
        SlotNum:          slotNum,
        ForgerCommitment: false,
    }
    slot.SlotNum = slotNum
    slot.ForgerCommitment = false
    slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
    // If Synced, update the current coordinator
    if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
        if err := s.setSlotCoordinator(&slot); err != nil {
            return nil, tracerr.Wrap(err)
        if err := s.setSlotCoordinator(slot); err != nil {
            return tracerr.Wrap(err)
        }

        // TODO: Remove this SANITY CHECK once this code is tested enough
        // BEGIN SANITY CHECK
        canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
        if err != nil {
            return nil, tracerr.Wrap(err)
            return tracerr.Wrap(err)
        }
        if !canForge {
            return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
            return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
                "differs from smart contract: %+v", slot))
        }
        // END SANITY CHECK
    }
    return &slot, nil
    return nil
}

func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
    current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
    if err != nil {
// updateCurrentNextSlotIfSync updates the current and next slot. Information
// about forger address that is allowed to forge is only updated if we are
// Synced.
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
    current := s.stats.Sync.Auction.CurrentSlot
    next := s.stats.Sync.Auction.NextSlot
    if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
        return tracerr.Wrap(err)
    }
    next, err := s.getNextSlot()
    if err != nil {
    if err := s.updateNextSlot(&next); err != nil {
        return tracerr.Wrap(err)
    }
    s.stats.UpdateCurrentNextSlot(current, next)
    s.stats.UpdateCurrentNextSlot(&current, &next)
    return nil
}

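Note the read-modify-publish flow of updateCurrentNextSlotIfSync: copy the slots out of the stats holder, mutate the local copies (including the slow Ethereum queries), then publish both back in a single UpdateCurrentNextSlot call, so the holder's write lock is held only for the final assignment. A compressed sketch with illustrative types (a read lock is added here for generality):

    package main

    import (
        "fmt"
        "sync"
    )

    type slot struct{ num int64 }

    type holder struct {
        mu            sync.RWMutex
        current, next slot
    }

    func (h *holder) read() (slot, slot) {
        h.mu.RLock()
        defer h.mu.RUnlock()
        return h.current, h.next
    }

    func (h *holder) publish(c, n *slot) {
        h.mu.Lock()
        h.current, h.next = *c, *n
        h.mu.Unlock()
    }

    func main() {
        h := &holder{}
        cur, nxt := h.read() // copy out; lock released immediately
        cur.num++            // mutate locally (slow RPC calls could go here)
        nxt.num = cur.num + 1
        h.publish(&cur, &nxt) // publish atomically under the write lock
        fmt.Println(h.read())
    }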
@@ -458,9 +468,9 @@ func (s *Synchronizer) init() error {
        "ethLastBlock", s.stats.Eth.LastBlock,
    )
    log.Infow("Sync init batch",
        "syncLastBatch", s.stats.Sync.LastBatch,
        "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
        "ethLastBatch", s.stats.Eth.LastBatch,
        "syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
        "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
        "ethLastBatch", s.stats.Eth.LastBatchNum,
    )
    return nil
}
@@ -627,14 +637,14 @@ func (s *Synchronizer) Sync2(ctx context.Context,
            }
        }
        s.stats.UpdateSync(ethBlock,
            &rollupData.Batches[batchesLen-1].Batch.BatchNum,
            &rollupData.Batches[batchesLen-1].Batch,
            lastL1BatchBlock, lastForgeL1TxsNum)
    }
    var firstBatchBlockNum *int64
    hasBatch := false
    if len(rollupData.Batches) > 0 {
        firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
        hasBatch = true
    }
    if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
    if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
        return nil, nil, tracerr.Wrap(err)
    }

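Swapping the *int64 parameter for a bool reflects what the callee actually consumed: only whether the block contained a batch, not which block the batch was in. A minimal sketch of the simplification, with hypothetical names:

    package main

    import "fmt"

    // Old: callers had to build a pointer just to signal presence.
    func oldAPI(firstBatchBlockNum *int64) bool { return firstBatchBlockNum != nil }

    // New: the presence bit is passed directly.
    func newAPI(hasBatch bool) bool { return hasBatch }

    func main() {
        blockNum := int64(1234)
        fmt.Println(oldAPI(&blockNum), newAPI(true)) // true true
    }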
@@ -646,8 +656,8 @@ func (s *Synchronizer) Sync2(ctx context.Context,
    for _, batchData := range rollupData.Batches {
        log.Debugw("Synced batch",
            "syncLastBatch", batchData.Batch.BatchNum,
            "syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
            "ethLastBatch", s.stats.Eth.LastBatch,
            "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
            "ethLastBatch", s.stats.Eth.LastBatchNum,
        )
    }

@@ -753,15 +763,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
        s.vars.WDelayer = *wDelayer
    }

    batchNum, err := s.historyDB.GetLastBatchNum()
    batch, err := s.historyDB.GetLastBatch()
    if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
        return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
    }
    if tracerr.Unwrap(err) == sql.ErrNoRows {
        batchNum = 0
        batch = &common.Batch{}
    }

    err = s.stateDB.Reset(batchNum)
    err = s.stateDB.Reset(batch.BatchNum)
    if err != nil {
        return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
    }
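The sql.ErrNoRows branch now substitutes an empty &common.Batch{}, so batch.BatchNum reads as zero and the reset path stays uniform whether or not any batch exists yet. A self-contained sketch of the idiom, with getLastBatch as a hypothetical stand-in for historyDB.GetLastBatch:

    package main

    import (
        "database/sql"
        "errors"
        "fmt"
    )

    type batch struct{ BatchNum int64 }

    // getLastBatch is a hypothetical stand-in that finds no rows.
    func getLastBatch() (*batch, error) { return nil, sql.ErrNoRows }

    func main() {
        b, err := getLastBatch()
        if err != nil && !errors.Is(err, sql.ErrNoRows) {
            panic(err) // real errors still propagate
        }
        if errors.Is(err, sql.ErrNoRows) {
            b = &batch{} // no batch yet: fall back to the zero value
        }
        fmt.Println(b.BatchNum) // 0 — safe to pass to stateDB.Reset
    }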
@@ -783,9 +793,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
        lastForgeL1TxsNum = &n
    }

    s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
    s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)

    if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
    if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
        return tracerr.Wrap(err)
    }
    return nil
@@ -919,9 +929,15 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
        return nil, tracerr.Wrap(err)
    }
    if s.stateDB.CurrentBatch() != batchNum {
        return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
        return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
            "evtForgeBatch.BatchNum = (%v)",
            s.stateDB.CurrentBatch(), batchNum))
    }
    if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
        return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
            "forgeBatchArgs.NewStRoot (%v)",
            s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
    }

    // Transform processed PoolL2 txs to L2 and store in BatchData
    l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way

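The added guard compares the locally rebuilt Merkle root against the NewStRoot carried by the forge event, failing loudly instead of silently syncing divergent state. big.Int equality goes through Cmp, which returns 0 on match; a minimal sketch:

    package main

    import (
        "fmt"
        "math/big"
    )

    // checkRoot mirrors the sanity check: Cmp == 0 means the roots match.
    func checkRoot(localRoot, contractRoot *big.Int) error {
        if localRoot.Cmp(contractRoot) != 0 {
            return fmt.Errorf("stateDB root (%v) != contract root (%v)",
                localRoot, contractRoot)
        }
        return nil
    }

    func main() {
        fmt.Println(checkRoot(big.NewInt(7), big.NewInt(7))) // <nil>
        fmt.Println(checkRoot(big.NewInt(7), big.NewInt(8))) // error
    }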
@@ -314,7 +314,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
    pass := os.Getenv("POSTGRES_PASS")
    db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
    require.NoError(t, err)
    historyDB := historydb.NewHistoryDB(db, nil)
    historyDB := historydb.NewHistoryDB(db)
    // Clear DB
    test.WipeDB(historyDB.DB())

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
    cpy := c.nextBlock().copy()
    defer func() { c.revertIfErr(err, cpy) }()

    _, err = common.NewFloat40(amount)
    _, err = common.NewFloat16(amount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
    _, err = common.NewFloat40(depositAmount)
    _, err = common.NewFloat16(depositAmount)
    if err != nil {
        return nil, tracerr.Wrap(err)
    }

@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
        })
    }

    hdb := historydb.NewHistoryDB(db, nil)
    hdb := historydb.NewHistoryDB(db)
    assert.NoError(t, hdb.AddBlock(&common.Block{
        Num: 1,
    }))
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
    pass := os.Getenv("POSTGRES_PASS")
    db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
    require.NoError(t, err)
    l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
    l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)

    dir, err := ioutil.TempDir("", "tmpSyncDB")
    require.NoError(t, err)
@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
    assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
    h, err := zki.HashGlobalData()
    require.NoError(t, err)
    assert.Equal(t, "12174727174629825205577542675894290689387326670869871089988393208259924373499", h.String())
    assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
    sendProofAndCheckResp(t, zki)

    // batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
    assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
    h, err = zki.HashGlobalData()
    require.NoError(t, err)
    assert.Equal(t, "16351950370739934361208977436603065280805499094788807090831605833717933916063", h.String())
    assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
    assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
    assert.Equal(t, "0", zki.EthAddr3[1].String())
    sendProofAndCheckResp(t, zki)

@@ -501,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
    tp.zki.OnChain[tp.i] = big.NewInt(1)

    // L1Txs
    depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
    depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
    if err != nil {
        return nil, nil, false, nil, tracerr.Wrap(err)
    }
    tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
    tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
    tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
    if tx.FromBJJ != common.EmptyBJJComp {
        tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)

@@ -1,6 +1,8 @@
package txprocessor

import (
    "encoding/binary"
    "encoding/hex"
    "io/ioutil"
    "math/big"
    "os"
@@ -640,16 +642,17 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {

    users := txsets.GenerateJsUsers(t)

    daMaxF40 := common.Float40(0xFFFFFFFFFF)
    daMaxBI, err := daMaxF40.BigInt()
    daMaxHex, err := hex.DecodeString("FFFF")
    require.NoError(t, err)
    assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())
    daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
    daMaxBI := daMaxF16.BigInt()
    assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())

    daMax1F40 := common.Float40(0xFFFFFFFFFE)
    daMax1Hex, err := hex.DecodeString("FFFE")
    require.NoError(t, err)
    daMax1BI, err := daMax1F40.BigInt()
    require.NoError(t, err)
    assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())
    daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
    daMax1BI := daMax1F16.BigInt()
    assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())

    l1Txs := []common.L1Tx{
        {

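The asserted maxima follow from the Float16 layout: a 10-bit mantissa m, one half-exponent bit, and a 5-bit base-10 exponent e, decoding to m*10^e (plus 10^e/2 when the half bit is set and e > 0), so 0xFFFF decodes to 1023.5 * 10^31. A sketch of that decoding, as a simplified reading of common.Float16 (the canonical implementation presumably lives in common/float16.go):

    package main

    import (
        "fmt"
        "math/big"
    )

    // decodeFloat16 sketches the Float16 decoding: 10-bit mantissa,
    // one half-exponent bit, 5-bit base-10 exponent.
    func decodeFloat16(f uint16) *big.Int {
        m := big.NewInt(int64(f & 0x3FF))
        e := int64(f >> 11)
        half := (f >> 10) & 1
        exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(e), nil)
        res := new(big.Int).Mul(m, exp)
        if half == 1 && e > 0 {
            res.Add(res, new(big.Int).Div(exp, big.NewInt(2)))
        }
        return res
    }

    func main() {
        // 0xFFFF: m=1023, half=1, e=31 -> 1023.5 * 10^31
        fmt.Println(decodeFloat16(0xFFFF)) // 10235000000000000000000000000000000
        // 0xFFFE: m=1022, half=1, e=31 -> 1022.5 * 10^31
        fmt.Println(decodeFloat16(0xFFFE)) // 10225000000000000000000000000000000
    }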
File diff suppressed because one or more lines are too long
@@ -89,12 +89,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {

// Reset tells the TxSelector to get its internal AccountsDB
// from the required `batchNum`
func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
    err := txsel.localAccountsDB.Reset(batchNum, true)
    if err != nil {
        return tracerr.Wrap(err)
    }
    return nil
func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
    return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
}

func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {

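Two things make the collapsed Reset safe: tracerr.Wrap passes a nil error through unchanged, and the previously hard-coded true is now the explicit fromSynchronizer argument, letting callers choose the reset source. A sketch of the nil-propagating collapse, with wrap as an illustrative stand-in for tracerr.Wrap:

    package main

    import "fmt"

    // wrap is an illustrative stand-in for tracerr.Wrap: nil stays nil.
    func wrap(err error) error {
        if err == nil {
            return nil
        }
        return fmt.Errorf("traced: %w", err)
    }

    func reset(inner func() error) error {
        // Equivalent to: if err := inner(); err != nil { return wrap(err) }; return nil
        return wrap(inner())
    }

    func main() {
        fmt.Println(reset(func() error { return nil }))                // <nil>
        fmt.Println(reset(func() error { return fmt.Errorf("boom") })) // traced: boom
    }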
@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
    pass := os.Getenv("POSTGRES_PASS")
    db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
    require.NoError(t, err)
    l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
    l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)

    dir, err := ioutil.TempDir("", "tmpdb")
    require.NoError(t, err)
@@ -106,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
        })
    }

    hdb := historydb.NewHistoryDB(db, nil)
    hdb := historydb.NewHistoryDB(db)
    assert.NoError(t, hdb.AddBlock(&common.Block{
        Num: 1,
    }))
@@ -424,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
    require.NoError(t, err)

    expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
    expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
    expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
    expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
    expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
    expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr

    // batch2
    // prepare the PoolL2Txs
