mirror of https://github.com/arnaucube/hermez-node.git
synced 2026-02-07 11:26:44 +01:00

Compare commits: feature/sq ... feature/fl (12 commits)
| SHA1 |
|---|
| 22802d6273 |
| 453ecc0504 |
| 2a77dac9c1 |
| ac1fd9acf7 |
| 1bf29636db |
| 2bf3b843ed |
| 277a1bc321 |
| 3181c8738c |
| 62df063ccf |
| 48a538faa3 |
| 10a34c8801 |
| 6260dfedad |
@@ -26,7 +26,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
 		return
 	}
 	// Insert to DB
-	if err := a.l2.AddAccountCreationAuth(commonAuth); err != nil {
+	if err := a.l2.AddAccountCreationAuthAPI(commonAuth); err != nil {
 		retSQLErr(err, c)
 		return
 	}
@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"os"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
@@ -27,6 +28,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/test/til"
 	"github.com/hermeznetwork/hermez-node/test/txsets"
+	"github.com/hermeznetwork/tracerr"
 	"github.com/stretchr/testify/require"
 )

 // Pendinger is an interface that allows getting last returned item ID and PendingItems to be used for building fromItem
@@ -199,7 +201,8 @@ func TestMain(m *testing.M) {
 	if err != nil {
 		panic(err)
 	}
-	hdb := historydb.NewHistoryDB(database)
+	apiConnCon := db.NewAPICnnectionController(1, time.Second)
+	hdb := historydb.NewHistoryDB(database, apiConnCon)
 	if err != nil {
 		panic(err)
 	}
@@ -213,12 +216,12 @@ func TestMain(m *testing.M) {
 			panic(err)
 		}
 	}()
-	sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
+	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeTxSelector, NLevels: 0})
 	if err != nil {
 		panic(err)
 	}
 	// L2DB
-	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour)
+	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
 	test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
 	// Config (smart contract constants)
 	chainID := uint16(0)
@@ -574,6 +577,82 @@ func TestMain(m *testing.M) {
 	os.Exit(result)
 }

+func TestTimeout(t *testing.T) {
+	pass := os.Getenv("POSTGRES_PASS")
+	databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
+	require.NoError(t, err)
+	apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
+	hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
+	require.NoError(t, err)
+	// L2DB
+	l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
+
+	// API
+	apiGinTO := gin.Default()
+	finishWait := make(chan interface{})
+	startWait := make(chan interface{})
+	apiGinTO.GET("/wait", func(c *gin.Context) {
+		cancel, err := apiConnConTO.Acquire()
+		defer cancel()
+		require.NoError(t, err)
+		defer apiConnConTO.Release()
+		startWait <- nil
+		<-finishWait
+	})
+	// Start server
+	serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
+	go func() {
+		if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+			require.NoError(t, err)
+		}
+	}()
+	_config := getConfigTest(0)
+	_, err = NewAPI(
+		true,
+		true,
+		apiGinTO,
+		hdbTO,
+		nil,
+		l2DBTO,
+		&_config,
+	)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+	httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
+	require.NoError(t, err)
+	httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
+	require.NoError(t, err)
+	// Request that will get timed out
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		// Request that will make the API busy
+		_, err = client.Do(httpReqWait)
+		require.NoError(t, err)
+		wg.Done()
+	}()
+	<-startWait
+	resp, err := client.Do(httpReq)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+	defer resp.Body.Close() //nolint
+	body, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+	// Unmarshal body into return struct
+	msg := &errorMsg{}
+	err = json.Unmarshal(body, msg)
+	require.NoError(t, err)
+	// Check that the error was the expected one
+	require.Equal(t, errSQLTimeout, msg.Message)
+	finishWait <- nil
+
+	// Stop server
+	wg.Wait()
+	require.NoError(t, serverTO.Shutdown(context.Background()))
+	require.NoError(t, databaseTO.Close())
+}
+
 func doGoodReqPaginated(
 	path, order string,
 	iterStruct Pendinger,
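The TestTimeout flow above pins down the intended semantics of the connection controller: one slot, a 100 ms acquire timeout, and an HTTP 503 once acquisition times out. Below is a minimal sketch of a limiter with those semantics, assuming the db package's controller behaves like a timed semaphore; connLimiter, newConnLimiter and the literal error string are illustrative names, not the node's actual implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// connLimiter bounds concurrent SQL work: a buffered channel acts as a
// semaphore, and Acquire fails once the configured timeout elapses while
// all slots stay busy.
type connLimiter struct {
	slots   chan struct{}
	timeout time.Duration
}

func newConnLimiter(maxConns int, timeout time.Duration) *connLimiter {
	return &connLimiter{slots: make(chan struct{}, maxConns), timeout: timeout}
}

// Acquire blocks until a slot is free or the timeout expires, mirroring the
// Acquire/Release pair used in the /wait handler above.
func (l *connLimiter) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), l.timeout)
	select {
	case l.slots <- struct{}{}:
		return cancel, nil
	case <-ctx.Done():
		cancel()
		return func() {}, errors.New("context deadline exceeded")
	}
}

// Release frees a slot for the next waiter.
func (l *connLimiter) Release() { <-l.slots }

func main() {
	l := newConnLimiter(1, 100*time.Millisecond)
	cancel, err := l.Acquire()
	fmt.Println(err) // <nil>: the only slot is now taken
	if _, err := l.Acquire(); err != nil {
		fmt.Println(err) // context deadline exceeded -> mapped to HTTP 503
	}
	l.Release()
	cancel()
}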
@@ -108,7 +108,7 @@ func (a *API) getFullBatch(c *gin.Context) {
 	}
 	// Fetch txs forged in the batch from historyDB
 	maxTxsPerBatch := uint(2048) //nolint:gomnd
-	txs, _, err := a.h.GetHistoryTxs(
+	txs, _, err := a.h.GetTxsAPI(
 		nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
 	)
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -30,6 +30,12 @@ const (

 	// Error for duplicated key
 	errDuplicatedKey = "Item already exists"
+
+	// Error for timeout due to SQL connection
+	errSQLTimeout = "The node is under heavy preasure, please try again later"
+
+	// Error message returned when context reaches timeout
+	errCtxTimeout = "context deadline exceeded"
 )

 var (
@@ -38,16 +44,20 @@ var (
 )

 func retSQLErr(err error, c *gin.Context) {
-	log.Warn("HTTP API SQL request error", "err", err)
-	if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
+	log.Warnw("HTTP API SQL request error", "err", err)
+	errMsg := tracerr.Unwrap(err).Error()
+	if errMsg == errCtxTimeout {
+		c.JSON(http.StatusServiceUnavailable, errorMsg{
+			Message: errSQLTimeout,
+		})
+	} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
 		// https://www.postgresql.org/docs/current/errcodes-appendix.html
 		if sqlErr.Code == "23505" {
 			c.JSON(http.StatusInternalServerError, errorMsg{
 				Message: errDuplicatedKey,
 			})
 		}
-	}
-	if tracerr.Unwrap(err) == sql.ErrNoRows {
+	} else if tracerr.Unwrap(err) == sql.ErrNoRows {
 		c.JSON(http.StatusNotFound, errorMsg{
 			Message: err.Error(),
 		})
@@ -59,7 +69,7 @@ func retSQLErr(err error, c *gin.Context) {
 }

 func retBadReq(err error, c *gin.Context) {
-	log.Warn("HTTP API Bad request error", "err", err)
+	log.Warnw("HTTP API Bad request error", "err", err)
 	c.JSON(http.StatusBadRequest, errorMsg{
 		Message: err.Error(),
 	})
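Taken together, the two helpers now map failures as follows: a context deadline (saturated SQL connection pool) surfaces as HTTP 503 with the errSQLTimeout message, a duplicate key (Postgres code 23505) as HTTP 500 with errDuplicatedKey, and sql.ErrNoRows as HTTP 404 — where previously the timeout case presumably fell through to the generic handler — with both helpers switching to the structured Warnw logger.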
api/slots.go (12 lines changed)
@@ -97,12 +97,12 @@ func (a *API) getSlot(c *gin.Context) {
 		retBadReq(err, c)
 		return
 	}
-	currentBlock, err := a.h.GetLastBlock()
+	currentBlock, err := a.h.GetLastBlockAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
 	}
-	auctionVars, err := a.h.GetAuctionVars()
+	auctionVars, err := a.h.GetAuctionVarsAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
@@ -200,12 +200,12 @@ func (a *API) getSlots(c *gin.Context) {
 		return
 	}

-	currentBlock, err := a.h.GetLastBlock()
+	currentBlock, err := a.h.GetLastBlockAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
 	}
-	auctionVars, err := a.h.GetAuctionVars()
+	auctionVars, err := a.h.GetAuctionVarsAPI()
 	if err != nil {
 		retBadReq(err, c)
 		return
@@ -220,13 +220,13 @@ func (a *API) getSlots(c *gin.Context) {
 		retBadReq(errors.New("It is necessary to add maxSlotNum filter"), c)
 		return
 	} else if *finishedAuction {
-		currentBlock, err := a.h.GetLastBlock()
+		currentBlock, err := a.h.GetLastBlockAPI()
 		if err != nil {
 			retBadReq(err, c)
 			return
 		}
 		currentSlot := a.getCurrentSlot(currentBlock.Num)
-		auctionVars, err := a.h.GetAuctionVars()
+		auctionVars, err := a.h.GetAuctionVarsAPI()
 		if err != nil {
 			retBadReq(err, c)
 			return
@@ -141,7 +141,7 @@ func (a *API) UpdateNetworkInfo(
 	a.status.Network.NextForgers = nextForgers

 	// Update buckets withdrawals
-	bucketsUpdate, err := a.h.GetBucketUpdates()
+	bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		bucketsUpdate = nil
 	} else if err != nil {
@@ -201,7 +201,7 @@ func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot
 		}}
 	} else {
 		// Get all the relevant updates from the DB
-		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNum(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
+		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
@@ -279,7 +279,7 @@ func (a *API) UpdateMetrics() error {
 	}
 	batchNum := a.status.Network.LastBatch.BatchNum
 	a.status.RUnlock()
-	metrics, err := a.h.GetMetrics(batchNum)
+	metrics, err := a.h.GetMetricsAPI(batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -293,7 +293,7 @@ func (a *API) UpdateMetrics() error {

 // UpdateRecommendedFee update Status.RecommendedFee information
 func (a *API) UpdateRecommendedFee() error {
-	feeExistingAccount, err := a.h.GetAvgTxFee()
+	feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -2916,7 +2916,7 @@ components:
         example: 101
       l1UserTotalBytes:
         type: integer
-        description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
+        description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
         example: 72
       maxL1UserTx:
         type: integer
@@ -22,7 +22,7 @@ func (a *API) getToken(c *gin.Context) {
 	}
 	tokenID := common.TokenID(*tokenIDUint)
 	// Fetch token from historyDB
-	token, err := a.h.GetToken(tokenID)
+	token, err := a.h.GetTokenAPI(tokenID)
 	if err != nil {
 		retSQLErr(err, c)
 		return
@@ -45,7 +45,7 @@ func (a *API) getTokens(c *gin.Context) {
 		return
 	}
 	// Fetch exits from historyDB
-	tokens, pendingItems, err := a.h.GetTokens(
+	tokens, pendingItems, err := a.h.GetTokensAPI(
 		tokenIDs, symbols, name, fromItem, limit, order,
 	)
 	if err != nil {
@@ -34,7 +34,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
 	}

 	// Fetch txs from historyDB
-	txs, pendingItems, err := a.h.GetHistoryTxs(
+	txs, pendingItems, err := a.h.GetTxsAPI(
 		addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
 	)
 	if err != nil {
@@ -61,7 +61,7 @@ func (a *API) getHistoryTx(c *gin.Context) {
 		return
 	}
 	// Fetch tx from historyDB
-	tx, err := a.h.GetHistoryTx(txID)
+	tx, err := a.h.GetTxAPI(txID)
 	if err != nil {
 		retSQLErr(err, c)
 		return
@@ -28,7 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
 		return
 	}
 	// Insert to DB
-	if err := a.l2.AddTx(writeTx); err != nil {
+	if err := a.l2.AddTxAPI(writeTx); err != nil {
 		retSQLErr(err, c)
 		return
 	}
@@ -2,6 +2,7 @@ package batchbuilder

 import (
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/db/kvdb"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/txprocessor"
 	"github.com/hermeznetwork/tracerr"
@@ -28,8 +29,14 @@ type ConfigBatch struct {
 // NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
 // method
 func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
-	localStateDB, err := statedb.NewLocalStateDB(dbpath, 128, synchronizerStateDB,
-		statedb.TypeBatchBuilder, int(nLevels))
+	localStateDB, err := statedb.NewLocalStateDB(
+		statedb.Config{
+			Path:    dbpath,
+			Keep:    kvdb.DefaultKeep,
+			Type:    statedb.TypeBatchBuilder,
+			NLevels: int(nLevels),
+		},
+		synchronizerStateDB)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -15,7 +15,7 @@ func TestBatchBuilder(t *testing.T) {
 	require.Nil(t, err)
 	defer assert.Nil(t, os.RemoveAll(dir))

-	synchDB, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 0)
+	synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeBatchBuilder, NLevels: 0})
 	assert.Nil(t, err)

 	bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
@@ -3,6 +3,8 @@ Address = "localhost:8086"
 Explorer = true
 UpdateMetricsInterval = "10s"
 UpdateRecommendedFeeInterval = "10s"
+MaxSQLConnections = 100
+SQLConnectionTimeout = "2s"

 [PriceUpdater]
 Interval = "10s"
@@ -96,3 +98,4 @@ Coordinator = true
 [Coordinator.Debug]
 BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
+LightScrypt = true
 # RollupVerifierIndex = 0
@@ -24,8 +24,8 @@ const (
 	// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
 	RollupConstL1CoordinatorTotalBytes = 101
 	// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
-	// [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
-	RollupConstL1UserTotalBytes = 72
+	// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
+	RollupConstL1UserTotalBytes = 78
 	// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
 	RollupConstMaxL1UserTx = 128
 	// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
@@ -30,6 +30,7 @@ func (f16 Float16) Bytes() []byte {

 // Float16FromBytes returns a Float16 from a byte array of 2 bytes.
 func Float16FromBytes(b []byte) *Float16 {
+	// WARNING b[:2] for a b where len(b)<2 can break
 	f16 := Float16(binary.BigEndian.Uint16(b[:2]))
 	return &f16
 }
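The warning is precise: b[:2] panics at runtime once cap(b) < 2 (slice bounds out of range), and a 1-byte slice with spare capacity would instead silently read whatever byte follows it, so callers must guarantee at least 2 bytes.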
@@ -8,7 +8,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )

-func TestConversions(t *testing.T) {
+func TestConversionsFloat16(t *testing.T) {
 	testVector := map[Float16]string{
 		0x307B: "123000000",
 		0x1DC6: "454500",
@@ -32,14 +32,14 @@ func TestConversionsFloat16(t *testing.T) {
 		bi.SetString(testVector[test], 10)

 		fl, err := NewFloat16(bi)
-		assert.Equal(t, nil, err)
+		assert.NoError(t, err)

 		fx2 := fl.BigInt()
 		assert.Equal(t, fx2.String(), testVector[test])
 	}
 }

-func TestFloorFix2Float(t *testing.T) {
+func TestFloorFix2FloatFloat16(t *testing.T) {
 	testVector := map[string]Float16{
 		"87999990000000000": 0x776f,
 		"87950000000000001": 0x776f,
@@ -57,10 +57,10 @@ func TestFloorFix2FloatFloat16(t *testing.T) {
 	}
 }

-func TestConversionLosses(t *testing.T) {
+func TestConversionLossesFloat16(t *testing.T) {
 	a := big.NewInt(1000)
 	b, err := NewFloat16(a)
-	assert.Equal(t, nil, err)
+	assert.NoError(t, err)
 	c := b.BigInt()
 	assert.Equal(t, c, a)
common/float40.go (new file, 102 lines)
@@ -0,0 +1,102 @@
+// Package common Float40 provides methods to work with Hermez custom half
+// float precision, 40 bits, codification internally called Float40 has been
+// adopted to encode large integers. This is done in order to save bits when L2
+// transactions are published.
+//nolint:gomnd
+package common
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"math/big"
+
+	"github.com/hermeznetwork/tracerr"
+)
+
+const (
+	// maxFloat40Value is the maximum value that the Float40 can have
+	// (40 bits: maxFloat40Value=2**40-1)
+	maxFloat40Value = 0xffffffffff
+)
+
+var (
+	// ErrFloat40Overflow is used when a given nonce overflows the maximum
+	// capacity of the Float40 (2**40-1)
+	ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
+	// ErrFloat40E31 is used when the e > 31 when trying to convert a
+	// *big.Int to Float40
+	ErrFloat40E31 = errors.New("Float40 error, e > 31")
+	// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
+	// not be represented as Float40 due not enough precission
+	ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
+)
+
+// Float40 represents a float in a 64 bit format
+type Float40 uint64
+
+// Bytes return a byte array of length 5 with the Float40 value encoded in
+// BigEndian
+func (f40 Float40) Bytes() ([]byte, error) {
+	if f40 > maxFloat40Value {
+		return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
+	}
+
+	var f40Bytes [8]byte
+	binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
+	var b [5]byte
+	copy(b[:], f40Bytes[3:])
+	return b[:], nil
+}
+
+// Float40FromBytes returns a Float40 from a byte array of 5 bytes in Bigendian
+// representation.
+func Float40FromBytes(b []byte) Float40 {
+	var f40Bytes [8]byte
+	copy(f40Bytes[3:], b[:])
+	f40 := binary.BigEndian.Uint64(f40Bytes[:])
+	return Float40(f40)
+}
+
+// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
+// [   e   |    m    ]
+// [ 5 bits | 35 bits ]
+func (f40 Float40) BigInt() (*big.Int, error) {
+	// take the 5 used bytes (FF * 5)
+	var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
+	f40Bytes, err := f40.Bytes()
+	if err != nil {
+		return nil, err
+	}
+
+	e := f40Bytes[0] & 0xF8 >> 3      // take first 5 bits
+	m := f40Uint64 & 0x07_FF_FF_FF_FF // take the other 35 bits
+
+	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
+	r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
+	return r, nil
+}
+
+// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
+// of loss during the encoding.
+func NewFloat40(f *big.Int) (Float40, error) {
+	m := f
+	e := big.NewInt(0)
+	zero := big.NewInt(0)
+	ten := big.NewInt(10)
+	thres := big.NewInt(0x08_00_00_00_00)
+	for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
+		!bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
+		m = new(big.Int).Div(m, ten)
+		e = new(big.Int).Add(e, big.NewInt(1))
+	}
+	if e.Int64() > 31 {
+		return 0, ErrFloat40E31
+	}
+	if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
+		return 0, ErrFloat40NotEnoughPrecission
+	}
+	r := new(big.Int).Add(m,
+		new(big.Int).Mul(e, thres))
+	return Float40(r.Uint64()), nil
+}
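A worked round trip of the encoding just defined (value = m * 10^e, with e in the top 5 bits and m in the low 35 bits). This is only a sketch importing the package added above; note the encoding is not canonical, so NewFloat40 and the hand-packed test-vector form can differ bitwise while decoding to the same integer.

package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// Encoding: 123000000 already fits in the 35 mantissa bits, so the
	// reduction loop never runs and NewFloat40 keeps e=0, m=123000000.
	f40, err := common.NewFloat40(big.NewInt(123000000))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", uint64(f40)) // 0x754d4c0

	// Decoding: the test-vector form 6*2^35+123 packs e=6, m=123 and
	// decodes to the same integer, 123 * 10^6.
	v, err := common.Float40(6*0x800000000 + 123).BigInt()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 123000000: equal values, different bit patterns
}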
common/float40_test.go (new file, 95 lines)
@@ -0,0 +1,95 @@
+package common
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConversionsFloat40(t *testing.T) {
+	testVector := map[Float40]string{
+		6*0x800000000 + 123:    "123000000",
+		2*0x800000000 + 4545:   "454500",
+		30*0x800000000 + 10235: "10235000000000000000000000000000000",
+		0x000000000:            "0",
+		0x800000000:            "0",
+		0x0001:                 "1",
+		0x0401:                 "1025",
+		0x800000000 + 1:        "10",
+		0xFFFFFFFFFF:           "343597383670000000000000000000000000000000",
+	}
+
+	for test := range testVector {
+		fix, err := test.BigInt()
+		require.NoError(t, err)
+		assert.Equal(t, fix.String(), testVector[test])
+
+		bi, ok := new(big.Int).SetString(testVector[test], 10)
+		require.True(t, ok)
+
+		fl, err := NewFloat40(bi)
+		assert.NoError(t, err)
+
+		fx2, err := fl.BigInt()
+		require.NoError(t, err)
+		assert.Equal(t, fx2.String(), testVector[test])
+	}
+}
+
+func TestExpectError(t *testing.T) {
+	testVector := map[string]error{
+		"9922334455000000000000000000000000000000":   nil,
+		"9922334455000000000000000000000000000001":   ErrFloat40NotEnoughPrecission,
+		"9922334454999999999999999999999999999999":   ErrFloat40NotEnoughPrecission,
+		"42949672950000000000000000000000000000000":  nil,
+		"99223344556573838487575":                    ErrFloat40NotEnoughPrecission,
+		"992233445500000000000000000000000000000000": ErrFloat40E31,
+		"343597383670000000000000000000000000000000": nil,
+		"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
+		"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
+		"343597383700000000000000000000000000000000": ErrFloat40E31,
+	}
+	for test := range testVector {
+		bi, ok := new(big.Int).SetString(test, 10)
+		require.True(t, ok)
+		_, err := NewFloat40(bi)
+		assert.Equal(t, testVector[test], err)
+	}
+}
+
+func BenchmarkFloat40(b *testing.B) {
+	newBigInt := func(s string) *big.Int {
+		bigInt, ok := new(big.Int).SetString(s, 10)
+		if !ok {
+			panic("Can not convert string to *big.Int")
+		}
+		return bigInt
+	}
+	type pair struct {
+		Float40 Float40
+		BigInt  *big.Int
+	}
+	testVector := []pair{
+		{6*0x800000000 + 123, newBigInt("123000000")},
+		{2*0x800000000 + 4545, newBigInt("454500")},
+		{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
+		{0x000000000, newBigInt("0")},
+		{0x800000000, newBigInt("0")},
+		{0x0001, newBigInt("1")},
+		{0x0401, newBigInt("1025")},
+		{0x800000000 + 1, newBigInt("10")},
+		{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
+	}
+	b.Run("NewFloat40()", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
+		}
+	})
+	b.Run("Float40.BigInt()", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = testVector[i%len(testVector)].Float40.BigInt()
+		}
+	})
+}
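The TestExpectError vectors follow directly from the NewFloat40 loop: 343597383670000000000000000000000000000000 is exactly (2^35 - 1) * 10^31, the largest representable value (mantissa and exponent both saturated); ...3680... would need m = 2^35, one past the 35-bit mantissa, hence ErrFloat40NotEnoughPrecission; ...3700... reduces to 3435973837 * 10^32, and e = 32 exceeds the 5-bit exponent's usable limit of 31, hence ErrFloat40E31. Likewise, any value whose trailing digit is non-zero while m is still at or above 2^35 (such as ...0001) cannot shed digits by dividing by 10 and fails with the precision error.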
@@ -11,13 +11,6 @@ import (
 	"github.com/iden3/go-iden3-crypto/babyjub"
 )

-const (
-	// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
-	L1UserTxBytesLen = 72
-	// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
-	L1CoordinatorTxBytesLen = 101
-)
-
 // L1Tx is a struct that represents a L1 tx
 type L1Tx struct {
 	// Stored in DB: mandatory fileds
@@ -179,45 +172,38 @@ func (tx L1Tx) Tx() Tx {
 // [ 8 bits ] empty (userFee) // 1 byte
 // [ 40 bits ] empty (nonce) // 5 bytes
 // [ 32 bits ] tokenID // 4 bytes
-// [ 16 bits ] amountFloat16 // 2 bytes
 // [ 48 bits ] toIdx // 6 bytes
 // [ 48 bits ] fromIdx // 6 bytes
 // [ 16 bits ] chainId // 2 bytes
 // [ 32 bits ] empty (signatureConstant) // 4 bytes
-// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
+// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
 func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
-	amountFloat16, err := NewFloat16(tx.Amount)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-
-	var b [31]byte
+	var b [29]byte
 	// b[0:7] empty: no ToBJJSign, no fee, no nonce
 	copy(b[7:11], tx.TokenID.Bytes())
-	copy(b[11:13], amountFloat16.Bytes())
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[13:19], toIdxBytes[:])
+	copy(b[11:17], toIdxBytes[:])
 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[19:25], fromIdxBytes[:])
-	binary.BigEndian.PutUint16(b[25:27], chainID)
-	copy(b[27:31], SignatureConstantBytes[:])
+	copy(b[17:23], fromIdxBytes[:])
+	binary.BigEndian.PutUint16(b[23:25], chainID)
+	copy(b[25:29], SignatureConstantBytes[:])

 	bi := new(big.Int).SetBytes(b[:])
 	return bi, nil
 }

 // BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat16 | Fee ]
+// [ fromIdx | toIdx | amountFloat40 | Fee ]
 func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd

-	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+	b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
@@ -231,13 +217,17 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

 	if tx.EffectiveAmount != nil {
-		amountFloat16, err := NewFloat16(tx.EffectiveAmount)
+		amountFloat40, err := NewFloat40(tx.EffectiveAmount)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
+		amountFloat40Bytes, err := amountFloat40.Bytes()
+		if err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+		copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
 	}
-	// fee = 0 (as is L1Tx) b[10:11]
+	// fee = 0 (as is L1Tx)
 	return b[:], nil
 }
@@ -247,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {

 	fromIdxBytes := b[0:idxLen]
 	toIdxBytes := b[idxLen : idxLen*2]
-	amountBytes := b[idxLen*2 : idxLen*2+2]
+	amountBytes := b[idxLen*2 : idxLen*2+5]

 	l1tx := L1Tx{}
 	fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -260,8 +250,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	l1tx.ToIdx = toIdx
-	l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
-	return &l1tx, nil
+	l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
+	return &l1tx, err
 }

 // BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -269,7 +259,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
 // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
 // for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
 func (tx *L1Tx) BytesGeneric() ([]byte, error) {
-	var b [L1UserTxBytesLen]byte
+	var b [RollupConstL1UserTotalBytes]byte
 	copy(b[0:20], tx.FromEthAddr.Bytes())
 	if tx.FromBJJ != EmptyBJJComp {
 		pkCompL := tx.FromBJJ
@@ -281,22 +271,33 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	copy(b[52:58], fromIdxBytes[:])
-	depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
+
+	depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[58:60], depositAmountFloat16.Bytes())
-	amountFloat16, err := NewFloat16(tx.Amount)
+	depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	copy(b[58:63], depositAmountFloat40Bytes)
+
+	amountFloat40, err := NewFloat40(tx.Amount)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	amountFloat40Bytes, err := amountFloat40.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[60:62], amountFloat16.Bytes())
-	copy(b[62:66], tx.TokenID.Bytes())
+	copy(b[63:68], amountFloat40Bytes)
+
+	copy(b[68:72], tx.TokenID.Bytes())
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[66:72], toIdxBytes[:])
+	copy(b[72:78], toIdxBytes[:])
 	return b[:], nil
 }
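The resulting layout of BytesGeneric matches RollupConstL1UserTotalBytes = 78: b[0:20] fromEthAddr, b[20:52] fromBjj-compressed, b[52:58] fromIdx, b[58:63] depositAmountFloat40, b[63:68] amountFloat40, b[68:72] tokenID, b[72:78] toIdx — 20+32+6+5+5+4+6 = 78 bytes, six more than the old Float16 layout's 72.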
@@ -313,7 +314,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
 	if tx.UserOrigin {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
 	}
-	var b [L1CoordinatorTxBytesLen]byte
+	var b [RollupConstL1CoordinatorTotalBytes]byte
 	v := compressedSignatureBytes[64]
 	s := compressedSignatureBytes[32:64]
 	r := compressedSignatureBytes[0:32]
@@ -329,7 +330,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err

 // L1UserTxFromBytes decodes a L1Tx from []byte
 func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
-	if len(b) != L1UserTxBytesLen {
+	if len(b) != RollupConstL1UserTotalBytes {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
 	}
@@ -347,13 +348,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	tx.FromIdx = fromIdx
-	tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
-	tx.Amount = Float16FromBytes(b[60:62]).BigInt()
-	tx.TokenID, err = TokenIDFromBytes(b[62:66])
+	tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	tx.ToIdx, err = IdxFromBytes(b[66:72])
+	tx.TokenID, err = TokenIDFromBytes(b[68:72])
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.ToIdx, err = IdxFromBytes(b[72:78])
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -368,7 +375,7 @@ func signHash(data []byte) []byte {

 // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
 func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
-	if len(b) != L1CoordinatorTxBytesLen {
+	if len(b) != RollupConstL1CoordinatorTotalBytes {
 		return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
 	}
@@ -50,64 +50,110 @@ func TestNewL1CoordinatorTx(t *testing.T) {
 }

 func TestL1TxCompressedData(t *testing.T) {
-	// test vectors values generated from javascript implementation (using
-	// PoolL2Tx values)
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-		TokenID: 5,
+		FromIdx: (1 << 48) - 1,
+		ToIdx:   (1 << 48) - 1,
+		Amount:  amount,
+		TokenID: (1 << 32) - 1,
 	}
-	chainID := uint16(0)
-	txCompressedData, err := tx.TxCompressedData(chainID)
+	txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
 	assert.NoError(t, err)
+	expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

-	// test vector value generated from javascript implementation
-	expectedStr := "7307597389635308713748674793997299267459594577423"
-	assert.Equal(t, expectedStr, txCompressedData.String())
-	assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+	tx = L1Tx{
+		FromIdx: 0,
+		ToIdx:   0,
+		Amount:  big.NewInt(0),
+		TokenID: 0,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(0))
+	assert.NoError(t, err)
+	expectedStr = "c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+	amount, ok = new(big.Int).SetString("63000000000000000", 10)
+	require.True(t, ok)
+	tx = L1Tx{
+		FromIdx: 324,
+		ToIdx:   256,
+		Amount:  amount,
+		TokenID: 123,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(1))
+	assert.NoError(t, err)
+	expectedStr = "7b0000000001000000000001440001c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
+
+	tx = L1Tx{
+		FromIdx: 1,
+		ToIdx:   2,
+		TokenID: 3,
+	}
+	txCompressedData, err = tx.TxCompressedData(uint16(0))
+	assert.NoError(t, err)
+	expectedStr = "030000000000020000000000010000c60be60f"
+	assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
 }

 func TestBytesDataAvailability(t *testing.T) {
+	// test vectors values generated from javascript implementation
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-		TokenID: 5,
+		ToIdx:           (1 << 16) - 1,
+		FromIdx:         (1 << 16) - 1,
+		EffectiveAmount: amount,
 	}
-	txCompressedData, err := tx.BytesDataAvailability(32)
+	txCompressedData, err := tx.BytesDataAvailability(16)
 	assert.NoError(t, err)
-	assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
-
-	tx = L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		EffectiveAmount: big.NewInt(4),
-		TokenID: 5,
-	}
-	txCompressedData, err = tx.BytesDataAvailability(32)
-	assert.NoError(t, err)
-	assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
-}
-
-func TestL1TxFromDataAvailability(t *testing.T) {
-	tx := L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		Amount:  big.NewInt(4),
-	}
-	txCompressedData, err := tx.BytesDataAvailability(32)
-	assert.NoError(t, err)
-	l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
+	assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
+	l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
 	require.NoError(t, err)
 	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
 	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
 	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)

 	tx = L1Tx{
-		FromIdx: 2,
-		ToIdx:   3,
-		EffectiveAmount: big.NewInt(4),
+		ToIdx:           (1 << 32) - 1,
+		FromIdx:         (1 << 32) - 1,
+		EffectiveAmount: amount,
 	}
 	txCompressedData, err = tx.BytesDataAvailability(32)
 	assert.NoError(t, err)
+	assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
 	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
 	require.NoError(t, err)
 	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
 	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
 	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
+
+	tx = L1Tx{
+		ToIdx:           0,
+		FromIdx:         0,
+		EffectiveAmount: big.NewInt(0),
+	}
+	txCompressedData, err = tx.BytesDataAvailability(32)
+	assert.NoError(t, err)
+	assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
+	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
+	assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
+	assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
+
+	tx = L1Tx{
+		ToIdx:           635,
+		FromIdx:         296,
+		EffectiveAmount: big.NewInt(1000000000000000000),
+	}
+	txCompressedData, err = tx.BytesDataAvailability(32)
+	assert.NoError(t, err)
+	assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
+	l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -172,12 +218,10 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
 		UserOrigin: true,
 	}

-	expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
-	require.NoError(t, err)
-
 	encodedData, err := l1Tx.BytesUser()
 	require.NoError(t, err)
-	assert.Equal(t, expected, encodedData)
+	expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
 }

 func TestL1CoordinatorTxByteParsers(t *testing.T) {
@@ -89,11 +89,15 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
 	// TokenID
 	b = append(b, tx.TokenID.Bytes()[:]...)
 	// Amount
-	amountFloat16, err := NewFloat16(tx.Amount)
+	amountFloat40, err := NewFloat40(tx.Amount)
 	if err != nil {
 		return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
 	}
-	b = append(b, amountFloat16.Bytes()...)
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return txID, tracerr.Wrap(err)
+	}
+	b = append(b, amountFloat40Bytes...)
 	// Nonce
 	nonceBytes, err := tx.Nonce.Bytes()
 	if err != nil {
@@ -170,11 +174,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
 }

 // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
-// [ fromIdx | toIdx | amountFloat16 | Fee ]
+// [ fromIdx | toIdx | amountFloat40 | Fee ]
 func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd

-	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+	b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd

 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
@@ -188,13 +192,16 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	}
 	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])

-	amountFloat16, err := NewFloat16(tx.Amount)
+	amountFloat40, err := NewFloat40(tx.Amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
-	b[idxLen*2+2] = byte(tx.Fee)
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes)
+	b[idxLen*2+5] = byte(tx.Fee)

 	return b[:], nil
 }
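Buffer arithmetic for the Float40 data-availability encoding: with nLevels = 32 the slice is ((32*2)+40+8)/8 = 14 bytes, laid out as [4-byte fromIdx | 4-byte toIdx | 5-byte amountFloat40 | 1-byte fee]. The last TestL2TxByteParsers vector later in this diff decomposes accordingly: 00000101 (fromIdx 257), 00000100 (toIdx 256), 0004b571c0 (Float40 of 79000000), c9 (fee 201).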
@@ -219,7 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
 		return nil, tracerr.Wrap(err)
 	}

-	tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
-	tx.Fee = FeeSelector(b[idxLen*2+2])
+	tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+5]).BigInt()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	tx.Fee = FeeSelector(b[idxLen*2+5])
 	return tx, nil
 }
@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err := NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
+	assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
+	assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
+	assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
+	assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
+	assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
+	assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())

 	l2Tx = &L2Tx{
 		FromIdx: 4444,
@@ -90,25 +90,85 @@ func TestNewL2Tx(t *testing.T) {
 	}
 	l2Tx, err = NewL2Tx(l2Tx)
 	assert.NoError(t, err)
-	assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
+	assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
 }

 func TestL2TxByteParsers(t *testing.T) {
-	amount := new(big.Int)
-	amount.SetString("79000000", 10)
+	// test vectors values generated from javascript implementation
+	amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
+	require.True(t, ok)
 	l2Tx := &L2Tx{
-		ToIdx:   256,
+		ToIdx:   (1 << 16) - 1,
+		FromIdx: (1 << 16) - 1,
 		Amount:  amount,
-		FromIdx: 257,
-		Fee:     201,
+		Fee:     (1 << 8) - 1,
 	}
-	// Data from the compatibility test
-	expected := "00000101000001002b16c9"
-	encodedData, err := l2Tx.BytesDataAvailability(32)
+	expected := "ffffffffffffffffffff"
+	encodedData, err := l2Tx.BytesDataAvailability(16)
 	require.NoError(t, err)
 	assert.Equal(t, expected, hex.EncodeToString(encodedData))

-	decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
+	decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
 	require.NoError(t, err)
 	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   (1 << 32) - 1,
+		FromIdx: (1 << 32) - 1,
+		Amount:  amount,
+		Fee:     (1 << 8) - 1,
+	}
+	expected = "ffffffffffffffffffffffffffff"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   0,
+		FromIdx: 0,
+		Amount:  big.NewInt(0),
+		Fee:     0,
+	}
+	expected = "0000000000000000000000000000"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   0,
+		FromIdx: 1061,
+		Amount:  big.NewInt(420000000000),
+		Fee:     127,
+	}
+	expected = "000004250000000010fa56ea007f"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
+
+	l2Tx = &L2Tx{
+		ToIdx:   256,
+		FromIdx: 257,
+		Amount:  big.NewInt(79000000),
+		Fee:     201,
+	}
+	expected = "00000101000001000004b571c0c9"
+	encodedData, err = l2Tx.BytesDataAvailability(32)
+	require.NoError(t, err)
+	assert.Equal(t, expected, hex.EncodeToString(encodedData))
+
+	decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
+	require.NoError(t, err)
+	assert.Equal(t, l2Tx, decodedData)
 }
@@ -36,7 +36,7 @@ type PoolL2Tx struct {
 	ToEthAddr ethCommon.Address     `meddler:"to_eth_addr,zeroisnull"`
 	ToBJJ     babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
 	TokenID   TokenID               `meddler:"token_id"`
-	Amount    *big.Int              `meddler:"amount,bigint"` // TODO: change to float16
+	Amount    *big.Int              `meddler:"amount,bigint"` // TODO: change to float40
 	Fee       FeeSelector           `meddler:"fee"`
 	Nonce     Nonce                 `meddler:"nonce"` // effective 40 bits used
 	State     PoolL2TxState         `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
 	RqToEthAddr ethCommon.Address     `meddler:"rq_to_eth_addr,zeroisnull"`
 	RqToBJJ     babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
 	RqTokenID   TokenID               `meddler:"rq_token_id,zeroisnull"`
-	RqAmount    *big.Int              `meddler:"rq_amount,bigintnull"` // TODO: change to float16
+	RqAmount    *big.Int              `meddler:"rq_amount,bigintnull"` // TODO: change to float40
 	RqFee       FeeSelector           `meddler:"rq_fee,zeroisnull"`
 	RqNonce     Nonce                 `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
 	AbsoluteFee float64               `meddler:"fee_usd,zeroisnull"`
@@ -122,18 +122,13 @@ func (tx *PoolL2Tx) SetID() error {
 // [ 8 bits ] userFee // 1 byte
 // [ 40 bits ] nonce // 5 bytes
 // [ 32 bits ] tokenID // 4 bytes
-// [ 16 bits ] amountFloat16 // 2 bytes
 // [ 48 bits ] toIdx // 6 bytes
 // [ 48 bits ] fromIdx // 6 bytes
 // [ 16 bits ] chainId // 2 bytes
 // [ 32 bits ] signatureConstant // 4 bytes
 // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
 func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
-	amountFloat16, err := NewFloat16(tx.Amount)
-	if err != nil {
-		return nil, tracerr.Wrap(err)
-	}
-	var b [31]byte
+	var b [29]byte

 	toBJJSign := byte(0)
 	pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -149,19 +144,18 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
 	}
 	copy(b[2:7], nonceBytes[:])
 	copy(b[7:11], tx.TokenID.Bytes())
-	copy(b[11:13], amountFloat16.Bytes())
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[13:19], toIdxBytes[:])
+	copy(b[11:17], toIdxBytes[:])
 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[19:25], fromIdxBytes[:])
-	binary.BigEndian.PutUint16(b[25:27], chainID)
-	copy(b[27:31], SignatureConstantBytes[:])
+	copy(b[17:23], fromIdxBytes[:])
+	binary.BigEndian.PutUint16(b[23:25], chainID)
+	copy(b[25:29], SignatureConstantBytes[:])

 	bi := new(big.Int).SetBytes(b[:])
 	return bi, nil
@@ -170,9 +164,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
 // TxCompressedDataEmpty calculates the TxCompressedData of an empty
 // transaction
 func TxCompressedDataEmpty(chainID uint16) *big.Int {
-	var b [31]byte
-	binary.BigEndian.PutUint16(b[25:27], chainID)
-	copy(b[27:31], SignatureConstantBytes[:])
+	var b [29]byte
+	binary.BigEndian.PutUint16(b[23:25], chainID)
+	copy(b[25:29], SignatureConstantBytes[:])
 	bi := new(big.Int).SetBytes(b[:])
 	return bi
 }
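A quick self-check (sketch) of the shrunken 29-byte layout: with every field zero, only chainID at b[23:25] and the 4-byte signature constant at b[25:29] survive, which is why the zero-transaction test vectors reduce to plain c60be60f. The constant's value below is inferred from those vectors (every expected hex string ends in c60be60f), not taken from the repo.

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// signatureConstantBytes as implied by the test vectors in this diff.
var signatureConstantBytes = []byte{0xc6, 0x0b, 0xe6, 0x0f}

// txCompressedDataEmpty mirrors the 29-byte layout above: only chainID and
// the signature constant are non-zero for an empty transaction.
func txCompressedDataEmpty(chainID uint16) *big.Int {
	var b [29]byte
	binary.BigEndian.PutUint16(b[23:25], chainID)
	copy(b[25:29], signatureConstantBytes)
	return new(big.Int).SetBytes(b[:])
}

func main() {
	fmt.Printf("%x\n", txCompressedDataEmpty(0).Bytes())      // c60be60f
	fmt.Printf("%x\n", txCompressedDataEmpty(0xffff).Bytes()) // ffffc60be60f
}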
@@ -182,7 +176,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
 // [ 8 bits ] userFee // 1 byte
 // [ 40 bits ] nonce // 5 bytes
 // [ 32 bits ] tokenID // 4 bytes
-// [ 16 bits ] amountFloat16 // 2 bytes
+// [ 40 bits ] amountFloat40 // 5 bytes
 // [ 48 bits ] toIdx // 6 bytes
 // [ 48 bits ] fromIdx // 6 bytes
 // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -190,11 +184,16 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 	if tx.Amount == nil {
 		tx.Amount = big.NewInt(0)
 	}
-	amountFloat16, err := NewFloat16(tx.Amount)
+	amountFloat40, err := NewFloat40(tx.Amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	var b [25]byte
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+
+	var b [28]byte
 	toBJJSign := byte(0)
 	if tx.ToBJJ != EmptyBJJComp {
 		sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -210,17 +209,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 	}
 	copy(b[2:7], nonceBytes[:])
 	copy(b[7:11], tx.TokenID.Bytes())
-	copy(b[11:13], amountFloat16.Bytes())
+	copy(b[11:16], amountFloat40Bytes)
 	toIdxBytes, err := tx.ToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[13:19], toIdxBytes[:])
+	copy(b[16:22], toIdxBytes[:])
 	fromIdxBytes, err := tx.FromIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[19:25], fromIdxBytes[:])
+	copy(b[22:28], fromIdxBytes[:])

 	bi := new(big.Int).SetBytes(b[:])
 	return bi, nil
@@ -236,7 +235,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
 // [ 8 bits ] rqUserFee // 1 byte
 // [ 40 bits ] rqNonce // 5 bytes
 // [ 32 bits ] rqTokenID // 4 bytes
-// [ 16 bits ] rqAmountFloat16 // 2 bytes
+// [ 40 bits ] rqAmountFloat40 // 5 bytes
 // [ 48 bits ] rqToIdx // 6 bytes
 // [ 48 bits ] rqFromIdx // 6 bytes
 // Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -244,11 +243,16 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
 	if tx.RqAmount == nil {
 		tx.RqAmount = big.NewInt(0)
 	}
-	amountFloat16, err := NewFloat16(tx.RqAmount)
+	amountFloat40, err := NewFloat40(tx.RqAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	var b [25]byte
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+
+	var b [28]byte
 	rqToBJJSign := byte(0)
 	if tx.RqToBJJ != EmptyBJJComp {
 		sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -264,17 +268,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
 	}
 	copy(b[2:7], nonceBytes[:])
 	copy(b[7:11], tx.RqTokenID.Bytes())
-	copy(b[11:13], amountFloat16.Bytes())
+	copy(b[11:16], amountFloat40Bytes)
 	toIdxBytes, err := tx.RqToIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[13:19], toIdxBytes[:])
+	copy(b[16:22], toIdxBytes[:])
 	fromIdxBytes, err := tx.RqFromIdx.Bytes()
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	copy(b[19:25], fromIdxBytes[:])
+	copy(b[22:28], fromIdxBytes[:])

 	bi := new(big.Int).SetBytes(b[:])
 	return bi, nil
@@ -287,7 +291,22 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
+
+	// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
+	var e1B [25]byte
+	amountFloat40, err := NewFloat40(tx.Amount)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	amountFloat40Bytes, err := amountFloat40.Bytes()
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
+	copy(e1B[0:5], amountFloat40Bytes)
 	toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
+	copy(e1B[5:25], toEthAddr.Bytes())
+	e1 := new(big.Int).SetBytes(e1B[:])

 	rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)

 	_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -299,7 +318,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {

 	_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)

-	return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
+	return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
 }

 // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
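Note on e1: the amount this commit removes from TxCompressedData reappears here, packed in front of ToEthAddr. The 25-byte (200-bit) concatenation still fits within a single SNARK field element (the Poseidon inputs are values of a roughly 254-bit field), so the hash presumably keeps its six inputs instead of growing a seventh.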
@@ -21,80 +21,104 @@ func TestNewPoolL2Tx(t *testing.T) {
}
poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
}

func TestTxCompressedData(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
// test vector values generated from the javascript implementation
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)

var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)

amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
Nonce: 6,
ToBJJ: sk.Public().Compress(),
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
Nonce: (1 << 40) - 1,
Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
// using a different chainID
txCompressedData, err = tx.TxCompressedData(uint16(100))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
require.NoError(t, err)
expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

txCompressedDataV2, err := tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))

tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public().Compress(),
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
Nonce: 0,
Fee: 0,
ToBJJ: skNegative.Public().Compress(),
}
rqTxCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public().Compress(),
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
assert.Equal(t, "0", txCompressedDataV2.String())

amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = PoolL2Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
txCompressedData, err = tx.TxCompressedData(uint16(1))
require.NoError(t, err)
expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))

tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))

tx = PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
TokenID: 4,
Nonce: 5,
Fee: 6,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
}

func TestRqTxCompressedDataV2(t *testing.T) {
@@ -113,19 +137,16 @@ func TestRqTxCompressedDataV2(t *testing.T) {
txCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}

func TestHashToSign(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
@@ -136,7 +157,7 @@ func TestHashToSign(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
}

func TestVerifyTxSignature(t *testing.T) {
@@ -156,7 +177,7 @@ func TestVerifyTxSignature(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())

sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()

@@ -112,7 +112,7 @@ type ZKInputs struct {
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float16
// DepositAmountF encoded as float40
DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]

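The float16-to-float40 comment change uses the same encoding seen earlier in this diff (NewFloat40, Float40.Bytes). A sketch of producing a 5-byte loadAmountF/DepositAmountF value, assuming the common package's Float40 API as it appears in the lines above:

package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	amount, _ := new(big.Int).SetString("63000000000000000", 10)
	f40, err := common.NewFloat40(amount) // errors if the value is not representable
	if err != nil {
		panic(err)
	}
	b, err := f40.Bytes() // 5 bytes, big-endian
	if err != nil {
		panic(err)
	}
	fmt.Printf("float40 bytes: %x\n", b)
}
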
@@ -90,7 +90,6 @@ type Coordinator struct {
} `validate:"required"`
ServerProofs []ServerProof `validate:"required"`
Circuit struct {
// VerifierIdx uint8 `validate:"required"`
// MaxTx is the maximum number of txs supported by the circuit
MaxTx int64 `validate:"required"`
// NLevels is the maximum number of merkle tree levels
@@ -132,6 +131,10 @@ type Coordinator struct {
// LightScrypt if set, uses light parameters for the ethereum
// keystore encryption algorithm.
LightScrypt bool
// RollupVerifierIndex is the index of the verifier to use in
// the Rollup smart contract. The verifier chosen by index
// must match with the Circuit parameters.
RollupVerifierIndex *int
}
}

@@ -201,9 +204,14 @@ type Node struct {
// UpdateMetricsInterval is the interval between updates of the
// API metrics
UpdateMetricsInterval Duration
// UpdateMetricsInterval is the interval between updates of the
// UpdateRecommendedFeeInterval is the interval between updates of the
// recommended fees
UpdateRecommendedFeeInterval Duration
// MaxSQLConnections is the maximum number of concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug struct {
// APIAddress is the address where the debugAPI will listen if

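MaxSQLConnections and SQLConnectionTimeout together describe a gate that API handlers must pass before touching SQL. A minimal sketch of such a gate (the names here are illustrative, not the node's actual connection controller):

package main

import (
	"fmt"
	"time"
)

// connLimiter caps concurrent SQL access with a buffered channel as a semaphore.
type connLimiter struct {
	slots   chan struct{}
	timeout time.Duration
}

func newConnLimiter(max int, timeout time.Duration) *connLimiter {
	return &connLimiter{slots: make(chan struct{}, max), timeout: timeout}
}

// acquire blocks until a slot is free or the timeout expires,
// which is exactly what SQLConnectionTimeout bounds.
func (l *connLimiter) acquire() error {
	select {
	case l.slots <- struct{}{}:
		return nil
	case <-time.After(l.timeout):
		return fmt.Errorf("timed out waiting for a free SQL connection")
	}
}

func (l *connLimiter) release() { <-l.slots }

func main() {
	l := newConnLimiter(1, 50*time.Millisecond)
	if err := l.acquire(); err == nil {
		defer l.release()
		fmt.Println("got a connection slot")
	}
}
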
@@ -348,13 +348,13 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
}

func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if c.pipeline != nil {
c.pipeline.Stop(c.ctx)
c.pipeline = nil
}
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
// TODO: Check that we are in a slot in which we can't forge
}

@@ -97,15 +97,16 @@ func newTestModules(t *testing.T) modules {
syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 48})
assert.NoError(t, err)

pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
historyDB := historydb.NewHistoryDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, nil)

txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err)

@@ -200,15 +200,17 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.syncSCVars(statsVars.Vars)
case <-time.After(waitDuration):
batchNum = p.batchNum + 1
if batchInfo, err := p.handleForgeBatch(p.ctx, batchNum); err != nil {
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil {
continue
} else if err != nil {
waitDuration = p.cfg.SyncRetryInterval
continue
} else {
p.batchNum = batchNum
select {
case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done():
}
}
p.batchNum = batchNum
select {
case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done():
}
}
}

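The restructured loop checks p.ctx.Err() before inspecting the forge error, so a shutdown is never misread as a batch failure. The same pattern in isolation, with hypothetical names:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runLoop mirrors the control flow above: ctx first, then the error,
// and a cancelable send on success.
func runLoop(ctx context.Context, handle func() (int, error), out chan<- int, retry time.Duration) {
	waitDuration := time.Millisecond
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(waitDuration):
			res, err := handle()
			if ctx.Err() != nil {
				continue // shutting down: any error is cancellation noise
			} else if err != nil {
				waitDuration = retry // genuine failure: back off and retry
				continue
			}
			select {
			case out <- res:
			case <-ctx.Done():
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
	defer cancel()
	out := make(chan int, 1)
	go runLoop(ctx, func() (int, error) { return 0, errors.New("not ready") }, out, 5*time.Millisecond)
	<-ctx.Done()
	fmt.Println("loop stopped")
}
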
@@ -21,19 +21,21 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
return l2db.NewL2DB(db, 10, 100, 24*time.Hour)
return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
}

func newStateDB(t *testing.T) *statedb.LocalStateDB {
syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 48})
assert.NoError(t, err)
stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
require.NoError(t, err)
deleteme = append(deleteme, stateDBPath)
stateDB, err := statedb.NewLocalStateDB(stateDBPath, 128, syncStateDB, statedb.TypeTxSelector, 0)
stateDB, err := statedb.NewLocalStateDB(statedb.Config{Path: stateDBPath, Keep: 128,
Type: statedb.TypeTxSelector, NLevels: 0}, syncStateDB)
require.NoError(t, err)
return stateDB
}

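The statedb change above is an instance of the positional-args-to-config-struct migration. A minimal sketch of the pattern with hypothetical names:

package main

import "fmt"

// Config replaces a growing list of positional parameters: adding a field
// here (like NoLast in kvdb further below) does not break existing call sites
// the way adding a positional parameter would.
type Config struct {
	Path    string
	Keep    int
	NLevels int
}

type Thing struct{ cfg Config }

func NewThing(cfg Config) (*Thing, error) {
	if cfg.Path == "" {
		return nil, fmt.Errorf("path required")
	}
	return &Thing{cfg: cfg}, nil
}

func main() {
	t, err := NewThing(Config{Path: "/tmp/db", Keep: 128})
	fmt.Println(t, err)
}
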
1026	db/historydb/apiqueries.go	Normal file
File diff suppressed because it is too large
@@ -1,8 +1,6 @@
package historydb

import (
"errors"
"fmt"
"math"
"math/big"
"strings"
@@ -11,7 +9,6 @@ import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"

//nolint:errcheck // driver for postgres DB
@@ -30,12 +27,13 @@ const (

// HistoryDB persists the history of the rollup
type HistoryDB struct {
db *sqlx.DB
db         *sqlx.DB
apiConnCon *db.APIConnectionController
}

// NewHistoryDB initializes the DB
func NewHistoryDB(db *sqlx.DB) *HistoryDB {
return &HistoryDB{db: db}
func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
return &HistoryDB{db: db, apiConnCon: apiConnCon}
}

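A wiring sketch based on the calls visible in this diff: a HistoryDB that serves API traffic gets an APIConnectionController, while internal consumers pass nil to skip the limiting. The controller parameters (50 slots, 2s wait) are illustrative:

package main

import (
	"os"
	"time"

	"github.com/hermeznetwork/hermez-node/db"
	"github.com/hermeznetwork/hermez-node/db/historydb"
)

func main() {
	// Port, host, and DB names mirror the test setup in this diff.
	database, err := db.InitSQLDB(5432, "localhost", "hermez", os.Getenv("POSTGRES_PASS"), "hermez")
	if err != nil {
		panic(err)
	}
	// One controller shared by every DB layer that serves API requests.
	apiConnCon := db.NewAPICnnectionController(50, 2*time.Second)
	hdbAPI := historydb.NewHistoryDB(database, apiConnCon) // API-facing
	hdbNode := historydb.NewHistoryDB(database, nil)       // internal: no limiting
	_, _ = hdbAPI, hdbNode
}
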
// DB returns a pointer to the L2DB.db. This method should be used only for
@@ -87,8 +85,8 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
}

// GetBlocks retrieves blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) GetBlocks(from, to int64) ([]common.Block, error) {
// getBlocks retrieves blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block
err := meddler.QueryAll(
hdb.db, &blocks,
@@ -166,116 +164,6 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil
}

// GetBatchAPI returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow(
hdb.db, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
block.timestamp, block.hash,
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE batch_num = $1;`, batchNum,
))
}

// GetBatchesAPI returns the batches applying the given filters
func (hdb *HistoryDB) GetBatchesAPI(
minBatchNum, maxBatchNum, slotNum *uint,
forgerAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]BatchAPI, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
block.timestamp, block.hash,
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs,
count(*) OVER() AS total_items
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num `
// Apply filters
nextIsAnd := false
// minBatchNum filter
if minBatchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.batch_num > ? "
args = append(args, minBatchNum)
nextIsAnd = true
}
// maxBatchNum filter
if maxBatchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.batch_num < ? "
args = append(args, maxBatchNum)
nextIsAnd = true
}
// slotNum filter
if slotNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.slot_num = ? "
args = append(args, slotNum)
nextIsAnd = true
}
// forgerAddr filter
if forgerAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.forger_addr = ? "
args = append(args, forgerAddr)
nextIsAnd = true
}
// pagination
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "batch.item_id >= ? "
} else {
queryStr += "batch.item_id <= ? "
}
args = append(args, fromItem)
}
queryStr += "ORDER BY batch.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
batchPtrs := []*BatchAPI{}
if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
if len(batches) == 0 {
return batches, 0, nil
}
return batches, batches[0].TotalItems - uint64(len(batches)), nil
}

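GetBatchesAPI (and the other *API queries below) repeat the same nextIsAnd bookkeeping: the first predicate gets "WHERE", every later one gets "AND". A small helper capturing that pattern, with hypothetical names:

package main

import "fmt"

// whereBuilder accumulates SQL predicates and their bind arguments.
type whereBuilder struct {
	query string
	args  []interface{}
	added bool
}

func (w *whereBuilder) add(cond string, arg interface{}) {
	if w.added {
		w.query += "AND "
	} else {
		w.query += "WHERE "
		w.added = true
	}
	w.query += cond + " "
	w.args = append(w.args, arg)
}

func main() {
	w := whereBuilder{query: "SELECT * FROM batch "}
	w.add("batch.batch_num > ?", 10)
	w.add("batch.slot_num = ?", 3)
	fmt.Println(w.query, w.args)
	// SELECT * FROM batch WHERE batch.batch_num > ? AND batch.slot_num = ?  [10 3]
}
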
// GetAllBatches retrieves all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch
@@ -375,22 +263,6 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
return db.SlicePtrsToSlice(bids).([]common.Bid), tracerr.Wrap(err)
}

// GetBestBidAPI returns the best bid in a specific slot, by slotNum
func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
bid := &BidAPI{}
err := meddler.QueryRow(
hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON bid.bidder_addr = c.bidder_addr
INNER JOIN coordinator ON c.item_id = coordinator.item_id
WHERE slot_num = $1 ORDER BY item_id DESC LIMIT 1;`, slotNum,
)
return *bid, tracerr.Wrap(err)
}

// GetBestBidCoordinator returns the forger address of the highest bidder in a slot by slotNum
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{}
@@ -416,133 +288,6 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat
return bidCoord, tracerr.Wrap(err)
}

// GetBestBidsAPI returns the best bids in the given slot range, by slotNum
func (hdb *HistoryDB) GetBestBidsAPI(
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator
queryStr := `SELECT b.*, block.timestamp, coordinator.forger_addr, coordinator.url,
COUNT(*) OVER() AS total_items FROM (
SELECT slot_num, MAX(item_id) as maxitem
FROM bid GROUP BY slot_num
)
AS x INNER JOIN bid AS b ON b.item_id = x.maxitem
INNER JOIN block ON b.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON b.bidder_addr = c.bidder_addr
INNER JOIN coordinator ON c.item_id = coordinator.item_id
WHERE (b.slot_num >= ? AND b.slot_num <= ?)`
args = append(args, minSlotNum)
args = append(args, maxSlotNum)
// Apply filters
if bidderAddr != nil {
queryStr += " AND b.bidder_addr = ? "
args = append(args, bidderAddr)
}
queryStr += " ORDER BY b.slot_num "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
if limit != nil {
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
}
query = hdb.db.Rebind(queryStr)
bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// log.Debug(query)
bids := db.SlicePtrsToSlice(bidPtrs).([]BidAPI)
if len(bids) == 0 {
return bids, 0, nil
}
return bids, bids[0].TotalItems - uint64(len(bids)), nil
}

// GetBidsAPI returns the bids applying the given filters
func (hdb *HistoryDB) GetBidsAPI(
slotNum *int64, bidderAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN each bid with the latest update of each coordinator
queryStr := `SELECT bid.*, block.timestamp, coord.forger_addr, coord.url,
COUNT(*) OVER() AS total_items
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON bid.bidder_addr = c.bidder_addr
INNER JOIN coordinator coord ON c.item_id = coord.item_id `
// Apply filters
nextIsAnd := false
// slotNum filter
if slotNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "bid.slot_num = ? "
args = append(args, slotNum)
nextIsAnd = true
}
// bidder filter
if bidderAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "bid.bidder_addr = ? "
args = append(args, bidderAddr)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "bid.item_id >= ? "
} else {
queryStr += "bid.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY bid.item_id "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)
bids := []*BidAPI{}
if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(bids) == 0 {
return []BidAPI{}, 0, nil
}
return db.SlicePtrsToSlice(bids).([]BidAPI), bids[0].TotalItems - uint64(len(bids)), nil
}

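GetBidsAPI expands slice arguments with sqlx.In before rebinding placeholders for Postgres. A self-contained sketch of that two-step:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	// Step 1: sqlx.In expands any slice argument into the right number of "?".
	query, args, err := sqlx.In(
		"SELECT * FROM token WHERE token_id IN (?) AND name ~ ?",
		[]int{1, 2, 3}, "Ether",
	)
	if err != nil {
		panic(err)
	}
	// Step 2: Rebind converts "?" to the driver's placeholders ($1, $2, ... on Postgres).
	query = sqlx.Rebind(sqlx.DOLLAR, query)
	fmt.Println(query) // SELECT * FROM token WHERE token_id IN ($1, $2, $3) AND name ~ $4
	fmt.Println(args)  // [1 2 3 Ether]
}
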
// AddCoordinators inserts Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
@@ -708,77 +453,6 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
}

// GetTokens returns a list of tokens from the DB
func (hdb *HistoryDB) GetTokens(
ids []common.TokenID, symbols []string, name string, fromItem,
limit *uint, order string,
) ([]TokenWithUSD, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT * , COUNT(*) OVER() AS total_items FROM token `
// Apply filters
nextIsAnd := false
if len(ids) > 0 {
queryStr += "WHERE token_id IN (?) "
nextIsAnd = true
args = append(args, ids)
}
if len(symbols) > 0 {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "symbol IN (?) "
args = append(args, symbols)
nextIsAnd = true
}
if name != "" {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "name ~ ? "
args = append(args, name)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "item_id >= ? "
} else {
queryStr += "item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY item_id "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)
tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(tokens) == 0 {
return []TokenWithUSD{}, 0, nil
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tokens[0].TotalItems - uint64(len(tokens)), nil
}

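Each of these queries returns the pending-items count as totalItems minus the rows in the current page; total_items comes from the COUNT(*) OVER() window, so no second counting query is needed. In numbers:

package main

import "fmt"

func main() {
	totalItems := uint64(42) // COUNT(*) OVER() carried by every returned row
	returned := uint64(10)   // rows in this page (LIMIT 10)
	fmt.Println("pending after this page:", totalItems-returned) // 32
}
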
// GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokenSymbols []string
@@ -951,153 +625,6 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
))
}

// GetHistoryTx returns a tx from the DB given a TxID
func (hdb *HistoryDB) GetHistoryTx(txID common.TxID) (*TxAPI, error) {
// Warning: amount_success and deposit_amount_success have true as default for
// performance reasons. The expected default value is false (when txs are unforged)
// this case is handled at the function func (tx TxAPI) MarshalJSON() ([]byte, error)
tx := &TxAPI{}
err := meddler.QueryRow(
hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.deposit_amount, tx.deposit_amount_usd, tx.deposit_amount_success, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE tx.id = $1;`, txID,
)
return tx, tracerr.Wrap(err)
}

// GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct
// and pagination info
func (hdb *HistoryDB) GetHistoryTxs(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp,
tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
fromItem, limit *uint, order string,
) ([]TxAPI, uint64, error) {
// Warning: amount_success and deposit_amount_success have true as default for
// performance reasons. The expected default value is false (when txs are unforged)
// this case is handled at the function func (tx TxAPI) MarshalJSON() ([]byte, error)
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.deposit_amount, tx.deposit_amount_usd, tx.deposit_amount_success, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp, count(*) OVER() AS total_items
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE (tx.from_eth_addr = ? OR tx.to_eth_addr = ?) "
nextIsAnd = true
args = append(args, ethAddr, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE (tx.from_bjj = ? OR tx.to_bjj = ?) "
nextIsAnd = true
args = append(args, bjj, bjj)
}
// tokenID filter
if tokenID != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.token_id = ? "
args = append(args, tokenID)
nextIsAnd = true
}
// idx filter
if idx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
args = append(args, idx, idx)
nextIsAnd = true
}
// batchNum filter
if batchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.batch_num = ? "
args = append(args, batchNum)
nextIsAnd = true
}
// txType filter
if txType != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.type = ? "
args = append(args, txType)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "tx.item_id >= ? "
} else {
queryStr += "tx.item_id <= ? "
}
args = append(args, fromItem)
nextIsAnd = true
}
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.batch_num IS NOT NULL "

// pagination
queryStr += "ORDER BY tx.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
txsPtrs := []*TxAPI{}
if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
if len(txs) == 0 {
return txs, 0, nil
}
return txs, txs[0].TotalItems - uint64(len(txs)), nil
}

// GetAllExits returns all exits from the DB
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
var exits []*common.ExitInfo
@@ -1110,137 +637,6 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
return db.SlicePtrsToSlice(exits).([]common.ExitInfo), tracerr.Wrap(err)
}

// GetExitAPI returns an exit from the DB
func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, error) {
exit := &ExitAPI{}
err := meddler.QueryRow(
hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
exit_tree.delayed_withdraw_request, exit_tree.delayed_withdrawn,
token.token_id, token.item_id AS token_item_id,
token.eth_block_num AS token_block, token.eth_addr AS token_eth_addr, token.name, token.symbol,
token.decimals, token.usd, token.usd_update
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id
WHERE exit_tree.batch_num = $1 AND exit_tree.account_idx = $2;`, batchNum, idx,
)
return exit, tracerr.Wrap(err)
}

// GetExitsAPI returns a list of exits from the DB and pagination info
func (hdb *HistoryDB) GetExitsAPI(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp, tokenID *common.TokenID,
idx *common.Idx, batchNum *uint, onlyPendingWithdraws *bool,
fromItem, limit *uint, order string,
) ([]ExitAPI, uint64, error) {
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
exit_tree.delayed_withdraw_request, exit_tree.delayed_withdrawn,
token.token_id, token.item_id AS token_item_id,
token.eth_block_num AS token_block, token.eth_addr AS token_eth_addr, token.name, token.symbol,
token.decimals, token.usd, token.usd_update, COUNT(*) OVER() AS total_items
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE account.eth_addr = ? "
nextIsAnd = true
args = append(args, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE account.bjj = ? "
nextIsAnd = true
args = append(args, bjj)
}
// tokenID filter
if tokenID != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "account.token_id = ? "
args = append(args, tokenID)
nextIsAnd = true
}
// idx filter
if idx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.account_idx = ? "
args = append(args, idx)
nextIsAnd = true
}
// batchNum filter
if batchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.batch_num = ? "
args = append(args, batchNum)
nextIsAnd = true
}
// onlyPendingWithdraws
if onlyPendingWithdraws != nil {
if *onlyPendingWithdraws {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "(exit_tree.instant_withdrawn IS NULL AND exit_tree.delayed_withdrawn IS NULL) "
nextIsAnd = true
}
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "exit_tree.item_id >= ? "
} else {
queryStr += "exit_tree.item_id <= ? "
}
args = append(args, fromItem)
// nextIsAnd = true
}
// pagination
queryStr += "ORDER BY exit_tree.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
exits := []*ExitAPI{}
if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(exits) == 0 {
return []ExitAPI{}, 0, nil
}
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
}

// GetAllL1UserTxs returns all L1UserTxs from the DB
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx
@@ -1381,19 +777,6 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}

// GetBucketUpdates retrieves latest values for each bucket
func (hdb *HistoryDB) GetBucketUpdates() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.db, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}

func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 {
return nil
@@ -1698,274 +1081,22 @@ func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*Coordina
return coordinator, tracerr.Wrap(err)
}

// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]CoordinatorAPI, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT coordinator.*, COUNT(*) OVER() AS total_items
FROM coordinator INNER JOIN (
SELECT MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON coordinator.item_id = c.item_id `
// Apply filters
nextIsAnd := false
if bidderAddr != nil {
queryStr += "WHERE bidder_addr = ? "
nextIsAnd = true
args = append(args, bidderAddr)
}
if forgerAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "forger_addr = ? "
nextIsAnd = true
args = append(args, forgerAddr)
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "coordinator.item_id >= ? "
} else {
queryStr += "coordinator.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY coordinator.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)

coordinators := []*CoordinatorAPI{}
if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(coordinators) == 0 {
return []CoordinatorAPI{}, 0, nil
}
return db.SlicePtrsToSlice(coordinators).([]CoordinatorAPI),
coordinators[0].TotalItems - uint64(len(coordinators)), nil
}

// AddAuctionVars inserts auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars))
}

// GetAuctionVars returns auction variables
func (hdb *HistoryDB) GetAuctionVars() (*common.AuctionVariables, error) {
auctionVars := &common.AuctionVariables{}
err := meddler.QueryRow(
hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
)
return auctionVars, tracerr.Wrap(err)
}

// GetAuctionVarsUntilSetSlotNum returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNum(slotNum int64, maxItems int) ([]MinBidInfo, error) {
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err := meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
if err != nil {
// GetTokensTest used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(
hdb.db, &tokens,
"SELECT * FROM TOKEN",
); err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}

// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
account := &AccountAPI{}
err := meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)

if err != nil {
return nil, tracerr.Wrap(err)
}

return account, nil
}

// GetAccountsAPI returns a list of accounts from the DB and pagination info
func (hdb *HistoryDB) GetAccountsAPI(
tokenIDs []common.TokenID, ethAddr *ethCommon.Address,
bjj *babyjub.PublicKeyComp, fromItem, limit *uint, order string,
) ([]AccountAPI, uint64, error) {
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
COUNT(*) OVER() AS total_items
FROM account INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE account.eth_addr = ? "
nextIsAnd = true
args = append(args, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE account.bjj = ? "
nextIsAnd = true
args = append(args, bjj)
}
// tokenID filter
if len(tokenIDs) > 0 {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "account.token_id IN (?) "
args = append(args, tokenIDs)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "account.item_id >= ? "
} else {
queryStr += "account.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY account.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)

accounts := []*AccountAPI{}
if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(accounts) == 0 {
return []AccountAPI{}, 0, nil
}

return db.SlicePtrsToSlice(accounts).([]AccountAPI),
accounts[0].TotalItems - uint64(len(accounts)), nil
}

// GetMetrics returns metrics
func (hdb *HistoryDB) GetMetrics(lastBatchNum common.BatchNum) (*Metrics, error) {
metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err := meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
if err != nil {
return nil, tracerr.Wrap(err)
}

seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
// Avoid dividing by 0
if seconds == 0 {
seconds++
}

metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds

if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
} else {
metrics.TransactionsPerBatch = float64(0)
}

err = meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else {
metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
metrics.AvgTransactionFee = 0
}
err = meddler.QueryRow(
hdb.db, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil {
return nil, tracerr.Wrap(err)
}

return metrics, nil
}

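GetMetrics derives all of its rates from one 24-hour window. A worked example of the arithmetic with made-up numbers:

package main

import "fmt"

func main() {
	// Hypothetical window: 8640 txs over 86400s, forged in 720 batches for $432 in fees.
	totalTxs, seconds := 8640.0, 86400.0
	totalBatches, totalFeesUSD := 720.0, 432.0
	fmt.Println("tx/s:", totalTxs/seconds)             // 0.1
	fmt.Println("batch every:", seconds/totalBatches)  // 120 seconds
	fmt.Println("avg fee USD:", totalFeesUSD/totalTxs) // 0.05
}
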
// GetAvgTxFee returns average transaction fee of the last 1h
|
||||
func (hdb *HistoryDB) GetAvgTxFee() (float64, error) {
|
||||
metricsTotals := &MetricsTotals{}
|
||||
err := meddler.QueryRow(
|
||||
hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
|
||||
COALESCE (MIN(tx.batch_num), 0) as batch_num
|
||||
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
|
||||
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
|
||||
if err != nil {
|
||||
return 0, tracerr.Wrap(err)
|
||||
}
|
||||
err = meddler.QueryRow(
|
||||
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
|
||||
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
|
||||
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
|
||||
if err != nil {
|
||||
return 0, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
var avgTransactionFee float64
|
||||
if metricsTotals.TotalTransactions > 0 {
|
||||
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
|
||||
} else {
|
||||
avgTransactionFee = 0
|
||||
}
|
||||
|
||||
return avgTransactionFee, nil
|
||||
if len(tokens) == 0 {
|
||||
return []TokenWithUSD{}, nil
|
||||
}
|
||||
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
)
|
||||
|
||||
var historyDB *HistoryDB
|
||||
var historyDBWithACC *HistoryDB
|
||||
|
||||
// In order to run the test you need to run a Posgres DB with
|
||||
// a database named "history" that is accessible by
|
||||
@@ -38,10 +39,12 @@ func TestMain(m *testing.M) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
historyDB = NewHistoryDB(db)
|
||||
historyDB = NewHistoryDB(db, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
|
||||
historyDBWithACC = NewHistoryDB(db, apiConnCon)
|
||||
// Run tests
|
||||
result := m.Run()
|
||||
// Close DB
|
||||
@@ -85,7 +88,7 @@ func TestBlocks(t *testing.T) {
|
||||
blocks...,
|
||||
)
|
||||
// Get all blocks from DB
|
||||
fetchedBlocks, err := historyDB.GetBlocks(fromBlock, toBlock)
|
||||
fetchedBlocks, err := historyDB.getBlocks(fromBlock, toBlock)
|
||||
assert.Equal(t, len(blocks), len(fetchedBlocks))
|
||||
// Compare generated vs getted blocks
|
||||
assert.NoError(t, err)
|
||||
@@ -245,9 +248,8 @@ func TestTokens(t *testing.T) {
|
||||
err := historyDB.AddTokens(tokens)
|
||||
assert.NoError(t, err)
|
||||
tokens = append([]common.Token{ethToken}, tokens...)
|
||||
limit := uint(10)
|
||||
// Fetch tokens
|
||||
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
|
||||
fetchedTokens, err := historyDB.GetTokensTest()
|
||||
assert.NoError(t, err)
|
||||
// Compare fetched tokens vs generated tokens
|
||||
// All the tokens should have USDUpdate setted by the DB trigger
|
||||
@@ -267,7 +269,7 @@ func TestTokens(t *testing.T) {
|
||||
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
|
||||
}
|
||||
// Fetch tokens
|
||||
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
|
||||
fetchedTokens, err = historyDB.GetTokensTest()
|
||||
assert.NoError(t, err)
|
||||
// Compare fetched tokens vs generated tokens
|
||||
// All the tokens should have USDUpdate setted by the DB trigger
|
||||
@@ -302,9 +304,8 @@ func TestTokensUTF8(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
// Work with nonUTFTokens as tokens one gets updated and non UTF-8 characters are lost
|
||||
nonUTFTokens = append([]common.Token{ethToken}, nonUTFTokens...)
|
||||
limit := uint(10)
|
||||
// Fetch tokens
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err := historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate setted by the DB trigger
@@ -324,7 +325,7 @@ func TestTokensUTF8(t *testing.T) {
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
}
// Fetch tokens
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err = historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate setted by the DB trigger
@@ -610,10 +611,10 @@ func TestTxs(t *testing.T) {
assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)

// Tx ID
assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())

// Tx From and To IDx
assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -1087,9 +1088,8 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals)
}

func TestGetMetrics(t *testing.T) {
func TestGetMetricsAPI(t *testing.T) {
test.WipeDB(historyDB.DB())

set := `
Type: Blockchain

@@ -1146,7 +1146,7 @@ func TestGetMetrics(t *testing.T) {
assert.NoError(t, err)
}

res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)

assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
@@ -1165,7 +1165,7 @@ func TestGetMetrics(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee)
}

func TestGetMetricsMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
test.WipeDB(historyDB.DB())

testUsersLen := 3
@@ -1226,7 +1226,7 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.NoError(t, err)
}

res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)

assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
@@ -1245,15 +1245,15 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee)
}

func TestGetMetricsEmpty(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDB.GetMetrics(0)
_, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err)
}

func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDB.GetAvgTxFee()
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err)
}
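Note: the hunks above rename the metrics queries (GetMetrics, GetAvgTxFee) to their *API variants and route them through historyDBWithACC, a HistoryDB handle built with an APIConnectionController. A minimal sketch of the intended call pattern, assuming that fixture (hypothetical snippet, not part of the diff):

    // Sketch: *API queries run against the controller-backed handle, so a
    // saturated API connection pool surfaces here as a timeout error rather
    // than as unbounded SQL load.
    res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
    if err != nil {
        // err may be the controller timeout, not only a SQL failure
        return err
    }
    _ = res.TransactionsPerBatch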
250  db/kvdb/kvdb.go
@@ -27,6 +27,8 @@ const (
// PathLast defines the subpath of the last Batch in the subpath
// of the StateDB
PathLast = "last"
// DefaultKeep is the default value for the Keep parameter
DefaultKeep = 128
)

var (
@@ -34,16 +36,18 @@ var (
KeyCurrentBatch = []byte("k:currentbatch")
// keyCurrentIdx is used as key in the db to store the CurrentIdx
keyCurrentIdx = []byte("k:idx")
// ErrNoLast is returned when the KVDB has been configured to not have
// a Last checkpoint but a Last method is used
ErrNoLast = fmt.Errorf("no last checkpoint")
)

// KVDB represents the Key-Value DB object
type KVDB struct {
path string
db *pebble.Storage
cfg Config
db *pebble.Storage
// CurrentIdx holds the current Idx that the BatchBuilder is using
CurrentIdx common.Idx
CurrentBatch common.BatchNum
keep int
m sync.Mutex
last *Last
}
@@ -61,13 +65,13 @@ func (k *Last) setNew() error {
defer k.rw.Unlock()
if k.db != nil {
k.db.Close()
k.db = nil
}
lastPath := path.Join(k.path, PathLast)
err := os.RemoveAll(lastPath)
if err != nil {
if err := os.RemoveAll(lastPath); err != nil {
return tracerr.Wrap(err)
}
db, err := pebble.NewPebbleStorage(path.Join(k.path, lastPath), false)
db, err := pebble.NewPebbleStorage(lastPath, false)
if err != nil {
return tracerr.Wrap(err)
}
@@ -80,6 +84,7 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
defer k.rw.Unlock()
if k.db != nil {
k.db.Close()
k.db = nil
}
lastPath := path.Join(k.path, PathLast)
if err := kvdb.MakeCheckpointFromTo(batchNum, lastPath); err != nil {
@@ -96,26 +101,48 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
func (k *Last) close() {
k.rw.Lock()
defer k.rw.Unlock()
k.db.Close()
if k.db != nil {
k.db.Close()
k.db = nil
}
}

// Config of the KVDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// At every checkpoint, check that there are no gaps between the
// checkpoints
NoGapsCheck bool
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
}
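The new kvdb.Config above replaces the old positional (path, keep) constructor arguments. A sketch of filling it in (the path is an example, not from the diff):

    cfg := kvdb.Config{
        Path:        "/tmp/hermez-kvdb", // example checkpoint directory
        Keep:        kvdb.DefaultKeep,   // prune beyond the newest 128 checkpoints
        NoGapsCheck: false,              // ListCheckpoints still rejects gaps
        NoLast:      false,              // keep the thread-safe 'last' copy open
    }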
// NewKVDB creates a new KVDB, allowing to use an in-memory or in-disk storage.
// Checkpoints older than the value defined by `keep` will be deleted.
func NewKVDB(pathDB string, keep int) (*KVDB, error) {
// func NewKVDB(pathDB string, keep int) (*KVDB, error) {
func NewKVDB(cfg Config) (*KVDB, error) {
var sto *pebble.Storage
var err error
sto, err = pebble.NewPebbleStorage(path.Join(pathDB, PathCurrent), false)
sto, err = pebble.NewPebbleStorage(path.Join(cfg.Path, PathCurrent), false)
if err != nil {
return nil, tracerr.Wrap(err)
}

var last *Last
if !cfg.NoLast {
last = &Last{
path: cfg.Path,
}
}
kvdb := &KVDB{
path: pathDB,
cfg: cfg,
db: sto,
keep: keep,
last: &Last{
path: pathDB,
},
last: last,
}
// load currentBatch
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
@@ -133,29 +160,32 @@ func NewKVDB(pathDB string, keep int) (*KVDB, error) {
}

// LastRead is a thread-safe method to query the last KVDB
func (kvdb *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
kvdb.last.rw.RLock()
defer kvdb.last.rw.RUnlock()
return fn(kvdb.last.db)
func (k *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
if k.last == nil {
return tracerr.Wrap(ErrNoLast)
}
k.last.rw.RLock()
defer k.last.rw.RUnlock()
return fn(k.last.db)
}
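With NoLast set, LastRead now fails fast instead of dereferencing a nil Last handle. A usage sketch under these semantics:

    err := db.LastRead(func(last *pebble.Storage) error {
        // read-only queries against the checkpoint of the last batch
        _, err := last.Get(kvdb.KeyCurrentBatch)
        return err
    })
    if tracerr.Unwrap(err) == kvdb.ErrNoLast {
        // this KVDB was opened with Config.NoLast = true
    }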
// DB returns the *pebble.Storage from the KVDB
func (kvdb *KVDB) DB() *pebble.Storage {
return kvdb.db
func (k *KVDB) DB() *pebble.Storage {
return k.db
}

// StorageWithPrefix returns the db.Storage with the given prefix from the
// current KVDB
func (kvdb *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
return kvdb.db.WithPrefix(prefix)
func (k *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
return k.db.WithPrefix(prefix)
}

// Reset resets the KVDB to the checkpoint at the given batchNum. Reset does
// not delete the checkpoints between old current and the new current, those
// checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them.
func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
return kvdb.reset(batchNum, true)
func (k *KVDB) Reset(batchNum common.BatchNum) error {
return k.reset(batchNum, true)
}

// reset resets the KVDB to the checkpoint at the given batchNum. Reset does
@@ -163,21 +193,19 @@ func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
// checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them. `closeCurrent` will close the currently
// opened db before doing the reset.
func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
currentPath := path.Join(kvdb.path, PathCurrent)
func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
currentPath := path.Join(k.cfg.Path, PathCurrent)

if closeCurrent {
if err := kvdb.db.Pebble().Close(); err != nil {
return tracerr.Wrap(err)
}
if closeCurrent && k.db != nil {
k.db.Close()
k.db = nil
}
// remove 'current'
err := os.RemoveAll(currentPath)
if err != nil {
if err := os.RemoveAll(currentPath); err != nil {
return tracerr.Wrap(err)
}
// remove all checkpoints > batchNum
list, err := kvdb.ListCheckpoints()
list, err := k.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
@@ -190,7 +218,7 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
}
}
for _, bn := range list[start:] {
if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -201,23 +229,27 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
if err != nil {
return tracerr.Wrap(err)
}
kvdb.db = sto
kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
kvdb.CurrentBatch = 0
if err := kvdb.last.setNew(); err != nil {
return tracerr.Wrap(err)
k.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0
if k.last != nil {
if err := k.last.setNew(); err != nil {
return tracerr.Wrap(err)
}
}

return nil
}

// copy 'batchNum' to 'current'
if err := kvdb.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
if err := k.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
return tracerr.Wrap(err)
}
// copy 'batchNum' to 'last'
if err := kvdb.last.set(kvdb, batchNum); err != nil {
return tracerr.Wrap(err)
if k.last != nil {
if err := k.last.set(k, batchNum); err != nil {
return tracerr.Wrap(err)
}
}

// open the new 'current'
@@ -225,15 +257,15 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
if err != nil {
return tracerr.Wrap(err)
}
kvdb.db = sto
k.db = sto

// get currentBatch num
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
k.CurrentBatch, err = k.GetCurrentBatch()
if err != nil {
return tracerr.Wrap(err)
}
// idx is obtained from the statedb reset
kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
k.CurrentIdx, err = k.GetCurrentIdx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -243,28 +275,28 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {

// ResetFromSynchronizer performs a reset in the KVDB getting the state from
// synchronizerKVDB for the given batchNum.
func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
if synchronizerKVDB == nil {
return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil"))
}

currentPath := path.Join(kvdb.path, PathCurrent)
if err := kvdb.db.Pebble().Close(); err != nil {
return tracerr.Wrap(err)
currentPath := path.Join(k.cfg.Path, PathCurrent)
if k.db != nil {
k.db.Close()
k.db = nil
}

// remove 'current'
err := os.RemoveAll(currentPath)
if err != nil {
if err := os.RemoveAll(currentPath); err != nil {
return tracerr.Wrap(err)
}
// remove all checkpoints
list, err := kvdb.ListCheckpoints()
list, err := k.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
for _, bn := range list {
if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -275,14 +307,14 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKV
if err != nil {
return tracerr.Wrap(err)
}
kvdb.db = sto
kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
kvdb.CurrentBatch = 0
k.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0

return nil
}

checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))

// copy synchronizer'BatchNumX' to 'BatchNumX'
if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
@@ -290,7 +322,7 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKV
}

// copy 'BatchNumX' to 'current'
err = kvdb.MakeCheckpointFromTo(batchNum, currentPath)
err = k.MakeCheckpointFromTo(batchNum, currentPath)
if err != nil {
return tracerr.Wrap(err)
}
@@ -300,15 +332,15 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKV
if err != nil {
return tracerr.Wrap(err)
}
kvdb.db = sto
k.db = sto

// get currentBatch num
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
k.CurrentBatch, err = k.GetCurrentBatch()
if err != nil {
return tracerr.Wrap(err)
}
// get currentIdx
kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
k.CurrentIdx, err = k.GetCurrentIdx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -317,8 +349,8 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKV
}

// GetCurrentBatch returns the current BatchNum stored in the KVDB
func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
cbBytes, err := kvdb.db.Get(KeyCurrentBatch)
func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) {
cbBytes, err := k.db.Get(KeyCurrentBatch)
if tracerr.Unwrap(err) == db.ErrNotFound {
return 0, nil
}
@@ -329,12 +361,12 @@ func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
}

// setCurrentBatch stores the current BatchNum in the KVDB
func (kvdb *KVDB) setCurrentBatch() error {
tx, err := kvdb.db.NewTx()
func (k *KVDB) setCurrentBatch() error {
tx, err := k.db.NewTx()
if err != nil {
return tracerr.Wrap(err)
}
err = tx.Put(KeyCurrentBatch, kvdb.CurrentBatch.Bytes())
err = tx.Put(KeyCurrentBatch, k.CurrentBatch.Bytes())
if err != nil {
return tracerr.Wrap(err)
}
@@ -345,9 +377,9 @@ func (kvdb *KVDB) setCurrentBatch() error {
}

// GetCurrentIdx returns the stored Idx from the KVDB, which is the last Idx
// used for an Account in the KVDB.
func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
idxBytes, err := kvdb.db.Get(keyCurrentIdx)
// used for an Account in the k.
func (k *KVDB) GetCurrentIdx() (common.Idx, error) {
idxBytes, err := k.db.Get(keyCurrentIdx)
if tracerr.Unwrap(err) == db.ErrNotFound {
return common.RollupConstReservedIDx, nil // 255, nil
}
@@ -358,10 +390,10 @@ func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
}

// SetCurrentIdx stores Idx in the KVDB
func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {
kvdb.CurrentIdx = idx
func (k *KVDB) SetCurrentIdx(idx common.Idx) error {
k.CurrentIdx = idx

tx, err := kvdb.db.NewTx()
tx, err := k.db.NewTx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -381,21 +413,20 @@ func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {

// MakeCheckpoint does a checkpoint at the given batchNum in the defined path.
// Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the KVDB.
func (kvdb *KVDB) MakeCheckpoint() error {
// Checkpoint of the current state of the k.
func (k *KVDB) MakeCheckpoint() error {
// advance currentBatch
kvdb.CurrentBatch++
k.CurrentBatch++

checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, kvdb.CurrentBatch))
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, k.CurrentBatch))

if err := kvdb.setCurrentBatch(); err != nil {
if err := k.setCurrentBatch(); err != nil {
return tracerr.Wrap(err)
}

// if checkpoint BatchNum already exist in disk, delete it
if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
err := os.RemoveAll(checkpointPath)
if err != nil {
if err := os.RemoveAll(checkpointPath); err != nil {
return tracerr.Wrap(err)
}
} else if err != nil && !os.IsNotExist(err) {
@@ -403,15 +434,17 @@ func (kvdb *KVDB) MakeCheckpoint() error {
}

// execute Checkpoint
if err := kvdb.db.Pebble().Checkpoint(checkpointPath); err != nil {
if err := k.db.Pebble().Checkpoint(checkpointPath); err != nil {
return tracerr.Wrap(err)
}
// copy 'CurrentBatch' to 'last'
if err := kvdb.last.set(kvdb, kvdb.CurrentBatch); err != nil {
return tracerr.Wrap(err)
if k.last != nil {
if err := k.last.set(k, k.CurrentBatch); err != nil {
return tracerr.Wrap(err)
}
}
// delete old checkpoints
if err := kvdb.deleteOldCheckpoints(); err != nil {
if err := k.deleteOldCheckpoints(); err != nil {
return tracerr.Wrap(err)
}

@@ -419,8 +452,8 @@ func (kvdb *KVDB) MakeCheckpoint() error {
}

// DeleteCheckpoint removes if exist the checkpoint of the given batchNum
func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))

if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
@@ -431,8 +464,8 @@ func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {

// ListCheckpoints returns the list of batchNums of the checkpoints, sorted.
// If there's a gap between the list of checkpoints, an error is returned.
func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
files, err := ioutil.ReadDir(kvdb.path)
func (k *KVDB) ListCheckpoints() ([]int, error) {
files, err := ioutil.ReadDir(k.cfg.Path)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -449,12 +482,12 @@ func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
}
}
sort.Ints(checkpoints)
if len(checkpoints) > 0 {
if !k.cfg.NoGapsCheck && len(checkpoints) > 0 {
first := checkpoints[0]
for _, checkpoint := range checkpoints[1:] {
first++
if checkpoint != first {
log.Errorw("GAP", "checkpoints", checkpoints)
log.Errorw("gap between checkpoints", "checkpoints", checkpoints)
return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint))
}
}
@@ -464,14 +497,14 @@ func (kvdb *KVDB) ListCheckpoints() ([]int, error) {

// deleteOldCheckpoints deletes old checkpoints when there are more than
// `s.keep` checkpoints
func (kvdb *KVDB) deleteOldCheckpoints() error {
list, err := kvdb.ListCheckpoints()
func (k *KVDB) deleteOldCheckpoints() error {
list, err := k.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
if len(list) > kvdb.keep {
for _, checkpoint := range list[:len(list)-kvdb.keep] {
if err := kvdb.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
if k.cfg.Keep > 0 && len(list) > k.cfg.Keep {
for _, checkpoint := range list[:len(list)-k.cfg.Keep] {
if err := k.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -482,8 +515,8 @@ func (kvdb *KVDB) deleteOldCheckpoints() error {
// MakeCheckpointFromTo makes a checkpoint from the current db at fromBatchNum
// to the dest folder. This method is locking, so it can be called from
// multiple places at the same time.
func (kvdb *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
source := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
// if kvdb does not have checkpoint at batchNum, return err
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
@@ -493,16 +526,15 @@ func (kvdb *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string
// synchronizer to do a reset to a batchNum at the same time as the
// pipeline is doing a txSelector.Reset and batchBuilder.Reset from
// synchronizer to the same batchNum
kvdb.m.Lock()
defer kvdb.m.Unlock()
k.m.Lock()
defer k.m.Unlock()
return pebbleMakeCheckpoint(source, dest)
}

func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); !os.IsNotExist(err) {
err := os.RemoveAll(dest)
if err != nil {
if err := os.RemoveAll(dest); err != nil {
return tracerr.Wrap(err)
}
} else if err != nil && !os.IsNotExist(err) {
@@ -513,12 +545,7 @@ func pebbleMakeCheckpoint(source, dest string) error {
if err != nil {
return tracerr.Wrap(err)
}
defer func() {
errClose := sto.Pebble().Close()
if errClose != nil {
log.Errorw("Pebble.Close", "err", errClose)
}
}()
defer sto.Close()

// execute Checkpoint
err = sto.Pebble().Checkpoint(dest)
@@ -530,7 +557,12 @@ func pebbleMakeCheckpoint(source, dest string) error {
}

// Close the DB
func (kvdb *KVDB) Close() {
kvdb.db.Close()
kvdb.last.close()
func (k *KVDB) Close() {
if k.db != nil {
k.db.Close()
k.db = nil
}
if k.last != nil {
k.last.close()
}
}
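Taken together, the changes above make every lifecycle method nil-safe for db and last, and gate pruning on cfg.Keep > 0, so Keep == 0 now means "keep everything". A condensed lifecycle sketch under those semantics:

    for i := 0; i < 4; i++ {
        if err := db.MakeCheckpoint(); err != nil { // advances CurrentBatch by one
            return err
        }
    }
    if err := db.Reset(2); err != nil { // roll back to the checkpoint of batch 2
        return err
    }
    list, err := db.ListCheckpoints() // sorted batchNums; gap-checked unless NoGapsCheck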
@@ -37,7 +37,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

db, err := NewKVDB(dir, 128)
db, err := NewKVDB(Config{Path: dir, Keep: 128})
require.NoError(t, err)

// add test key-values
@@ -72,7 +72,7 @@ func TestCheckpoints(t *testing.T) {
err = db.Reset(3)
require.NoError(t, err)

printCheckpoints(t, db.path)
printCheckpoints(t, db.cfg.Path)

// check that currentBatch is as expected after Reset
cb, err = db.GetCurrentBatch()
@@ -99,7 +99,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewKVDB(dirLocal, 128)
ldb, err := NewKVDB(Config{Path: dirLocal, Keep: 128})
require.NoError(t, err)

// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -120,7 +120,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewKVDB(dirLocal2, 128)
ldb2, err := NewKVDB(Config{Path: dirLocal2, Keep: 128})
require.NoError(t, err)

// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -139,9 +139,9 @@ func TestCheckpoints(t *testing.T) {

debug := false
if debug {
printCheckpoints(t, db.path)
printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.path)
printCheckpoints(t, db.cfg.Path)
printCheckpoints(t, ldb.cfg.Path)
printCheckpoints(t, ldb2.cfg.Path)
}
}

@@ -150,7 +150,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

db, err := NewKVDB(dir, 128)
db, err := NewKVDB(Config{Path: dir, Keep: 128})
require.NoError(t, err)

numCheckpoints := 16
@@ -181,7 +181,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))

keep := 16
db, err := NewKVDB(dir, keep)
db, err := NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)

numCheckpoints := 32
@@ -202,7 +202,7 @@ func TestGetCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))

keep := 16
db, err := NewKVDB(dir, keep)
db, err := NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)

idx, err := db.GetCurrentIdx()
@@ -211,7 +211,7 @@ func TestGetCurrentIdx(t *testing.T) {

db.Close()

db, err = NewKVDB(dir, keep)
db, err = NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)

idx, err = db.GetCurrentIdx()
@@ -227,7 +227,7 @@ func TestGetCurrentIdx(t *testing.T) {

db.Close()

db, err = NewKVDB(dir, keep)
db, err = NewKVDB(Config{Path: dir, Keep: keep})
require.NoError(t, err)

idx, err = db.GetCurrentIdx()
85  db/l2db/apiqueries.go  (new file)
@@ -0,0 +1,85 @@
package l2db

import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)

// AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
return l2db.AddAccountCreationAuth(auth)
}

// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}

// AddTxAPI inserts a tx to the pool
func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}

// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `

// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}
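Every method in this new file repeats the same guard; a condensed sketch of the idiom (the method names are placeholders, not part of the diff):

    func (l2db *L2DB) queryAPI() error { // hypothetical *API method
        cancel, err := l2db.apiConnCon.Acquire() // blocks until a slot or the timeout
        defer cancel()                           // always release the timeout context
        if err != nil {
            return tracerr.Wrap(err) // timed out waiting for a free API slot
        }
        defer l2db.apiConnCon.Release() // free the slot when the SQL work is done
        return l2db.query() // hypothetical non-API counterpart
    }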
@@ -25,17 +25,25 @@ type L2DB struct {
safetyPeriod common.BatchNum
ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool
apiConnCon *db.APIConnectionController
}

// NewL2DB creates a L2DB.
// To create it, it's needed db connection, safety period expressed in batches,
// maxTxs that the DB should have and TTL (time to live) for pending txs.
func NewL2DB(db *sqlx.DB, safetyPeriod common.BatchNum, maxTxs uint32, TTL time.Duration) *L2DB {
func NewL2DB(
db *sqlx.DB,
safetyPeriod common.BatchNum,
maxTxs uint32,
TTL time.Duration,
apiConnCon *db.APIConnectionController,
) *L2DB {
return &L2DB{
db: db,
safetyPeriod: safetyPeriod,
ttl: TTL,
maxTxs: maxTxs,
apiConnCon: apiConnCon,
}
}

@@ -47,7 +55,6 @@ func (l2db *L2DB) DB() *sqlx.DB {

// AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
// return meddler.Insert(l2db.db, "account_creation_auth", auth)
_, err := l2db.db.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`,
@@ -66,16 +73,6 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
))
}

// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}

// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
@@ -173,16 +170,6 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
}

// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `

// selectPoolTxCommon select part of queries to get common.PoolL2Tx
const selectPoolTxCommon = `SELECT tx_pool.tx_id, from_idx, to_idx, tx_pool.to_eth_addr,
tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
@@ -202,16 +189,6 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
))
}

// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}

// GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx
@@ -346,9 +323,10 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// The state of the affected txs can change form Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3) AND batch_num > $4`,
`UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
common.PoolL2TxStatePending,
common.PoolL2TxStateForging,
common.PoolL2TxStateForged,
common.PoolL2TxStateInvalid,
lastValidBatch,
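A wiring sketch for the extended constructor (limits and variable names are illustrative, not from the diff):

    apiConnCon := db.NewAPICnnectionController(10, 2*time.Second) // example limits
    // nil is accepted for internal (coordinator/synchronizer) handles that
    // never call the *API methods
    l2DB := l2db.NewL2DB(sqlDB, 10, 1000, 24*time.Hour, apiConnCon)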
@@ -21,6 +21,7 @@ import (
)

var l2DB *L2DB
var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB
var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD
@@ -34,9 +35,11 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour)
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db)
historyDB = historydb.NewHistoryDB(db, nil)
// Run tests
result := m.Run()
// Close DB
@@ -267,7 +270,7 @@ func TestStartForging(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -312,7 +315,7 @@ func TestDoneForging(t *testing.T) {

// Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -344,7 +347,7 @@ func TestInvalidate(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -385,7 +388,7 @@ func TestInvalidateOldNonces(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -460,13 +463,13 @@ func TestReorg(t *testing.T) {
err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err)
for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id)
tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
}
for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
}
@@ -537,13 +540,13 @@ func TestReorg2(t *testing.T) {
err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err)
for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id)
tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
}
for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
}
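The tests above keep two handles over the same database: l2DB (no controller) for writes and state changes, and l2DBWithACC for the *API read paths. A sketch of the resulting split (txID is hypothetical):

    if err := l2DB.Reorg(lastValidBatch); err != nil { // internal write path
        return err
    }
    fetchedTx, err := l2DBWithACC.GetTxAPI(txID) // API read path, slot-limited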
@@ -52,19 +52,40 @@ const (
// TypeBatchBuilder defines a StateDB used by the BatchBuilder, that
// generates the ExitTree and the ZKInput when processing the txs
TypeBatchBuilder = "batchbuilder"
// MaxNLevels is the maximum value of NLevels for the merkle tree,
// which comes from the fact that AccountIdx has 48 bits.
MaxNLevels = 48
)

// TypeStateDB determines the type of StateDB
type TypeStateDB string

// Config of the StateDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
// Type of StateDB (
Type TypeStateDB
// NLevels is the number of merkle tree levels in case the Type uses a
// merkle tree. If the Type doesn't use a merkle tree, NLevels should
// be 0.
NLevels int
// At every checkpoint, check that there are no gaps between the
// checkpoints
noGapsCheck bool
}
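A sketch of a filled-in statedb.Config (the path is an example; noGapsCheck is unexported and only set internally, by NewLocalStateDB):

    sdb, err := statedb.NewStateDB(statedb.Config{
        Path:    "/tmp/hermez-statedb", // example checkpoint directory
        Keep:    128,
        Type:    statedb.TypeSynchronizer, // wires a merkle tree
        NLevels: 32,                       // must be 0 for TypeTxSelector
    })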
// StateDB represents the StateDB object
type StateDB struct {
path string
Typ TypeStateDB
db *kvdb.KVDB
nLevels int
MT *merkletree.MerkleTree
keep int
cfg Config
db *kvdb.KVDB
MT *merkletree.MerkleTree
}

// Last offers a subset of view methods of the StateDB that can be
@@ -104,36 +125,40 @@ func (s *Last) GetAccounts() ([]common.Account, error) {
// NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk
// storage. Checkpoints older than the value defined by `keep` will be
// deleted.
func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
// func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
func NewStateDB(cfg Config) (*StateDB, error) {
var kv *kvdb.KVDB
var err error

kv, err = kvdb.NewKVDB(pathDB, keep)
kv, err = kvdb.NewKVDB(kvdb.Config{Path: cfg.Path, Keep: cfg.Keep,
NoGapsCheck: cfg.noGapsCheck, NoLast: cfg.NoLast})
if err != nil {
return nil, tracerr.Wrap(err)
}

var mt *merkletree.MerkleTree = nil
if typ == TypeSynchronizer || typ == TypeBatchBuilder {
mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), nLevels)
if cfg.Type == TypeSynchronizer || cfg.Type == TypeBatchBuilder {
mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), cfg.NLevels)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
if typ == TypeTxSelector && nLevels != 0 {
if cfg.Type == TypeTxSelector && cfg.NLevels != 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
}

return &StateDB{
path: pathDB,
db: kv,
nLevels: nLevels,
MT: mt,
Typ: typ,
keep: keep,
cfg: cfg,
db: kv,
MT: mt,
}, nil
}

// Type returns the StateDB configured Type
func (s *StateDB) Type() TypeStateDB {
return s.cfg.Type
}

// LastRead is a thread-safe method to query the last checkpoint of the StateDB
// via the Last type methods
func (s *StateDB) LastRead(fn func(sdbLast *Last) error) error {
@@ -179,7 +204,7 @@ func (s *StateDB) LastGetCurrentBatch() (common.BatchNum, error) {
func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
var root *big.Int
if err := s.LastRead(func(sdb *Last) error {
mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.nLevels)
mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.cfg.NLevels)
if err != nil {
return tracerr.Wrap(err)
}
@@ -195,7 +220,7 @@ func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
// Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the StateDB.
func (s *StateDB) MakeCheckpoint() error {
log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.Typ)
log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.cfg.Type)
return s.db.MakeCheckpoint()
}

@@ -230,8 +255,8 @@ func (s *StateDB) SetCurrentIdx(idx common.Idx) error {
// those checkpoints will remain in the storage, and eventually will be
// deleted when MakeCheckpoint overwrites them.
func (s *StateDB) Reset(batchNum common.BatchNum) error {
err := s.db.Reset(batchNum)
if err != nil {
log.Debugw("Making StateDB Reset", "batch", batchNum, "type", s.cfg.Type)
if err := s.db.Reset(batchNum); err != nil {
return tracerr.Wrap(err)
}
if s.MT != nil {
@@ -242,7 +267,6 @@ func (s *StateDB) Reset(batchNum common.BatchNum) error {
}
s.MT = mt
}
log.Debugw("Making StateDB Reset", "batch", batchNum)
return nil
}

@@ -461,9 +485,10 @@ type LocalStateDB struct {
// NewLocalStateDB returns a new LocalStateDB connected to the given
// synchronizerDB. Checkpoints older than the value defined by `keep` will be
// deleted.
func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeStateDB,
nLevels int) (*LocalStateDB, error) {
s, err := NewStateDB(path, keep, typ, nLevels)
func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error) {
cfg.noGapsCheck = true
cfg.NoLast = true
s, err := NewStateDB(cfg)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -478,13 +503,13 @@ func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeSta
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer {
err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db)
if err != nil {
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
return tracerr.Wrap(err)
}
// open the MT for the current s.db
if l.MT != nil {
mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT), l.MT.MaxLevels())
mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT),
l.MT.MaxLevels())
if err != nil {
return tracerr.Wrap(err)
}
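NewLocalStateDB now forces noGapsCheck and NoLast on whatever Config it receives, since a local TxSelector/BatchBuilder copy resets from the synchronizer and can legitimately hold a gapped checkpoint set. A construction sketch:

    ldb, err := statedb.NewLocalStateDB(statedb.Config{
        Path:    "/tmp/hermez-localstatedb", // example path
        Keep:    128,
        Type:    statedb.TypeBatchBuilder,
        NLevels: 32,
    }, syncStateDB) // syncStateDB: the synchronizer's *StateDB (assumed in scope)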
@@ -45,7 +45,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
require.NoError(t, err)

// test values
@@ -78,7 +78,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

// call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
require.NoError(t, err)
v, err = sdb.db.DB().Get(k0)
assert.NotNil(t, err)
@@ -116,7 +116,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
bn, err := sdb.getCurrentBatch()
require.NoError(t, err)
assert.Equal(t, common.BatchNum(0), bn)
err = sdb.db.MakeCheckpoint()
err = sdb.MakeCheckpoint()
require.NoError(t, err)
bn, err = sdb.getCurrentBatch()
require.NoError(t, err)
@@ -158,7 +158,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

// call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
require.NoError(t, err)

bn, err = sdb.getCurrentBatch()
@@ -182,7 +182,7 @@ func TestStateDBWithoutMT(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
require.NoError(t, err)

// create test accounts
@@ -236,7 +236,7 @@ func TestStateDBWithMT(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

// create test accounts
@@ -290,7 +290,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

err = sdb.Reset(0)
@@ -335,7 +335,7 @@ func TestCheckpoints(t *testing.T) {
assert.Equal(t, common.BatchNum(i+1), cb)
}

// printCheckpoints(t, sdb.path)
// printCheckpoints(t, sdb.cfg.Path)

// reset checkpoint
err = sdb.Reset(3)
@@ -371,7 +371,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewLocalStateDB(dirLocal, 128, sdb, TypeBatchBuilder, 32)
ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
require.NoError(t, err)

// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -392,28 +392,26 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewLocalStateDB(dirLocal2, 128, sdb, TypeBatchBuilder, 32)
ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
require.NoError(t, err)

// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
err = ldb2.Reset(4, true)
require.NoError(t, err)
// check that currentBatch is 4 after the Reset
cb, err = ldb2.db.GetCurrentBatch()
require.NoError(t, err)
cb = ldb2.CurrentBatch()
assert.Equal(t, common.BatchNum(4), cb)
// advance one checkpoint in ldb2
err = ldb2.db.MakeCheckpoint()
require.NoError(t, err)
cb, err = ldb2.db.GetCurrentBatch()
err = ldb2.MakeCheckpoint()
require.NoError(t, err)
cb = ldb2.CurrentBatch()
assert.Equal(t, common.BatchNum(5), cb)

debug := false
if debug {
printCheckpoints(t, sdb.path)
printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.path)
printCheckpoints(t, sdb.cfg.Path)
printCheckpoints(t, ldb.cfg.Path)
printCheckpoints(t, ldb2.cfg.Path)
}
}

@@ -421,7 +419,7 @@ func TestStateDBGetAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)

sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
require.NoError(t, err)

// create test accounts
@@ -468,7 +466,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
@@ -542,7 +540,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

numCheckpoints := 16
@@ -575,7 +573,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))

keep := 16
sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

numCheckpoints := 32
@@ -596,7 +594,7 @@ func TestCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))

keep := 16
sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

idx := sdb.CurrentIdx()
@@ -604,7 +602,7 @@ func TestCurrentIdx(t *testing.T) {

sdb.Close()

sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

idx = sdb.CurrentIdx()
@@ -618,9 +616,30 @@ func TestCurrentIdx(t *testing.T) {

sdb.Close()

sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

idx = sdb.CurrentIdx()
assert.Equal(t, common.Idx(255), idx)
}

func TestResetFromBadCheckpoint(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))

keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)

// reset from a checkpoint that doesn't exist
err = sdb.Reset(10)
require.Error(t, err)
}

@@ -18,7 +18,7 @@ func TestGetIdx(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
assert.NoError(t, err)

var sk babyjub.PrivateKey
29  db/utils.go
@@ -1,16 +1,19 @@
package db

import (
"context"
"database/sql"
"fmt"
"math/big"
"reflect"
"strings"
"time"

"github.com/gobuffalo/packr/v2"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
)
@@ -84,6 +87,32 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
return db, nil
}

// APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct {
smphr semaphore.Semaphore
timeout time.Duration
}

// NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{
smphr: semaphore.New(maxConnections),
timeout: timeout,
}
}

// Acquire reserves a SQL connection. If the connection is not acquired
// within the timeout, the function will return an error
func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
ctx, cancel := context.WithTimeout(context.Background(), acc.timeout) //nolint:govet
return cancel, acc.smphr.Acquire(ctx, 1)
}

// Release frees a SQL connection
func (acc *APIConnectionController) Release() {
acc.smphr.Release(1)
}

// initMeddler registers tags to be used to read/write from SQL DBs using meddler
func initMeddler() {
meddler.Register("bigint", BigIntMeddler{})
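The controller is a counted semaphore with a per-acquire deadline; a call-site sketch (mirrors how the l2db and historydb *API methods use it):

    cancel, err := apiConnCon.Acquire() // waits for a slot, at most `timeout`
    defer cancel()                      // release the context's timer
    if err != nil {
        return tracerr.Wrap(err) // too many concurrent API queries
    }
    defer apiConnCon.Release() // hand the slot back
    // ... run the SQL query while holding the slot ...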
@@ -254,7 +254,7 @@ type AuctionInterface interface {
//

AuctionConstants() (*common.AuctionConstants, error)
AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error)
AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error)
AuctionEventInit() (*AuctionEventInitialize, int64, error)
}
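The interface change above folds the previously returned blockHash into an input parameter. A call-site sketch of both query modes (the client variable is illustrative):

    events, err := auctionClient.AuctionEventsByBlock(blockNum, nil) // filter by number
    if err != nil {
        return err
    }
    if events == nil {
        // no Auction events in this block
    }
    // or, when a reorg-safe query is needed:
    events, err = auctionClient.AuctionEventsByBlock(0, &blockHash) // filter by hash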
@@ -797,15 +797,22 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
}

// AuctionEventsByBlock returns the events in a block that happened in the
// Auction Smart Contract and the blockHash where the eents happened. If there
// are no events in that block, blockHash is nil.
func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error) {
// Auction Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*AuctionEvents, error) {
var auctionEvents AuctionEvents
var blockHash *ethCommon.Hash

var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{
FromBlock: big.NewInt(blockNum),
ToBlock: big.NewInt(blockNum),
BlockHash: blockHash,
FromBlock: blockNumBigInt,
ToBlock: blockNumBigInt,
Addresses: []ethCommon.Address{
c.address,
},
@@ -814,15 +821,16 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e

logs, err := c.client.client.FilterLogs(context.TODO(), query)
if err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
if len(logs) > 0 {
blockHash = &logs[0].BlockHash
if len(logs) == 0 {
return nil, nil
}

for _, vLog := range logs {
if vLog.BlockHash != *blockHash {
if blockHash != nil && vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
}
switch vLog.Topics[0] {
case logAuctionNewBid:
@@ -833,7 +841,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
}
var newBid AuctionEventNewBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
newBid.BidAmount = auxNewBid.BidAmount
newBid.Slot = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
@@ -842,19 +850,19 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
case logAuctionNewSlotDeadline:
var newSlotDeadline AuctionEventNewSlotDeadline
if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, "NewSlotDeadline", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline)
case logAuctionNewClosedAuctionSlots:
var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, "NewClosedAuctionSlots", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
auctionEvents.NewClosedAuctionSlots = append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
case logAuctionNewOutbidding:
var newOutbidding AuctionEventNewOutbidding
if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding)
case logAuctionNewDonationAddress:
@@ -864,26 +872,26 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
case logAuctionNewBootCoordinator:
var newBootCoordinator AuctionEventNewBootCoordinator
if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, "NewBootCoordinator", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, newBootCoordinator)
case logAuctionNewOpenAuctionSlots:
var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, "NewOpenAuctionSlots", vLog.Data); err != nil {
return nil, nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(err)
}
auctionEvents.NewOpenAuctionSlots = append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
|
||||
case logAuctionNewAllocationRatio:
|
||||
var newAllocationRatio AuctionEventNewAllocationRatio
|
||||
if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, "NewAllocationRatio", vLog.Data); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, newAllocationRatio)
|
||||
case logAuctionSetCoordinator:
|
||||
var setCoordinator AuctionEventSetCoordinator
|
||||
if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, "SetCoordinator", vLog.Data); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
|
||||
setCoordinator.ForgerAddress = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
|
||||
@@ -891,7 +899,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
|
||||
case logAuctionNewForgeAllocated:
|
||||
var newForgeAllocated AuctionEventNewForgeAllocated
|
||||
if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, "NewForgeAllocated", vLog.Data); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
|
||||
newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
|
||||
@@ -904,7 +912,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
|
||||
}
|
||||
var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid
|
||||
if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, "NewDefaultSlotSetBid", vLog.Data); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid
|
||||
newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64()
|
||||
@@ -917,11 +925,11 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *e
|
||||
case logAuctionHEZClaimed:
|
||||
var HEZClaimed AuctionEventHEZClaimed
|
||||
if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", vLog.Data); err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
|
||||
auctionEvents.HEZClaimed = append(auctionEvents.HEZClaimed, HEZClaimed)
|
||||
}
|
||||
}
|
||||
return &auctionEvents, blockHash, nil
|
||||
return &auctionEvents, nil
|
||||
}
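
The Rollup and WDelayer clients below gain the same two query modes, so one sketch covers all three. A hypothetical helper (not in this changeset) that makes the number-vs-hash choice explicit for callers:

	// auctionEventsAt is illustrative only: it wraps the new signature so
	// the caller states which query mode it wants.
	func auctionEventsAt(c *AuctionClient, blockNum int64,
		hash *ethCommon.Hash) (*AuctionEvents, error) {
		if hash != nil {
			// by hash: blockNum is ignored
			return c.AuctionEventsByBlock(0, hash)
		}
		// by number: blockNum >= 0 and a nil hash; a nil result means
		// the block contained no Auction events
		return c.AuctionEventsByBlock(blockNum, nil)
	}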

@@ -88,7 +88,7 @@ func TestAuctionSetSlotDeadline(t *testing.T) {
	assert.Equal(t, newSlotDeadline, slotDeadline)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline)
}
@@ -109,7 +109,7 @@ func TestAuctionSetOpenAuctionSlots(t *testing.T) {
	assert.Equal(t, newOpenAuctionSlots, openAuctionSlots)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots)
}
@@ -130,7 +130,7 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
	assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
	_, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
@@ -153,7 +153,7 @@ func TestAuctionSetOutbidding(t *testing.T) {
	assert.Equal(t, newOutbidding, outbidding)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding)
	_, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst)
@@ -176,7 +176,7 @@ func TestAuctionSetAllocationRatio(t *testing.T) {
	assert.Equal(t, newAllocationRatio, allocationRatio)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio)
	_, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst)
@@ -205,7 +205,7 @@ func TestAuctionSetDonationAddress(t *testing.T) {
	assert.Equal(t, &newDonationAddress, donationAddress)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress)
	_, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst)
@@ -224,7 +224,7 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
	assert.Equal(t, &newBootCoordinator, bootCoordinator)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
	assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL)
@@ -261,7 +261,7 @@ func TestAuctionChangeDefaultSlotSetBid(t *testing.T) {
	assert.Equal(t, minBid, newInitialMinBid)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet)
	assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid)
@@ -287,7 +287,7 @@ func TestAuctionRegisterCoordinator(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress)
	assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress)
@@ -306,7 +306,7 @@ func TestAuctionBid(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount)
	assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
@@ -346,7 +346,7 @@ func TestAuctionMultiBid(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
	assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot)
@@ -376,7 +376,7 @@ func TestAuctionClaimHEZ(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := auctionClientTest.client.EthLastBlock()
	require.Nil(t, err)
	auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
	auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount)
	assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner)

@@ -264,7 +264,7 @@ type RollupInterface interface {
	//

	RollupConstants() (*common.RollupConstants, error)
	RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error)
	RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error)
	RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
	RollupEventInit() (*RollupEventInitialize, int64, error)
}
@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
	}
	fromIdxBig := big.NewInt(fromIdx)
	toIdxBig := big.NewInt(toIdx)
	depositAmountF, err := common.NewFloat16(depositAmount)
	depositAmountF, err := common.NewFloat40(depositAmount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	amountF, err := common.NewFloat16(amount)
	amountF, err := common.NewFloat40(amount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
	}
	fromIdxBig := big.NewInt(fromIdx)
	toIdxBig := big.NewInt(toIdx)
	depositAmountF, err := common.NewFloat16(depositAmount)
	depositAmountF, err := common.NewFloat40(depositAmount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	amountF, err := common.NewFloat16(amount)
	amountF, err := common.NewFloat40(amount)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
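
The Float16 amount encoding is swapped for Float40 in both hunks above. Assuming common.NewFloat40 keeps the shape of the old constructor (compress a *big.Int, returning an error when the value is not representable in the 40-bit float format), a caller-side sketch:

	// encodeAmount is illustrative only and assumes the Float40 API
	// mirrors NewFloat16: it fails for values the format cannot express.
	func encodeAmount(amount *big.Int) (common.Float40, error) {
		amountF, err := common.NewFloat40(amount)
		if err != nil {
			return 0, tracerr.Wrap(err) // amount not encodable as Float40
		}
		return amountF, nil
	}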

@@ -735,31 +735,40 @@ func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error)
	return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err)
}

// RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error) {
// RollupEventsByBlock returns the events in a block that happened in the
// Rollup Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *RollupClient) RollupEventsByBlock(blockNum int64,
	blockHash *ethCommon.Hash) (*RollupEvents, error) {
	var rollupEvents RollupEvents
	var blockHash *ethCommon.Hash

	var blockNumBigInt *big.Int
	if blockHash == nil {
		blockNumBigInt = big.NewInt(blockNum)
	}
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(blockNum),
		ToBlock:   big.NewInt(blockNum),
		BlockHash: blockHash,
		FromBlock: blockNumBigInt,
		ToBlock:   blockNumBigInt,
		Addresses: []ethCommon.Address{
			c.address,
		},
		BlockHash: nil,
		Topics:    [][]ethCommon.Hash{},
		Topics: [][]ethCommon.Hash{},
	}
	logs, err := c.client.client.FilterLogs(context.Background(), query)
	if err != nil {
		return nil, nil, tracerr.Wrap(err)
		return nil, tracerr.Wrap(err)
	}
	if len(logs) > 0 {
		blockHash = &logs[0].BlockHash
	if len(logs) == 0 {
		return nil, nil
	}

	for _, vLog := range logs {
		if vLog.BlockHash != *blockHash {
		if blockHash != nil && vLog.BlockHash != *blockHash {
			log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
			return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
			return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
		}
		switch vLog.Topics[0] {
		case logHermezL1UserTxEvent:
@@ -767,11 +776,11 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var L1UserTx RollupEventL1UserTx
			err := c.contractAbi.UnpackIntoInterface(&L1UserTxAux, "L1UserTxEvent", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			L1Tx, err := common.L1UserTxFromBytes(L1UserTxAux.L1UserTx)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			toForgeL1TxsNum := new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
			L1Tx.ToForgeL1TxsNum = &toForgeL1TxsNum
@@ -783,7 +792,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var addToken RollupEventAddToken
			err := c.contractAbi.UnpackIntoInterface(&addToken, "AddToken", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			addToken.TokenAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
			rollupEvents.AddToken = append(rollupEvents.AddToken, addToken)
@@ -791,7 +800,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var forgeBatch RollupEventForgeBatch
			err := c.contractAbi.UnpackIntoInterface(&forgeBatch, "ForgeBatch", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			forgeBatch.BatchNum = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
			forgeBatch.EthTxHash = vLog.TxHash
@@ -803,7 +812,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			}
			err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, "UpdateForgeL1L2BatchTimeout", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			rollupEvents.UpdateForgeL1L2BatchTimeout = append(rollupEvents.UpdateForgeL1L2BatchTimeout,
				RollupEventUpdateForgeL1L2BatchTimeout{
@@ -813,7 +822,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var updateFeeAddToken RollupEventUpdateFeeAddToken
			err := c.contractAbi.UnpackIntoInterface(&updateFeeAddToken, "UpdateFeeAddToken", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			rollupEvents.UpdateFeeAddToken = append(rollupEvents.UpdateFeeAddToken, updateFeeAddToken)
		case logHermezWithdrawEvent:
@@ -831,7 +840,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var updateBucketWithdraw RollupEventUpdateBucketWithdraw
			err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, "UpdateBucketWithdraw", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals
			updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64())
@@ -842,7 +851,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var withdrawalDelay RollupEventUpdateWithdrawalDelay
			err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "UpdateWithdrawalDelay", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			rollupEvents.UpdateWithdrawalDelay = append(rollupEvents.UpdateWithdrawalDelay, withdrawalDelay)
		case logHermezUpdateBucketsParameters:
@@ -850,7 +859,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var bucketsParameters RollupEventUpdateBucketsParameters
			err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, "UpdateBucketsParameters", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			for i, bucket := range bucketsParametersAux.ArrayBuckets {
				bucketsParameters.ArrayBuckets[i].CeilUSD = bucket[0]
@@ -863,7 +872,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
			var tokensExchange RollupEventUpdateTokenExchange
			err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			rollupEvents.UpdateTokenExchange = append(rollupEvents.UpdateTokenExchange, tokensExchange)
		case logHermezSafeMode:
@@ -885,7 +894,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
				bucketsParameters)
		}
	}
	return &rollupEvents, blockHash, nil
	return &rollupEvents, nil
}

// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the
@@ -893,7 +902,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethC
func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
	tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash)
	if err != nil {
		return nil, nil, tracerr.Wrap(err)
		return nil, nil, tracerr.Wrap(fmt.Errorf("TransactionByHash: %w", err))
	}
	txData := tx.Data()

@@ -932,7 +941,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
	nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
	lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
	numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
	numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
	numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
	l1UserTxsData := []byte{}
@@ -959,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
		rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
	}
	for i := 0; i < numTxsL1Coord; i++ {
		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
		var signature []byte
		v := bytesL1Coordinator[0]
		s := bytesL1Coordinator[1:33]
@@ -91,7 +91,7 @@ func TestRollupAddToken(t *testing.T) {
	require.NoError(t, err)
	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress)
@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
	args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
	l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
	require.NoError(t, err)
	numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
	numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
	for i := 0; i < numTxsL1; i++ {
		bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
		bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
		var signature []byte
		v := bytesL1Coordinator[0]
		s := bytesL1Coordinator[1:33]
@@ -174,7 +174,7 @@ func TestRollupForgeBatch(t *testing.T) {

	currentBlockNum, err = rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum)
@@ -203,7 +203,7 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
@@ -216,7 +216,7 @@ func TestRollupUpdateFeeAddToken(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken)
@@ -235,7 +235,7 @@ func TestRollupUpdateBucketsParameters(t *testing.T) {
	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	blockStampBucket = currentBlockNum
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, bucketsParameters, rollupEvents.UpdateBucketsParameters[0].ArrayBuckets)
}
@@ -246,7 +246,7 @@ func TestRollupUpdateWithdrawalDelay(t *testing.T) {
	require.NoError(t, err)
	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, newWithdrawalDelay, int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
}
@@ -263,7 +263,7 @@ func TestRollupUpdateTokenExchange(t *testing.T) {
	require.NoError(t, err)
	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, addressArray, rollupEvents.UpdateTokenExchange[0].AddressArray)
	assert.Equal(t, valueArray, rollupEvents.UpdateTokenExchange[0].ValueArray)
@@ -292,7 +292,7 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -324,7 +324,7 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -356,7 +356,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -388,7 +388,7 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -418,7 +418,7 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -447,7 +447,7 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -478,7 +478,7 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -508,7 +508,7 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -538,7 +538,7 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -569,7 +569,7 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -599,7 +599,7 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -629,7 +629,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -659,7 +659,7 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -688,7 +688,7 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -717,7 +717,7 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -747,7 +747,7 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -776,7 +776,7 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -807,7 +807,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
	assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -822,7 +822,7 @@ func TestRollupForgeBatch2(t *testing.T) {
	require.NoError(t, err)
	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum)
@@ -876,7 +876,7 @@ func TestRollupForgeBatch2(t *testing.T) {

	currentBlockNum, err = rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err = rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err = rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, int64(3), rollupEvents.ForgeBatch[0].BatchNum)
@@ -928,7 +928,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)

	assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx)
@@ -951,7 +951,7 @@ func TestRollupSafeMode(t *testing.T) {

	currentBlockNum, err := rollupClient.client.EthLastBlock()
	require.NoError(t, err)
	rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
	rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
	require.NoError(t, err)
	auxEvent := new(RollupEventSafeMode)
	assert.Equal(t, auxEvent, &rollupEvents.SafeMode[0])
@@ -134,7 +134,7 @@ type WDelayerInterface interface {
	WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error)
	WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)

	WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error)
	WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
	WDelayerConstants() (*common.WDelayerConstants, error)
	WDelayerEventInit() (*WDelayerEventInitialize, int64, error)
}
@@ -424,40 +424,47 @@ func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, e
}

// WDelayerEventsByBlock returns the events in a block that happened in the
// WDelayer Smart Contract and the blockHash where the eents happened. If
// there are no events in that block, blockHash is nil.
func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error) {
// WDelayer Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
	blockHash *ethCommon.Hash) (*WDelayerEvents, error) {
	var wdelayerEvents WDelayerEvents
	var blockHash *ethCommon.Hash

	var blockNumBigInt *big.Int
	if blockHash == nil {
		blockNumBigInt = big.NewInt(blockNum)
	}
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(blockNum),
		ToBlock:   big.NewInt(blockNum),
		BlockHash: blockHash,
		FromBlock: blockNumBigInt,
		ToBlock:   blockNumBigInt,
		Addresses: []ethCommon.Address{
			c.address,
		},
		BlockHash: nil,
		Topics:    [][]ethCommon.Hash{},
		Topics: [][]ethCommon.Hash{},
	}

	logs, err := c.client.client.FilterLogs(context.Background(), query)
	if err != nil {
		return nil, nil, tracerr.Wrap(err)
		return nil, tracerr.Wrap(err)
	}
	if len(logs) > 0 {
		blockHash = &logs[0].BlockHash
	if len(logs) == 0 {
		return nil, nil
	}

	for _, vLog := range logs {
		if vLog.BlockHash != *blockHash {
		if blockHash != nil && vLog.BlockHash != *blockHash {
			log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
			return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
			return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
		}
		switch vLog.Topics[0] {
		case logWDelayerDeposit:
			var deposit WDelayerEventDeposit
			err := c.contractAbi.UnpackIntoInterface(&deposit, "Deposit", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			deposit.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
			deposit.Token = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -468,7 +475,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
			var withdraw WDelayerEventWithdraw
			err := c.contractAbi.UnpackIntoInterface(&withdraw, "Withdraw", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			withdraw.Token = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
			withdraw.Owner = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -482,7 +489,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
			var withdrawalDelay WDelayerEventNewWithdrawalDelay
			err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "NewWithdrawalDelay", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			wdelayerEvents.NewWithdrawalDelay = append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)

@@ -490,7 +497,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
			var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal
			err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, "EscapeHatchWithdrawal", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
			escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -501,7 +508,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
			var emergencyCouncil WDelayerEventNewEmergencyCouncil
			err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, "NewEmergencyCouncil", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			wdelayerEvents.NewEmergencyCouncil = append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)

@@ -509,10 +516,10 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents,
			var governanceAddress WDelayerEventNewHermezGovernanceAddress
			err := c.contractAbi.UnpackIntoInterface(&governanceAddress, "NewHermezGovernanceAddress", vLog.Data)
			if err != nil {
				return nil, nil, tracerr.Wrap(err)
				return nil, tracerr.Wrap(err)
			}
			wdelayerEvents.NewHermezGovernanceAddress = append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
		}
	}
	return &wdelayerEvents, blockHash, nil
	return &wdelayerEvents, nil
}

@@ -52,7 +52,7 @@ func TestWDelayerSetHermezGovernanceAddress(t *testing.T) {
	assert.Equal(t, &auxAddressConst, auxAddress)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
	_, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst)
@@ -81,7 +81,7 @@ func TestWDelayerSetEmergencyCouncil(t *testing.T) {
	assert.Equal(t, &auxAddressConst, auxAddress)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, auxAddressConst, wdelayerEvents.NewEmergencyCouncil[0].NewEmergencyCouncil)
	_, err = wdelayerClientAux.WDelayerTransferEmergencyCouncil(emergencyCouncilAddressConst)
@@ -110,7 +110,7 @@ func TestWDelayerChangeWithdrawalDelay(t *testing.T) {
	assert.Equal(t, newWithdrawalDelay, withdrawalDelay)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, uint64(newWithdrawalDelay), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay)
}
@@ -124,7 +124,7 @@ func TestWDelayerDeposit(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
	assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -150,7 +150,7 @@ func TestWDelayerWithdrawal(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount)
	assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner)
@@ -166,7 +166,7 @@ func TestWDelayerSecondDeposit(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
	assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -181,7 +181,7 @@ func TestWDelayerEnableEmergencyMode(t *testing.T) {
	assert.Equal(t, true, emergencyMode)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	auxEvent := new(WDelayerEventEmergencyModeEnabled)
	assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0])
@@ -210,7 +210,7 @@ func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
	require.Nil(t, err)
	currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
	require.Nil(t, err)
	wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
	wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
	require.Nil(t, err)
	assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token)
	assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To)
go.mod
@@ -17,6 +17,7 @@ require (
	github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
	github.com/joho/godotenv v1.3.0
	github.com/lib/pq v1.8.0
	github.com/marusama/semaphore/v2 v2.4.1
	github.com/mattn/go-sqlite3 v2.0.3+incompatible
	github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
	github.com/mitchellh/copystructure v1.0.0
go.sum
@@ -415,6 +415,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
node/node.go
@@ -83,8 +83,15 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	var apiConnCon *dbUtils.APIConnectionController
	if cfg.API.Explorer || mode == ModeCoordinator {
		apiConnCon = dbUtils.NewAPICnnectionController(
			cfg.API.MaxSQLConnections,
			cfg.API.SQLConnectionTimeout.Duration,
		)
	}

	historyDB := historydb.NewHistoryDB(db)
	historyDB := historydb.NewHistoryDB(db, apiConnCon)

	ethClient, err := ethclient.Dial(cfg.Web3.URL)
	if err != nil {
@@ -164,8 +171,12 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		return nil, tracerr.Wrap(fmt.Errorf("cfg.StateDB.Keep = %v < %v, which is unsafe",
			cfg.StateDB.Keep, safeStateDBKeep))
	}
	stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, cfg.StateDB.Keep,
		statedb.TypeSynchronizer, 32)
	stateDB, err := statedb.NewStateDB(statedb.Config{
		Path:    cfg.StateDB.Path,
		Keep:    cfg.StateDB.Keep,
		Type:    statedb.TypeSynchronizer,
		NLevels: statedb.MaxNLevels,
	})
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -193,6 +204,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
			cfg.Coordinator.L2DB.SafetyPeriod,
			cfg.Coordinator.L2DB.MaxTxs,
			cfg.Coordinator.L2DB.TTL.Duration,
			apiConnCon,
		)

		// Unlock FeeAccount EthAddr in the keystore to generate the
@@ -248,14 +260,37 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
			MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
			MaxL1Tx:  common.RollupConstMaxL1Tx,
		}
		verifierIdx, err := scConsts.Rollup.FindVerifierIdx(
			cfg.Coordinator.Circuit.MaxTx,
			cfg.Coordinator.Circuit.NLevels,
		)
		if err != nil {
			return nil, tracerr.Wrap(err)
		var verifierIdx int
		if cfg.Coordinator.Debug.RollupVerifierIndex == nil {
			verifierIdx, err = scConsts.Rollup.FindVerifierIdx(
				cfg.Coordinator.Circuit.MaxTx,
				cfg.Coordinator.Circuit.NLevels,
			)
			if err != nil {
				return nil, tracerr.Wrap(err)
			}
			log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
		} else {
			verifierIdx = *cfg.Coordinator.Debug.RollupVerifierIndex
			log.Infow("Using debug verifier index from config", "verifierIdx", verifierIdx)
			if verifierIdx >= len(scConsts.Rollup.Verifiers) {
				return nil, tracerr.Wrap(
					fmt.Errorf("verifierIdx (%v) >= "+
						"len(scConsts.Rollup.Verifiers) (%v)",
						verifierIdx, len(scConsts.Rollup.Verifiers)))
			}
			verifier := scConsts.Rollup.Verifiers[verifierIdx]
			if verifier.MaxTx != cfg.Coordinator.Circuit.MaxTx ||
				verifier.NLevels != cfg.Coordinator.Circuit.NLevels {
				return nil, tracerr.Wrap(
					fmt.Errorf("Circuit config and verifier params don't match. "+
						"circuit.MaxTx = %v, circuit.NLevels = %v, "+
						"verifier.MaxTx = %v, verifier.NLevels = %v",
						cfg.Coordinator.Circuit.MaxTx, cfg.Coordinator.Circuit.NLevels,
						verifier.MaxTx, verifier.NLevels,
					))
			}
		}
		log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)

		coord, err = coordinator.NewCoordinator(
			coordinator.Config{
@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
	pass := os.Getenv("POSTGRES_PASS")
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	assert.NoError(t, err)
	historyDB := historydb.NewHistoryDB(db)
	historyDB := historydb.NewHistoryDB(db, nil)
	// Clean DB
	test.WipeDB(historyDB.DB())
	// Populate DB
@@ -46,8 +46,7 @@ func TestPriceUpdater(t *testing.T) {
	// Update prices
	pu.UpdatePrices(context.Background())
	// Check that prices have been updated
	limit := uint(10)
	fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, historydb.OrderAsc)
	fetchedTokens, err := historyDB.GetTokensTest()
	require.NoError(t, err)
	// TokenID 0 (ETH) is always on the DB
	assert.Equal(t, 2, len(fetchedTokens))
@@ -195,15 +195,16 @@ type Config struct {

// Synchronizer implements the Synchronizer type
type Synchronizer struct {
ethClient eth.ClientInterface
consts SCConsts
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
cfg Config
initVars SCVariables
startBlockNum int64
vars SCVariables
stats *StatsHolder
ethClient eth.ClientInterface
consts SCConsts
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB
cfg Config
initVars SCVariables
startBlockNum int64
vars SCVariables
stats *StatsHolder
resetStateFailed bool
}

// NewSynchronizer creates a new Synchronizer
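The only new field is resetStateFailed, a retry latch: a failed resetState marks the synchronizer dirty, and the next call repairs state before doing any work, as the following hunks show. The mechanism in isolation (a generic sketch, not the hermez-node API):

```go
package main

import "errors"

type syncer struct {
	resetStateFailed bool
}

// reset flips the latch on failure and clears it on success.
func (s *syncer) reset() error {
	if err := doReset(); err != nil {
		s.resetStateFailed = true
		return err
	}
	s.resetStateFailed = false
	return nil
}

// syncOnce refuses to sync on top of a half-reset state: it first retries
// the reset and only proceeds once the latch is clear.
func (s *syncer) syncOnce() error {
	if s.resetStateFailed {
		if err := s.reset(); err != nil {
			return err
		}
	}
	// ... normal sync work ...
	return nil
}

func doReset() error { return errors.New("simulated reset failure") }
```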
@@ -445,8 +446,10 @@ func (s *Synchronizer) init() error {
lastBlock = lastSavedBlock
}
if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(err)
}
s.resetStateFailed = false

log.Infow("Sync init block",
"syncLastBlock", s.stats.Sync.LastBlock,
@@ -462,16 +465,37 @@ func (s *Synchronizer) init() error {
return nil
}

func (s *Synchronizer) resetIntermediateState() error {
lastBlock, err := s.historyDB.GetLastBlock()
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBlock = &common.Block{}
} else if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBlock: %w", err))
}
if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(fmt.Errorf("resetState at block %v: %w", lastBlock.Num, err))
}
s.resetStateFailed = false
return nil
}

// Sync2 attempts to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB.
// If a block is synced, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block) (*common.BlockData, *int64, error) {
func (s *Synchronizer) Sync2(ctx context.Context,
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil {
return nil, nil, tracerr.Wrap(err)
}
}

var nextBlockNum int64 // next block number to sync
if lastSavedBlock == nil {
var err error
// Get lastSavedBlock from History DB
lastSavedBlock, err = s.historyDB.GetLastBlock()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -527,6 +551,20 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
}
}

defer func() {
// If there was an error during sync, reset to the last block
// in the historyDB because the historyDB is written last in
// the Sync method and is the source of consistency. This
// allows resetting the stateDB in the case a batch was
// processed but the historyDB block was not committed due to an
// error.
if err != nil {
if err2 := s.resetIntermediateState(); err2 != nil {
log.Errorw("sync revert", "err", err2)
}
}
}()

// Get data from the rollup contract
rollupData, err := s.rollupSync(ethBlock)
if err != nil {
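Note how the rewritten signature names its results: the deferred closure added in this hunk reads the named err to decide whether to revert. The pattern in isolation (generic Go, not hermez-node API):

```go
package main

import "fmt"

func sync() (err error) {
	defer func() {
		// This runs after the return value is assigned; err here is the
		// named return, so a failed sync can trigger a state revert.
		if err != nil {
			fmt.Println("revert to last consistent state:", err)
		}
	}()
	return fmt.Errorf("simulated sync failure")
}

func main() { _ = sync() }
```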
@@ -564,14 +602,14 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
}

// Group all the block data into the structs to save into HistoryDB
blockData := common.BlockData{
blockData = &common.BlockData{
Block: *ethBlock,
Rollup: *rollupData,
Auction: *auctionData,
WDelayer: *wDelayerData,
}

err = s.historyDB.AddBlockSCData(&blockData)
err = s.historyDB.AddBlockSCData(blockData)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
@@ -613,7 +651,7 @@ func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block)
)
}

return &blockData, nil, nil
return blockData, nil, nil
}

// reorg manages a reorg, updating History and State DB as needed. Keeps
@@ -645,14 +683,15 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1)

// Set History DB and State DB to the correct state
err := s.historyDB.Reorg(block.Num)
if err != nil {
if err := s.historyDB.Reorg(block.Num); err != nil {
return 0, tracerr.Wrap(err)
}

if err := s.resetState(block); err != nil {
s.resetStateFailed = true
return 0, tracerr.Wrap(err)
}
s.resetStateFailed = false

return block.Num, nil
}
@@ -722,6 +761,11 @@ func (s *Synchronizer) resetState(block *common.Block) error {
batchNum = 0
}

err = s.stateDB.Reset(batchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
}

lastL1BatchBlockNum, err := s.historyDB.GetLastL1BatchBlockNum()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastL1BatchBlockNum: %w", err))
@@ -739,11 +783,6 @@ func (s *Synchronizer) resetState(block *common.Block) error {
lastForgeL1TxsNum = &n
}

err = s.stateDB.Reset(batchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
}

s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)

if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
@@ -761,19 +800,14 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e

// Get rollup events in the block, and make sure the block hash matches
// the expected one.
rollupEvents, blockHash, err := s.ethClient.RollupEventsByBlock(blockNum)
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if blockHash == nil {
if rollupEvents == nil {
return &rollupData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Rollup events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}

var nextForgeL1TxsNum int64 // forgeL1TxsNum for the next L1Batch
nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum()
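The hash parameter removes a race: events and block hash used to come back as two results for the caller to compare, while now the client checks the expected hash atomically with the event query, and a nil events struct means "no events in this block". The same pattern repeats for the auction and wdelayer contracts below. Caller-side shape, using only signatures visible in this diff (`fetchRollupEvents` is a hypothetical wrapper):

```go
package sketch // illustrative only

import (
	ethCommon "github.com/ethereum/go-ethereum/common"

	"github.com/hermeznetwork/hermez-node/eth"
)

// fetchRollupEvents passes the expected hash so that a mid-fetch reorg
// surfaces as an error inside the client; tests pass nil to skip the check.
func fetchRollupEvents(client eth.ClientInterface, blockNum int64,
	expectedHash *ethCommon.Hash) (*eth.RollupEvents, error) {
	return client.RollupEventsByBlock(blockNum, expectedHash)
}
```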
@@ -801,7 +835,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
forgeBatchArgs, sender, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash,
evtForgeBatch.L1UserTxsLen)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, tracerr.Wrap(fmt.Errorf("RollupForgeBatchArgs: %w", err))
}

batchNum := common.BatchNum(evtForgeBatch.BatchNum)
@@ -884,6 +918,10 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
if err != nil {
return nil, tracerr.Wrap(err)
}
if s.stateDB.CurrentBatch() != batchNum {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
s.stateDB.CurrentBatch(), batchNum))
}

// Transform processed PoolL2 txs to L2 and store in BatchData
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -1066,19 +1104,14 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
var auctionData = common.NewAuctionData()

// Get auction events in the block
auctionEvents, blockHash, err := s.ethClient.AuctionEventsByBlock(blockNum)
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if blockHash == nil {
if auctionEvents == nil {
return &auctionData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Auction events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}

// Get bids
for _, evt := range auctionEvents.NewBid {
@@ -1168,19 +1201,14 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
wDelayerData := common.NewWDelayerData()

// Get wDelayer events in the block
wDelayerEvents, blockHash, err := s.ethClient.WDelayerEventsByBlock(blockNum)
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if blockHash == nil {
if wDelayerEvents == nil {
return &wDelayerData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in WDelayer events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}

for _, evt := range wDelayerEvents.Deposit {
wDelayerData.Deposits = append(wDelayerData.Deposits, common.WDelayerTransfer{

@@ -307,14 +307,14 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
require.NoError(t, err)
deleteme = append(deleteme, dir)

stateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
require.NoError(t, err)

// Init History DB
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db)
historyDB := historydb.NewHistoryDB(db, nil)
// Clear DB
test.WipeDB(historyDB.DB())


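Every statedb.NewStateDB call in this comparison moves from four positional arguments to a statedb.Config literal. Inferred purely from those call sites, the parameter struct presumably looks roughly like this (the field comments and anything beyond the four fields are assumptions, not quoted from the repo):

```go
// Sketch of statedb.Config as implied by the call sites in this diff.
type Config struct {
	// Path to the directory holding the DB and its checkpoints.
	Path string
	// Keep is how many old checkpoints to retain (128 at most call sites).
	Keep int
	// Type selects TypeSynchronizer, TypeTxSelector or TypeBatchBuilder.
	Type TypeStateDB
	// NLevels is the merkle tree depth; 0 disables the merkle tree.
	NLevels int
}
```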
@@ -44,7 +44,7 @@ func TestDebugAPI(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
require.Nil(t, err)
err = sdb.MakeCheckpoint() // Make a checkpoint to increment the batchNum
require.Nil(t, err)

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
cpy := c.nextBlock().copy()
defer func() { c.revertIfErr(err, cpy) }()

_, err = common.NewFloat16(amount)
_, err = common.NewFloat40(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
_, err = common.NewFloat16(depositAmount)
_, err = common.NewFloat40(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -1116,15 +1116,20 @@ func (c *Client) RollupConstants() (*common.RollupConstants, error) {
}

// RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
func (c *Client) RollupEventsByBlock(blockNum int64) (*eth.RollupEvents, *ethCommon.Hash, error) {
func (c *Client) RollupEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.RollupEvents, error) {
c.rw.RLock()
defer c.rw.RUnlock()

block, ok := c.blocks[blockNum]
if !ok {
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
return &block.Rollup.Events, &block.Eth.Hash, nil
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.Rollup.Events, nil
}

// RollupEventInit returns the initialize event with its corresponding block number
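With the mock now owning the comparison, a test can force the mismatch branch directly. A usage fragment (not from the repo; `c`, `t` and `blockNum` are assumed to be the test client, testing.T and block number already in scope in these tests):

```go
wrongHash := ethCommon.Hash{0x01} // deliberately not the block's hash
if _, err := c.RollupEventsByBlock(blockNum, &wrongHash); err == nil {
	t.Fatal("expected hash mismatch error")
}
// nil skips verification, which is what the updated tests below do.
events, err := c.RollupEventsByBlock(blockNum, nil)
```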
@@ -1573,15 +1578,20 @@ func (c *Client) AuctionConstants() (*common.AuctionConstants, error) {
}

// AuctionEventsByBlock returns the events in a block that happened in the Auction Smart Contract
func (c *Client) AuctionEventsByBlock(blockNum int64) (*eth.AuctionEvents, *ethCommon.Hash, error) {
func (c *Client) AuctionEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.AuctionEvents, error) {
c.rw.RLock()
defer c.rw.RUnlock()

block, ok := c.blocks[blockNum]
if !ok {
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
return &block.Auction.Events, &block.Eth.Hash, nil
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.Auction.Events, nil
}

// AuctionEventInit returns the initialize event with its corresponding block number
@@ -1789,15 +1799,20 @@ func (c *Client) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amou
}

// WDelayerEventsByBlock returns the events in a block that happened in the WDelayer Contract
func (c *Client) WDelayerEventsByBlock(blockNum int64) (*eth.WDelayerEvents, *ethCommon.Hash, error) {
func (c *Client) WDelayerEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.WDelayerEvents, error) {
c.rw.RLock()
defer c.rw.RUnlock()

block, ok := c.blocks[blockNum]
if !ok {
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
return &block.WDelayer.Events, &block.Eth.Hash, nil
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.WDelayer.Events, nil
}

// WDelayerConstants returns the Constants of the WDelayer Contract

@@ -130,7 +130,7 @@ func TestClientAuction(t *testing.T) {
blockNum, err := c.EthLastBlock()
require.Nil(t, err)

auctionEvents, _, err := c.AuctionEventsByBlock(blockNum)
auctionEvents, err := c.AuctionEventsByBlock(blockNum, nil)
require.Nil(t, err)
assert.Equal(t, 2, len(auctionEvents.NewBid))
}
@@ -171,7 +171,7 @@ func TestClientRollup(t *testing.T) {

blockNum, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, _, err := c.RollupEventsByBlock(blockNum)
rollupEvents, err := c.RollupEventsByBlock(blockNum, nil)
require.Nil(t, err)
assert.Equal(t, N, len(rollupEvents.L1UserTx))
assert.Equal(t, 1, len(rollupEvents.AddToken))
@@ -192,7 +192,7 @@ func TestClientRollup(t *testing.T) {

blockNumA, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, hashA, err := c.RollupEventsByBlock(blockNumA)
rollupEvents, err = c.RollupEventsByBlock(blockNumA, nil)
require.Nil(t, err)
assert.Equal(t, 0, len(rollupEvents.L1UserTx))
assert.Equal(t, 0, len(rollupEvents.AddToken))
@@ -205,14 +205,14 @@ func TestClientRollup(t *testing.T) {

blockNumB, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, hashB, err := c.RollupEventsByBlock(blockNumA)
rollupEventsB, err := c.RollupEventsByBlock(blockNumA, nil)
require.Nil(t, err)
assert.Equal(t, 0, len(rollupEvents.L1UserTx))
assert.Equal(t, 0, len(rollupEvents.AddToken))
assert.Equal(t, 0, len(rollupEvents.ForgeBatch))
assert.Equal(t, 0, len(rollupEventsB.L1UserTx))
assert.Equal(t, 0, len(rollupEventsB.AddToken))
assert.Equal(t, 0, len(rollupEventsB.ForgeBatch))

assert.Equal(t, blockNumA, blockNumB)
assert.NotEqual(t, hashA, hashB)
assert.NotEqual(t, rollupEvents, rollupEventsB)

// Forge again
rollupForgeBatchArgs0 := &eth.RollupForgeBatchArgs{
@@ -232,7 +232,7 @@ func TestClientRollup(t *testing.T) {

blockNum, err = c.EthLastBlock()
require.Nil(t, err)
rollupEvents, _, err = c.RollupEventsByBlock(blockNum)
rollupEvents, err = c.RollupEventsByBlock(blockNum, nil)
require.Nil(t, err)

rollupForgeBatchArgs1, sender, err := c.RollupForgeBatchArgs(rollupEvents.ForgeBatch[0].EthTxHash,

@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
})
}

hdb := historydb.NewHistoryDB(db)
hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
@@ -75,12 +75,13 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)

dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 0)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 0})
require.NoError(t, err)

txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -310,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
assert.Equal(t, "12174727174629825205577542675894290689387326670869871089988393208259924373499", h.String())
sendProofAndCheckResp(t, zki)

// batch3
@@ -333,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
assert.Equal(t, "16351950370739934361208977436603065280805499094788807090831605833717933916063", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki)

@@ -50,7 +50,7 @@ func initStateDB(t *testing.T, typ statedb.TypeStateDB) *statedb.StateDB {
require.NoError(t, err)
defer assert.Nil(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, typ, NLevels)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: typ, NLevels: NLevels})
require.NoError(t, err)
return sdb
}

@@ -127,7 +127,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat

exits := make([]processedExit, nTx)

if tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeBatchBuilder {
tp.zki = common.NewZKInputs(tp.config.ChainID, tp.config.MaxTx, tp.config.MaxL1Tx,
tp.config.MaxFeeTx, tp.config.NLevels, (tp.s.CurrentBatch() + 1).BigInt())
tp.zki.OldLastIdx = tp.s.CurrentIdx().BigInt()
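All the remaining hunks in this file are the same mechanical substitution: the exported field access tp.s.Typ becomes the accessor tp.s.Type(). A plausible accessor shape, inferred from the new statedb.Config (an assumption, not quoted from the repo):

```go
// Type returns the type of the StateDB (TypeSynchronizer, TypeTxSelector
// or TypeBatchBuilder). Sketch: with the constructor now taking a Config,
// the type presumably lives in the stored config rather than in an
// exported field.
func (s *StateDB) Type() TypeStateDB {
	return s.cfg.Type
}
```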
@@ -137,7 +137,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat

// TBD if ExitTree is only in memory or stored in disk, for the moment
// is only needed in memory
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
if err != nil {
return nil, tracerr.Wrap(err)
@@ -166,7 +166,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if err != nil {
return nil, tracerr.Wrap(err)
}
if tp.s.Typ == statedb.TypeSynchronizer {
if tp.s.Type() == statedb.TypeSynchronizer {
if createdAccount != nil {
createdAccounts = append(createdAccounts, *createdAccount)
l1usertxs[i].EffectiveFromIdx = createdAccount.Idx
@@ -195,7 +195,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
}
}
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{
exit: true,
@@ -217,7 +217,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if exitIdx != nil {
log.Error("Unexpected Exit in L1CoordinatorTx")
}
if tp.s.Typ == statedb.TypeSynchronizer {
if tp.s.Type() == statedb.TypeSynchronizer {
if createdAccount != nil {
createdAccounts = append(createdAccounts, *createdAccount)
l1coordinatortxs[i].EffectiveFromIdx = createdAccount.Idx
@@ -276,7 +276,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
// collectedFees will contain the amount of fee collected for each
// TokenID
var collectedFees map[common.TokenID]*big.Int
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
collectedFees = make(map[common.TokenID]*big.Int)
for tokenID := range coordIdxsMap {
collectedFees[tokenID] = big.NewInt(0)
@@ -317,7 +317,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}
}
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{
exit: true,
@@ -401,7 +401,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}

if tp.s.Typ == statedb.TypeTxSelector {
if tp.s.Type() == statedb.TypeTxSelector {
return nil, nil
}

@@ -436,8 +436,8 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}

if tp.s.Typ == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
if tp.s.Type() == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum
return &ProcessTxOutput{
ZKInputs: nil,
@@ -501,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
tp.zki.OnChain[tp.i] = big.NewInt(1)

// L1Txs
depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
if tx.FromBJJ != common.EmptyBJJComp {
tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)
@@ -588,7 +588,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
}

var createdAccount *common.Account
if tp.s.Typ == statedb.TypeSynchronizer &&
if tp.s.Type() == statedb.TypeSynchronizer &&
(tx.Type == common.TxTypeCreateAccountDeposit ||
tx.Type == common.TxTypeCreateAccountDepositTransfer) {
var err error
@@ -612,8 +612,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
var err error
// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
if tp.s.Typ == statedb.TypeSynchronizer {
// this should never be reached
if tp.s.Type() == statedb.TypeSynchronizer {
// this should never be reached
log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
return nil, nil, false, tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
}
@@ -676,8 +676,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
}

// if StateDB type==TypeSynchronizer, will need to add Nonce
if tp.s.Typ == statedb.TypeSynchronizer {
// as type==TypeSynchronizer, always tx.ToIdx!=0
if tp.s.Type() == statedb.TypeSynchronizer {
// as type==TypeSynchronizer, always tx.ToIdx!=0
acc, err := tp.s.GetAccount(tx.FromIdx)
if err != nil {
log.Errorw("GetAccount", "fromIdx", tx.FromIdx, "err", err)
@@ -889,8 +889,8 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
accumulated := tp.AccumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)

if tp.s.Typ == statedb.TypeSynchronizer ||
tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer ||
tp.s.Type() == statedb.TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}
@@ -1094,8 +1094,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
accumulated := tp.AccumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)

if tp.s.Typ == statedb.TypeSynchronizer ||
tp.s.Typ == statedb.TypeBatchBuilder {
if tp.s.Type() == statedb.TypeSynchronizer ||
tp.s.Type() == statedb.TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}

@@ -1,8 +1,6 @@
package txprocessor

import (
"encoding/binary"
"encoding/hex"
"io/ioutil"
"math/big"
"os"
@@ -36,7 +34,8 @@ func TestComputeEffectiveAmounts(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)

set := `
@@ -212,7 +211,8 @@ func TestProcessTxsBalances(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)

chainID := uint16(0)
@@ -358,7 +358,8 @@ func TestProcessTxsSynchronizer(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)

chainID := uint16(0)
@@ -489,7 +490,8 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)

chainID := uint16(0)
@@ -580,7 +582,8 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)

// same values as in the js test
@@ -631,22 +634,22 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
defer assert.NoError(t, os.RemoveAll(dir))

nLevels := 16
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, nLevels)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: nLevels})
assert.NoError(t, err)

users := txsets.GenerateJsUsers(t)

daMaxHex, err := hex.DecodeString("FFFF")
daMaxF40 := common.Float40(0xFFFFFFFFFF)
daMaxBI, err := daMaxF40.BigInt()
require.NoError(t, err)
daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
daMaxBI := daMaxF16.BigInt()
assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())

daMax1Hex, err := hex.DecodeString("FFFE")
daMax1F40 := common.Float40(0xFFFFFFFFFE)
require.NoError(t, err)
daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
daMax1BI := daMax1F16.BigInt()
assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
daMax1BI, err := daMax1F40.BigInt()
require.NoError(t, err)
assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())

l1Txs := []common.L1Tx{
{
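The two expected strings are easy to sanity-check by hand if Float40 packs a 5-bit exponent over a 35-bit mantissa, decoding to m·10^e. That layout is an assumption inferred from the values above (0xFFFFFFFFFF gives m = 34359738367, e = 31), not quoted from common/float40.go:

```go
package main

import (
	"fmt"
	"math/big"
)

// float40ToBigInt decodes the assumed layout: top 5 bits exponent e,
// low 35 bits mantissa m, value = m * 10^e.
func float40ToBigInt(f uint64) *big.Int {
	e := f >> 35
	m := new(big.Int).SetUint64(f & 0x07FFFFFFFF)
	exp := new(big.Int).Exp(big.NewInt(10), new(big.Int).SetUint64(e), nil)
	return m.Mul(m, exp)
}

func main() {
	// m = 34359738367, e = 31: prints 34359738367 followed by 31 zeros,
	// matching the first expected string in the test above.
	fmt.Println(float40ToBigInt(0xFFFFFFFFFF))
	// m = 34359738366, e = 31: matches the second expected string.
	fmt.Println(float40ToBigInt(0xFFFFFFFFFE))
}
```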
@@ -700,7 +703,8 @@ func initTestMultipleCoordIdxForTokenID(t *testing.T) (*TxProcessor, *til.Contex
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)

chainID := uint16(1)
@@ -798,7 +802,8 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))

sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)

chainID := uint16(1)
@@ -865,7 +870,8 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir2))

sdb2, err := statedb.NewStateDB(dir2, 128, statedb.TypeSynchronizer, 32)
sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)

tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)

File diff suppressed because one or more lines are too long
@@ -10,6 +10,7 @@ import (

ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
@@ -62,8 +63,14 @@ type TxSelector struct {
// NewTxSelector returns a *TxSelector
func NewTxSelector(coordAccount *CoordAccount, dbpath string,
synchronizerStateDB *statedb.StateDB, l2 *l2db.L2DB) (*TxSelector, error) {
localAccountsDB, err := statedb.NewLocalStateDB(dbpath, 128,
synchronizerStateDB, statedb.TypeTxSelector, 0) // without merkletree
localAccountsDB, err := statedb.NewLocalStateDB(
statedb.Config{
Path: dbpath,
Keep: kvdb.DefaultKeep,
Type: statedb.TypeTxSelector,
NLevels: 0,
},
synchronizerStateDB) // without merkletree
if err != nil {
return nil, tracerr.Wrap(err)
}

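Beyond the Config conversion, note the Keep value: the TxSelector drops the hard-coded 128 in favour of kvdb.DefaultKeep. A sketch of what that centralizes (the constant's value is an assumption based on the 128 used at every other call site in this diff):

```go
package kvdb // sketch only, not quoted from the repo

// DefaultKeep is the default number of old checkpoints to keep; call sites
// previously hard-coded 128, so that is the assumed value.
const DefaultKeep = 128
```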
@@ -29,12 +29,13 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)

dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeTxSelector, NLevels: 0})
require.NoError(t, err)

txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -105,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
})
}

hdb := historydb.NewHistoryDB(db)
hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
@@ -423,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)

expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr

// batch2
// prepare the PoolL2Txs
