Compare commits

...

9 Commits

Author SHA1 Message Date
arnaucube
6a01c0ac14 Update ZKInputs test vectors with float40 & other
- Add new AmountF parameter to ZKInputs
- Update ZKInputs test vectors with float40, checked against the circom circuits
- Small fix at eth/rollup.go: lenL1L2TxsBytes updated to the new length of Float40
2021-02-12 16:40:36 +01:00
arnaucube
63151a285c Migrate all packages to use Float40
Migrate all packages to use Float40 & Add more test vectors at common
2021-02-12 14:27:07 +01:00
arnaucube
52d4197330 Add Float40 methods
This commit adds the Float40-related methods and keeps the Float16 version,
which will be deleted in the near future once the Float40 migration is
ready.
2021-02-12 14:27:07 +01:00
arnau
ea63cba62a Merge pull request #538 from hermeznetwork/fix/txselTransferToBJJ
Fix txselector TransferToBJJ behaviour
2021-02-12 12:02:13 +01:00
Eduard S
c7e6267189 Fix txselector TransferToBJJ behaviour 2021-02-11 18:19:29 +01:00
Eduard S
2a77dac9c1 Merge pull request #536 from hermeznetwork/feature/sql-semaphore
Add semaphore for API queries to SQL
2021-02-10 13:47:27 +01:00
Arnau B
ac1fd9acf7 Add semaphore for API queries to SQL 2021-02-10 13:36:17 +01:00
arnau
1bf29636db Merge pull request #537 from hermeznetwork/fix/l2dbreorg
Fix l2db reorg of forging l2txs
2021-02-10 13:02:21 +01:00
Eduard S
2bf3b843ed Fix l2db reorg of forging l2txs 2021-02-09 17:13:29 +01:00
51 changed files with 2217 additions and 1317 deletions

View File

@@ -26,7 +26,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
return
}
// Insert to DB
if err := a.l2.AddAccountCreationAuth(commonAuth); err != nil {
if err := a.l2.AddAccountCreationAuthAPI(commonAuth); err != nil {
retSQLErr(err, c)
return
}

View File

@@ -11,6 +11,7 @@ import (
"net/http"
"os"
"strconv"
"sync"
"testing"
"time"
@@ -27,6 +28,7 @@ import (
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/test/txsets"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/require"
)
// Pendinger is an interface that allows getting last returned item ID and PendingItems to be used for building fromItem
@@ -199,7 +201,8 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
hdb := historydb.NewHistoryDB(database)
apiConnCon := db.NewAPICnnectionController(1, time.Second)
hdb := historydb.NewHistoryDB(database, apiConnCon)
if err != nil {
panic(err)
}
@@ -218,7 +221,7 @@ func TestMain(m *testing.M) {
panic(err)
}
// L2DB
l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour)
l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants)
chainID := uint16(0)
@@ -574,6 +577,82 @@ func TestMain(m *testing.M) {
os.Exit(result)
}
func TestTimeout(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
require.NoError(t, err)
// L2DB
l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
// API
apiGinTO := gin.Default()
finishWait := make(chan interface{})
startWait := make(chan interface{})
apiGinTO.GET("/wait", func(c *gin.Context) {
cancel, err := apiConnConTO.Acquire()
defer cancel()
require.NoError(t, err)
defer apiConnConTO.Release()
startWait <- nil
<-finishWait
})
// Start server
serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
go func() {
if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
require.NoError(t, err)
}
}()
_config := getConfigTest(0)
_, err = NewAPI(
true,
true,
apiGinTO,
hdbTO,
nil,
l2DBTO,
&_config,
)
require.NoError(t, err)
client := &http.Client{}
httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil)
require.NoError(t, err)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil)
require.NoError(t, err)
// Request that will get timed out
var wg sync.WaitGroup
wg.Add(1)
go func() {
// Request that will make the API busy
_, err = client.Do(httpReqWait)
require.NoError(t, err)
wg.Done()
}()
<-startWait
resp, err := client.Do(httpReq)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
defer resp.Body.Close() //nolint
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
// Unmarshal body into return struct
msg := &errorMsg{}
err = json.Unmarshal(body, msg)
require.NoError(t, err)
// Check that the error was the expected one
require.Equal(t, errSQLTimeout, msg.Message)
finishWait <- nil
// Stop server
wg.Wait()
require.NoError(t, serverTO.Shutdown(context.Background()))
require.NoError(t, databaseTO.Close())
}
func doGoodReqPaginated(
path, order string,
iterStruct Pendinger,
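
TestTimeout above exercises the new SQL connection semaphore: the /wait handler holds the single available slot, so the concurrent /tokens request cannot acquire a connection within the 100 ms budget and the API answers 503. The controller itself lives in the db package and its implementation is not part of this diff; the following is only a sketch of how such a controller could be built from a weighted semaphore plus a context timeout, keeping the NewAPICnnectionController/Acquire/Release signatures used by the test (everything else is an assumption).

package db

import (
	"context"
	"time"

	"golang.org/x/sync/semaphore"
)

// APIConnectionController bounds both the number of concurrent SQL queries
// issued by the API and how long a request may wait for a free slot.
type APIConnectionController struct {
	smphr   *semaphore.Weighted
	timeout time.Duration
}

// NewAPICnnectionController returns a controller that allows up to
// maxConnections concurrent acquisitions, each waiting at most timeout.
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
	return &APIConnectionController{
		smphr:   semaphore.NewWeighted(int64(maxConnections)),
		timeout: timeout,
	}
}

// Acquire blocks until a slot is free or the timeout expires. On timeout it
// returns the context error ("context deadline exceeded"), which is what
// retSQLErr matches against errCtxTimeout further down in this diff.
func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(context.Background(), acc.timeout)
	return cancel, acc.smphr.Acquire(ctx, 1)
}

// Release frees one slot previously taken with Acquire.
func (acc *APIConnectionController) Release() {
	acc.smphr.Release(1)
}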

View File

@@ -108,7 +108,7 @@ func (a *API) getFullBatch(c *gin.Context) {
}
// Fetch txs forged in the batch from historyDB
maxTxsPerBatch := uint(2048) //nolint:gomnd
txs, _, err := a.h.GetHistoryTxs(
txs, _, err := a.h.GetTxsAPI(
nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {

View File

@@ -30,6 +30,12 @@ const (
// Error for duplicated key
errDuplicatedKey = "Item already exists"
// Error for timeout due to SQL connection
errSQLTimeout = "The node is under heavy preasure, please try again later"
// Error message returned when context reaches timeout
errCtxTimeout = "context deadline exceeded"
)
var (
@@ -38,16 +44,20 @@ var (
)
func retSQLErr(err error, c *gin.Context) {
log.Warn("HTTP API SQL request error", "err", err)
if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error()
if errMsg == errCtxTimeout {
c.JSON(http.StatusServiceUnavailable, errorMsg{
Message: errSQLTimeout,
})
} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
// https://www.postgresql.org/docs/current/errcodes-appendix.html
if sqlErr.Code == "23505" {
c.JSON(http.StatusInternalServerError, errorMsg{
Message: errDuplicatedKey,
})
}
}
if tracerr.Unwrap(err) == sql.ErrNoRows {
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
c.JSON(http.StatusNotFound, errorMsg{
Message: err.Error(),
})
@@ -59,7 +69,7 @@ func retSQLErr(err error, c *gin.Context) {
}
func retBadReq(err error, c *gin.Context) {
log.Warn("HTTP API Bad request error", "err", err)
log.Warnw("HTTP API Bad request error", "err", err)
c.JSON(http.StatusBadRequest, errorMsg{
Message: err.Error(),
})
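
With this change a request that cannot obtain a SQL connection in time is answered with 503 Service Unavailable and the errSQLTimeout message, instead of surfacing a raw SQL error. A minimal client-side sketch of what that looks like (the address and path come from TestTimeout above; the lowercase "message" JSON tag of errorMsg is an assumption, since that struct is not part of this diff):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Query an endpoint backed by HistoryDB while the API is saturated.
	resp, err := http.Get("http://localhost:4444/tokens")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var body struct {
		Message string `json:"message"` // assumed tag of the errorMsg struct
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	// Expected under load: 503 plus the errSQLTimeout message.
	fmt.Println(resp.StatusCode, body.Message)
}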

View File

@@ -97,12 +97,12 @@ func (a *API) getSlot(c *gin.Context) {
retBadReq(err, c)
return
}
currentBlock, err := a.h.GetLastBlock()
currentBlock, err := a.h.GetLastBlockAPI()
if err != nil {
retBadReq(err, c)
return
}
auctionVars, err := a.h.GetAuctionVars()
auctionVars, err := a.h.GetAuctionVarsAPI()
if err != nil {
retBadReq(err, c)
return
@@ -200,12 +200,12 @@ func (a *API) getSlots(c *gin.Context) {
return
}
currentBlock, err := a.h.GetLastBlock()
currentBlock, err := a.h.GetLastBlockAPI()
if err != nil {
retBadReq(err, c)
return
}
auctionVars, err := a.h.GetAuctionVars()
auctionVars, err := a.h.GetAuctionVarsAPI()
if err != nil {
retBadReq(err, c)
return
@@ -220,13 +220,13 @@ func (a *API) getSlots(c *gin.Context) {
retBadReq(errors.New("It is necessary to add maxSlotNum filter"), c)
return
} else if *finishedAuction {
currentBlock, err := a.h.GetLastBlock()
currentBlock, err := a.h.GetLastBlockAPI()
if err != nil {
retBadReq(err, c)
return
}
currentSlot := a.getCurrentSlot(currentBlock.Num)
auctionVars, err := a.h.GetAuctionVars()
auctionVars, err := a.h.GetAuctionVarsAPI()
if err != nil {
retBadReq(err, c)
return

View File

@@ -141,7 +141,7 @@ func (a *API) UpdateNetworkInfo(
a.status.Network.NextForgers = nextForgers
// Update buckets withdrawals
bucketsUpdate, err := a.h.GetBucketUpdates()
bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
if tracerr.Unwrap(err) == sql.ErrNoRows {
bucketsUpdate = nil
} else if err != nil {
@@ -201,7 +201,7 @@ func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot
}}
} else {
// Get all the relevant updates from the DB
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNum(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -279,7 +279,7 @@ func (a *API) UpdateMetrics() error {
}
batchNum := a.status.Network.LastBatch.BatchNum
a.status.RUnlock()
metrics, err := a.h.GetMetrics(batchNum)
metrics, err := a.h.GetMetricsAPI(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
@@ -293,7 +293,7 @@ func (a *API) UpdateMetrics() error {
// UpdateRecommendedFee update Status.RecommendedFee information
func (a *API) UpdateRecommendedFee() error {
feeExistingAccount, err := a.h.GetAvgTxFee()
feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
if err != nil {
return tracerr.Wrap(err)
}

View File

@@ -2916,7 +2916,7 @@ components:
example: 101
l1UserTotalBytes:
type: integer
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
example: 72
maxL1UserTx:
type: integer

View File

@@ -22,7 +22,7 @@ func (a *API) getToken(c *gin.Context) {
}
tokenID := common.TokenID(*tokenIDUint)
// Fetch token from historyDB
token, err := a.h.GetToken(tokenID)
token, err := a.h.GetTokenAPI(tokenID)
if err != nil {
retSQLErr(err, c)
return
@@ -45,7 +45,7 @@ func (a *API) getTokens(c *gin.Context) {
return
}
// Fetch exits from historyDB
tokens, pendingItems, err := a.h.GetTokens(
tokens, pendingItems, err := a.h.GetTokensAPI(
tokenIDs, symbols, name, fromItem, limit, order,
)
if err != nil {

View File

@@ -34,7 +34,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
}
// Fetch txs from historyDB
txs, pendingItems, err := a.h.GetHistoryTxs(
txs, pendingItems, err := a.h.GetTxsAPI(
addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
)
if err != nil {
@@ -61,7 +61,7 @@ func (a *API) getHistoryTx(c *gin.Context) {
return
}
// Fetch tx from historyDB
tx, err := a.h.GetHistoryTx(txID)
tx, err := a.h.GetTxAPI(txID)
if err != nil {
retSQLErr(err, c)
return

View File

@@ -28,7 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
return
}
// Insert to DB
if err := a.l2.AddTx(writeTx); err != nil {
if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c)
return
}

View File

@@ -3,6 +3,8 @@ Address = "localhost:8086"
Explorer = true
UpdateMetricsInterval = "10s"
UpdateRecommendedFeeInterval = "10s"
MaxSQLConnections = 100
SQLConnectionTimeout = "2s"
[PriceUpdater]
Interval = "10s"

View File

@@ -17,6 +17,11 @@ const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollu
// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary
type AccountCreationAuth struct {

View File

@@ -24,8 +24,8 @@ const (
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 72
// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 78
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
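
The new value follows directly from the widths listed in the comment: 20 (fromEthAddr) + 32 (fromBjj-compressed) + 6 (fromIdx) + 5 (depositAmountFloat40) + 5 (amountFloat40) + 4 (tokenId) + 6 (toIdx) = 78 bytes, i.e. 6 bytes more than the previous 72 because each 2-byte Float16 amount becomes a 5-byte Float40, while the coordinator tx layout (4 + 32 + 65 = 101) is unchanged.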

View File

@@ -30,6 +30,7 @@ func (f16 Float16) Bytes() []byte {
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
// WARNING b[:2] for a b where len(b)<2 can break
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
)
func TestConversions(t *testing.T) {
func TestConversionsFloat16(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
@@ -32,14 +32,14 @@ func TestConversions(t *testing.T) {
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.Equal(t, nil, err)
assert.NoError(t, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2Float(t *testing.T) {
func TestFloorFix2FloatFloat16(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
@@ -57,10 +57,10 @@ func TestFloorFix2Float(t *testing.T) {
}
}
func TestConversionLosses(t *testing.T) {
func TestConversionLossesFloat16(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.Equal(t, nil, err)
assert.NoError(t, err)
c := b.BigInt()
assert.Equal(t, c, a)

common/float40.go Normal file (105 lines)
View File

@@ -0,0 +1,105 @@
// Package common - Float40 provides methods to work with the Hermez custom
// half-float precision, 40-bit codification, internally called Float40, which has
// been adopted to encode large integers. This is done in order to save bits when
// L2 transactions are published.
//nolint:gomnd
package common
import (
"bytes"
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
)
var (
// ErrFloat40Overflow is used when a given value overflows the maximum
// capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
ErrFloat40E31 = errors.New("Float40 error, e > 31")
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as a Float40 due to not enough precision
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
)
// Float40 represents a float in a 40 bit format, stored internally as a uint64
type Float40 uint64
// Bytes returns a byte array of length 5 with the Float40 value encoded in
// BigEndian
func (f40 Float40) Bytes() ([]byte, error) {
if f40 > maxFloat40Value {
return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
}
var f40Bytes [8]byte
binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
var b [5]byte
copy(b[:], f40Bytes[3:])
return b[:], nil
}
// Float40FromBytes returns a Float40 from a byte array of 5 bytes in BigEndian
// representation.
func Float40FromBytes(b []byte) Float40 {
var f40Bytes [8]byte
copy(f40Bytes[3:], b[:])
f40 := binary.BigEndian.Uint64(f40Bytes[:])
return Float40(f40)
}
// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
// [ e | m ]
// [ 5 bits | 35 bits ]
func (f40 Float40) BigInt() (*big.Int, error) {
// take the 5 used bytes (FF * 5)
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, err
}
e := f40Bytes[0] & 0xF8 >> 3 // take the first 5 bits
m := f40Uint64 & 0x07_FF_FF_FF_FF // take the remaining 35 bits
exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
return r, nil
}
// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
// of loss during the encoding.
func NewFloat40(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
!bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
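
A minimal usage sketch of the new package (the module path is taken from the imports earlier in this diff; the printed value matches the test vectors in common/float40_test.go below, where 123000000 appears as 6*0x800000000 + 123 because the exponent occupies the top 5 bits and the mantissa the low 35):

package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// Encode 123000000 = 123 * 10^6 as a Float40: e=6, m=123.
	f40, err := common.NewFloat40(big.NewInt(123000000))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", uint64(f40)) // 0x300000007b == 6*0x800000000 + 123

	// Serialize to 5 BigEndian bytes and decode back.
	b, err := f40.Bytes()
	if err != nil {
		panic(err)
	}
	decoded, err := common.Float40FromBytes(b).BigInt()
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // 123000000
}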

common/float40_test.go Normal file (95 lines)
View File

@@ -0,0 +1,95 @@
package common
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConversionsFloat40(t *testing.T) {
testVector := map[Float40]string{
6*0x800000000 + 123: "123000000",
2*0x800000000 + 4545: "454500",
30*0x800000000 + 10235: "10235000000000000000000000000000000",
0x000000000: "0",
0x800000000: "0",
0x0001: "1",
0x0401: "1025",
0x800000000 + 1: "10",
0xFFFFFFFFFF: "343597383670000000000000000000000000000000",
}
for test := range testVector {
fix, err := test.BigInt()
require.NoError(t, err)
assert.Equal(t, fix.String(), testVector[test])
bi, ok := new(big.Int).SetString(testVector[test], 10)
require.True(t, ok)
fl, err := NewFloat40(bi)
assert.NoError(t, err)
fx2, err := fl.BigInt()
require.NoError(t, err)
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestExpectError(t *testing.T) {
testVector := map[string]error{
"9922334455000000000000000000000000000000": nil,
"9922334455000000000000000000000000000001": ErrFloat40NotEnoughPrecission,
"9922334454999999999999999999999999999999": ErrFloat40NotEnoughPrecission,
"42949672950000000000000000000000000000000": nil,
"99223344556573838487575": ErrFloat40NotEnoughPrecission,
"992233445500000000000000000000000000000000": ErrFloat40E31,
"343597383670000000000000000000000000000000": nil,
"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383700000000000000000000000000000000": ErrFloat40E31,
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], err)
}
}
func BenchmarkFloat40(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Can not convert string to *big.Int")
}
return bigInt
}
type pair struct {
Float40 Float40
BigInt *big.Int
}
testVector := []pair{
{6*0x800000000 + 123, newBigInt("123000000")},
{2*0x800000000 + 4545, newBigInt("454500")},
{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
{0x000000000, newBigInt("0")},
{0x800000000, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1025")},
{0x800000000 + 1, newBigInt("10")},
{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
}
b.Run("NewFloat40()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float40.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = testVector[i%len(testVector)].Float40.BigInt()
}
})
}

View File

@@ -11,18 +11,11 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub"
)
const (
// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
L1UserTxBytesLen = 72
// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
L1CoordinatorTxBytesLen = 101
)
// L1Tx is a struct that represents a L1 tx
type L1Tx struct {
// Stored in DB: mandatory fields
// TxID (12 bytes) for L1Tx is:
// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of:
// bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type:
@@ -179,45 +172,38 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
var b [29]byte
// b[0:7] empty: no ToBJJSign, no fee, no nonce
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[13:19], toIdxBytes[:])
copy(b[11:17], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
}
// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat16 | Fee ]
// [ fromIdx | toIdx | amountFloat40 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
@@ -231,13 +217,17 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
if tx.EffectiveAmount != nil {
amountFloat16, err := NewFloat16(tx.EffectiveAmount)
amountFloat40, err := NewFloat40(tx.EffectiveAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
// fee = 0 (as is L1Tx) b[10:11]
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
}
// fee = 0 (as is L1Tx)
return b[:], nil
}
@@ -247,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+2]
amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength]
l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -260,8 +250,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
return nil, tracerr.Wrap(err)
}
l1tx.ToIdx = toIdx
l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
return &l1tx, nil
l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
return &l1tx, err
}
// BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -269,7 +259,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [L1UserTxBytesLen]byte
var b [RollupConstL1UserTotalBytes]byte
copy(b[0:20], tx.FromEthAddr.Bytes())
if tx.FromBJJ != EmptyBJJComp {
pkCompL := tx.FromBJJ
@@ -281,22 +271,33 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
return nil, tracerr.Wrap(err)
}
copy(b[52:58], fromIdxBytes[:])
depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[58:60], depositAmountFloat16.Bytes())
amountFloat16, err := NewFloat16(tx.Amount)
depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[60:62], amountFloat16.Bytes())
copy(b[62:66], tx.TokenID.Bytes())
copy(b[58:63], depositAmountFloat40Bytes)
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[63:68], amountFloat40Bytes)
copy(b[68:72], tx.TokenID.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[66:72], toIdxBytes[:])
copy(b[72:78], toIdxBytes[:])
return b[:], nil
}
@@ -313,7 +314,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
if tx.UserOrigin {
return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
}
var b [L1CoordinatorTxBytesLen]byte
var b [RollupConstL1CoordinatorTotalBytes]byte
v := compressedSignatureBytes[64]
s := compressedSignatureBytes[32:64]
r := compressedSignatureBytes[0:32]
@@ -329,7 +330,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != L1UserTxBytesLen {
if len(b) != RollupConstL1UserTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
}
@@ -347,13 +348,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return nil, tracerr.Wrap(err)
}
tx.FromIdx = fromIdx
tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
tx.Amount = Float16FromBytes(b[60:62]).BigInt()
tx.TokenID, err = TokenIDFromBytes(b[62:66])
tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[66:72])
tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.TokenID, err = TokenIDFromBytes(b[68:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[72:78])
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -368,7 +375,7 @@ func signHash(data []byte) []byte {
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != L1CoordinatorTxBytesLen {
if len(b) != RollupConstL1CoordinatorTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
}
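
The data-availability encoding grows accordingly: BytesDataAvailability now allocates (2*nLevels + 40 + 8) / 8 bytes per transaction, i.e. two account indexes, a 5-byte Float40 amount and a 1-byte fee. A tiny illustrative helper (the function name is not part of the codebase) to check the two tree depths used in the tests below:

package main

import "fmt"

// daLen returns the bytes used by the per-tx data-availability encoding
// [ fromIdx | toIdx | amountFloat40 | fee ] for a given tree depth.
func daLen(nLevels uint32) uint32 { return (2*nLevels + 40 + 8) / 8 }

func main() {
	fmt.Println(daLen(16)) // 10 bytes, e.g. "ffffffffffffffffff00" in the tests
	fmt.Println(daLen(32)) // 14 bytes, e.g. "ffffffffffffffffffffffffff00"
}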

View File

@@ -50,64 +50,110 @@ func TestNewL1CoordinatorTx(t *testing.T) {
}
func TestL1TxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation (using
// PoolL2Tx values)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
}
chainID := uint16(0)
txCompressedData, err := tx.TxCompressedData(chainID)
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
assert.NoError(t, err)
expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
// test vector value generated from javascript implementation
expectedStr := "7307597389635308713748674793997299267459594577423"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = L1Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err)
expectedStr = "7b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
}
func TestBytesDataAvailability(t *testing.T) {
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
EffectiveAmount: amount,
}
txCompressedData, err := tx.BytesDataAvailability(32)
txCompressedData, err := tx.BytesDataAvailability(16)
assert.NoError(t, err)
assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
tx = L1Tx{
FromIdx: 2,
ToIdx: 3,
EffectiveAmount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
}
func TestL1TxFromDataAvailability(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
}
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
FromIdx: 2,
ToIdx: 3,
EffectiveAmount: big.NewInt(4),
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
EffectiveAmount: amount,
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 0,
FromIdx: 0,
EffectiveAmount: big.NewInt(0),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 635,
FromIdx: 296,
EffectiveAmount: big.NewInt(1000000000000000000),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -172,12 +218,10 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
UserOrigin: true,
}
expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
require.NoError(t, err)
encodedData, err := l1Tx.BytesUser()
require.NoError(t, err)
assert.Equal(t, expected, encodedData)
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
assert.Equal(t, expected, hex.EncodeToString(encodedData))
}
func TestL1CoordinatorTxByteParsers(t *testing.T) {

View File

@@ -89,11 +89,15 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
// TokenID
b = append(b, tx.TokenID.Bytes()[:]...)
// Amount
amountFloat16, err := NewFloat16(tx.Amount)
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
}
b = append(b, amountFloat16.Bytes()...)
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return txID, tracerr.Wrap(err)
}
b = append(b, amountFloat40Bytes...)
// Nonce
nonceBytes, err := tx.Nonce.Bytes()
if err != nil {
@@ -170,11 +174,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
}
// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat16 | Fee ]
// [ fromIdx | toIdx | amountFloat40 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
@@ -188,13 +192,16 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
}
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
amountFloat16, err := NewFloat16(tx.Amount)
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
b[idxLen*2+2] = byte(tx.Fee)
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
return b[:], nil
}
@@ -219,7 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err)
}
tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
tx.Fee = FeeSelector(b[idxLen*2+2])
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
return tx, nil
}
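
The same layout applies to L2 transactions, with the fee byte following the 5-byte amount. A short sketch decoding one of the compatibility vectors from the test below (module path as before; for nLevels=32 the indexes take 4 bytes each):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// [ 4B fromIdx | 4B toIdx | 5B amountFloat40 | 1B fee ]
	b, err := hex.DecodeString("00000101000001000004b571c0c9")
	if err != nil {
		panic(err)
	}
	tx, err := common.L2TxFromBytesDataAvailability(b, 32)
	if err != nil {
		panic(err)
	}
	// fromIdx=257, toIdx=256, amount=79000000 (0x0004b571c0: e=0, m=79000000), fee=201
	fmt.Println(tx.FromIdx, tx.ToIdx, tx.Amount, tx.Fee)
}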

View File

@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 4444,
@@ -90,25 +90,85 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
}
func TestL2TxByteParsers(t *testing.T) {
amount := new(big.Int)
amount.SetString("79000000", 10)
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
l2Tx := &L2Tx{
ToIdx: 256,
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
Amount: amount,
FromIdx: 257,
Fee: 201,
Fee: (1 << 8) - 1,
}
// Data from the compatibility test
expected := "00000101000001002b16c9"
encodedData, err := l2Tx.BytesDataAvailability(32)
expected := "ffffffffffffffffffff"
encodedData, err := l2Tx.BytesDataAvailability(16)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected = "ffffffffffffffffffffffffffff"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 0,
Amount: big.NewInt(0),
Fee: 0,
}
expected = "0000000000000000000000000000"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 1061,
Amount: big.NewInt(420000000000),
Fee: 127,
}
expected = "000004250000000010fa56ea007f"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 256,
FromIdx: 257,
Amount: big.NewInt(79000000),
Fee: 201,
}
expected = "00000101000001000004b571c0c9"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
}

View File

@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float40
Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float40
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -122,18 +122,13 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
var b [29]byte
toBJJSign := byte(0)
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -149,19 +144,18 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[13:19], toIdxBytes[:])
copy(b[11:17], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -170,9 +164,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int {
var b [31]byte
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
var b [29]byte
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi
}
@@ -182,7 +176,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 40 bits ] amountFloat40 // 5 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -190,11 +184,16 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil {
tx.Amount = big.NewInt(0)
}
amountFloat16, err := NewFloat16(tx.Amount)
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [25]byte
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
toBJJSign := byte(0)
if tx.ToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -210,17 +209,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
copy(b[11:16], amountFloat40Bytes)
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[13:19], toIdxBytes[:])
copy(b[16:22], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[19:25], fromIdxBytes[:])
copy(b[22:28], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -236,7 +235,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits ] rqUserFee // 1 byte
// [ 40 bits ] rqNonce // 5 bytes
// [ 32 bits ] rqTokenID // 4 bytes
// [ 16 bits ] rqAmountFloat16 // 2 bytes
// [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
@@ -244,11 +243,16 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0)
}
amountFloat16, err := NewFloat16(tx.RqAmount)
amountFloat40, err := NewFloat40(tx.RqAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [25]byte
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
rqToBJJSign := byte(0)
if tx.RqToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -264,17 +268,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.RqTokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
copy(b[11:16], amountFloat40Bytes)
toIdxBytes, err := tx.RqToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[13:19], toIdxBytes[:])
copy(b[16:22], toIdxBytes[:])
fromIdxBytes, err := tx.RqFromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[19:25], fromIdxBytes[:])
copy(b[22:28], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -287,7 +291,22 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
if err != nil {
return nil, tracerr.Wrap(err)
}
// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
var e1B [25]byte
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
copy(e1B[5:25], toEthAddr.Bytes())
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -299,7 +318,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
}
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
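
For reference, the updated byte offsets add up as follows: TxCompressedData now packs 1 (toBJJSign) + 1 (fee) + 5 (nonce) + 4 (tokenID) + 6 (toIdx) + 6 (fromIdx) + 2 (chainID) + 4 (signatureConstant) = 29 bytes, the amount having moved out of it; TxCompressedDataV2 and RqTxCompressedDataV2 pack 1 + 1 + 5 + 4 + 5 (amountFloat40) + 6 + 6 = 28 bytes; and the new e1 element fed to the Poseidon hash in HashToSign is 5 (amountFloat40) + 20 (toEthAddr) = 25 bytes.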

View File

@@ -21,80 +21,104 @@ func TestNewPoolL2Tx(t *testing.T) {
}
poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
}
func TestTxCompressedData(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
// test vectors values generated from javascript implementation
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
Nonce: 6,
ToBJJ: sk.Public().Compress(),
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
Nonce: (1 << 40) - 1,
Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
// using a different chainID
txCompressedData, err = tx.TxCompressedData(uint16(100))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
require.NoError(t, err)
expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err := tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public().Compress(),
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
Nonce: 0,
Fee: 0,
ToBJJ: skNegative.Public().Compress(),
}
rqTxCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public().Compress(),
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
assert.Equal(t, "0", txCompressedDataV2.String())
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = PoolL2Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
txCompressedData, err = tx.TxCompressedData(uint16(1))
require.NoError(t, err)
expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
TokenID: 4,
Nonce: 5,
Fee: 6,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
}
func TestRqTxCompressedDataV2(t *testing.T) {
@@ -113,19 +137,16 @@ func TestRqTxCompressedDataV2(t *testing.T) {
txCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestHashToSign(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
@@ -136,7 +157,7 @@ func TestHashToSign(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
}
func TestVerifyTxSignature(t *testing.T) {
@@ -156,7 +177,7 @@ func TestVerifyTxSignature(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()

View File

@@ -102,6 +102,8 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"`
// OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -112,8 +114,8 @@ type ZKInputs struct {
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float16
DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// DepositAmountF encoded as float40
DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx]
// FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -326,6 +328,7 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx)
@@ -477,7 +480,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -496,7 +499,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ {
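
Both length changes in ToHashGlobalData correspond exactly to the wider amounts: each L1 tx encodes two Float40 amounts (deposit and transfer), so its data grows by 2 * (40 - 16) = 48 bits, from 2*MaxLevels + 480 to 2*MaxLevels + 528; each L2 tx encodes one amount plus an 8-bit fee, so it grows by 40 - 16 = 24 bits, from 2*NLevels + 24 to 2*NLevels + 48.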

View File

@@ -204,9 +204,14 @@ type Node struct {
// UpdateMetricsInterval is the interval between updates of the
// API metrics
UpdateMetricsInterval Duration
// UpdateMetricsInterval is the interval between updates of the
// UpdateRecommendedFeeInterval is the interval between updates of the
// recommended fees
UpdateRecommendedFeeInterval Duration
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
Debug struct {
// APIAddress is the address where the debugAPI will listen if

View File

@@ -348,13 +348,13 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
}
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if c.pipeline != nil {
c.pipeline.Stop(c.ctx)
c.pipeline = nil
}
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
// TODO: Check that we are in a slot in which we can't forge
}

View File

@@ -105,8 +105,8 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
historyDB := historydb.NewHistoryDB(db)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err)

View File

@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
test.WipeDB(db)
return l2db.NewL2DB(db, 10, 100, 24*time.Hour)
return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
}
func newStateDB(t *testing.T) *statedb.LocalStateDB {

db/historydb/apiqueries.go (new file, 1026 lines added)
File diff suppressed because it is too large.

View File

@@ -1,8 +1,6 @@
package historydb
import (
"errors"
"fmt"
"math"
"math/big"
"strings"
@@ -11,7 +9,6 @@ import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
//nolint:errcheck // driver for postgres DB
@@ -31,11 +28,12 @@ const (
// HistoryDB persist the historic of the rollup
type HistoryDB struct {
db *sqlx.DB
apiConnCon *db.APIConnectionController
}
// NewHistoryDB initialize the DB
func NewHistoryDB(db *sqlx.DB) *HistoryDB {
return &HistoryDB{db: db}
func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
return &HistoryDB{db: db, apiConnCon: apiConnCon}
}
// DB returns a pointer to the L2DB.db. This method should be used only for
@@ -87,8 +85,8 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
}
// GetBlocks retrieve blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) GetBlocks(from, to int64) ([]common.Block, error) {
// getBlocks retrieve blocks from the DB, given a range of block numbers defined by from and to
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block
err := meddler.QueryAll(
hdb.db, &blocks,
@@ -166,116 +164,6 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil
}
// GetBatchAPI return the batch with the given batchNum
func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
batch := &BatchAPI{}
return batch, tracerr.Wrap(meddler.QueryRow(
hdb.db, batch,
`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
block.timestamp, block.hash,
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE batch_num = $1;`, batchNum,
))
}
// GetBatchesAPI return the batches applying the given filters
func (hdb *HistoryDB) GetBatchesAPI(
minBatchNum, maxBatchNum, slotNum *uint,
forgerAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]BatchAPI, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
block.timestamp, block.hash,
COALESCE ((SELECT COUNT(*) FROM tx WHERE batch_num = batch.batch_num), 0) AS forged_txs,
count(*) OVER() AS total_items
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num `
// Apply filters
nextIsAnd := false
// minBatchNum filter
if minBatchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.batch_num > ? "
args = append(args, minBatchNum)
nextIsAnd = true
}
// maxBatchNum filter
if maxBatchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.batch_num < ? "
args = append(args, maxBatchNum)
nextIsAnd = true
}
// slotNum filter
if slotNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.slot_num = ? "
args = append(args, slotNum)
nextIsAnd = true
}
// forgerAddr filter
if forgerAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "batch.forger_addr = ? "
args = append(args, forgerAddr)
nextIsAnd = true
}
// pagination
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "batch.item_id >= ? "
} else {
queryStr += "batch.item_id <= ? "
}
args = append(args, fromItem)
}
queryStr += "ORDER BY batch.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
batchPtrs := []*BatchAPI{}
if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
if len(batches) == 0 {
return batches, 0, nil
}
return batches, batches[0].TotalItems - uint64(len(batches)), nil
}
// GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch
@@ -375,22 +263,6 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
return db.SlicePtrsToSlice(bids).([]common.Bid), tracerr.Wrap(err)
}
// GetBestBidAPI returns the best bid in specific slot by slotNum
func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
bid := &BidAPI{}
err := meddler.QueryRow(
hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON bid.bidder_addr = c.bidder_addr
INNER JOIN coordinator ON c.item_id = coordinator.item_id
WHERE slot_num = $1 ORDER BY item_id DESC LIMIT 1;`, slotNum,
)
return *bid, tracerr.Wrap(err)
}
// GetBestBidCoordinator returns the forger address of the highest bidder in a slot by slotNum
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{}
@@ -416,133 +288,6 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat
return bidCoord, tracerr.Wrap(err)
}
// GetBestBidsAPI returns the best bid in specific slot by slotNum
func (hdb *HistoryDB) GetBestBidsAPI(
minSlotNum, maxSlotNum *int64,
bidderAddr *ethCommon.Address,
limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator
queryStr := `SELECT b.*, block.timestamp, coordinator.forger_addr, coordinator.url,
COUNT(*) OVER() AS total_items FROM (
SELECT slot_num, MAX(item_id) as maxitem
FROM bid GROUP BY slot_num
)
AS x INNER JOIN bid AS b ON b.item_id = x.maxitem
INNER JOIN block ON b.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON b.bidder_addr = c.bidder_addr
INNER JOIN coordinator ON c.item_id = coordinator.item_id
WHERE (b.slot_num >= ? AND b.slot_num <= ?)`
args = append(args, minSlotNum)
args = append(args, maxSlotNum)
// Apply filters
if bidderAddr != nil {
queryStr += " AND b.bidder_addr = ? "
args = append(args, bidderAddr)
}
queryStr += " ORDER BY b.slot_num "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
if limit != nil {
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
}
query = hdb.db.Rebind(queryStr)
bidPtrs := []*BidAPI{}
if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// log.Debug(query)
bids := db.SlicePtrsToSlice(bidPtrs).([]BidAPI)
if len(bids) == 0 {
return bids, 0, nil
}
return bids, bids[0].TotalItems - uint64(len(bids)), nil
}
// GetBidsAPI return the bids applying the given filters
func (hdb *HistoryDB) GetBidsAPI(
slotNum *int64, bidderAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN each bid with the latest update of each coordinator
queryStr := `SELECT bid.*, block.timestamp, coord.forger_addr, coord.url,
COUNT(*) OVER() AS total_items
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON bid.bidder_addr = c.bidder_addr
INNER JOIN coordinator coord ON c.item_id = coord.item_id `
// Apply filters
nextIsAnd := false
// slotNum filter
if slotNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "bid.slot_num = ? "
args = append(args, slotNum)
nextIsAnd = true
}
// bidder filter
if bidderAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "bid.bidder_addr = ? "
args = append(args, bidderAddr)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "bid.item_id >= ? "
} else {
queryStr += "bid.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY bid.item_id "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)
bids := []*BidAPI{}
if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(bids) == 0 {
return []BidAPI{}, 0, nil
}
return db.SlicePtrsToSlice(bids).([]BidAPI), bids[0].TotalItems - uint64(len(bids)), nil
}
// AddCoordinators insert Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
@@ -708,77 +453,6 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
}
// GetTokens returns a list of tokens from the DB
func (hdb *HistoryDB) GetTokens(
ids []common.TokenID, symbols []string, name string, fromItem,
limit *uint, order string,
) ([]TokenWithUSD, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT * , COUNT(*) OVER() AS total_items FROM token `
// Apply filters
nextIsAnd := false
if len(ids) > 0 {
queryStr += "WHERE token_id IN (?) "
nextIsAnd = true
args = append(args, ids)
}
if len(symbols) > 0 {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "symbol IN (?) "
args = append(args, symbols)
nextIsAnd = true
}
if name != "" {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "name ~ ? "
args = append(args, name)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "item_id >= ? "
} else {
queryStr += "item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY item_id "
if order == OrderAsc {
queryStr += "ASC "
} else {
queryStr += "DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)
tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(tokens) == 0 {
return []TokenWithUSD{}, 0, nil
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), uint64(len(tokens)) - tokens[0].TotalItems, nil
}
// GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokenSymbols []string
@@ -951,153 +625,6 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
))
}
// GetHistoryTx returns a tx from the DB given a TxID
func (hdb *HistoryDB) GetHistoryTx(txID common.TxID) (*TxAPI, error) {
// Warning: amount_success and deposit_amount_success have true as default for
// performance reasons. The expected default value is false (when txs are unforged)
// this case is handled at the function func (tx TxAPI) MarshalJSON() ([]byte, error)
tx := &TxAPI{}
err := meddler.QueryRow(
hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.deposit_amount, tx.deposit_amount_usd, tx.deposit_amount_success, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE tx.id = $1;`, txID,
)
return tx, tracerr.Wrap(err)
}
// GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct
// and pagination info
func (hdb *HistoryDB) GetHistoryTxs(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp,
tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
fromItem, limit *uint, order string,
) ([]TxAPI, uint64, error) {
// Warning: amount_success and deposit_amount_success have true as default for
// performance reasons. The expected default value is false (when txs are unforged)
// this case is handled at the function func (tx TxAPI) MarshalJSON() ([]byte, error)
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.deposit_amount, tx.deposit_amount_usd, tx.deposit_amount_success, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp, count(*) OVER() AS total_items
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE (tx.from_eth_addr = ? OR tx.to_eth_addr = ?) "
nextIsAnd = true
args = append(args, ethAddr, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE (tx.from_bjj = ? OR tx.to_bjj = ?) "
nextIsAnd = true
args = append(args, bjj, bjj)
}
// tokenID filter
if tokenID != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.token_id = ? "
args = append(args, tokenID)
nextIsAnd = true
}
// idx filter
if idx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
args = append(args, idx, idx)
nextIsAnd = true
}
// batchNum filter
if batchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.batch_num = ? "
args = append(args, batchNum)
nextIsAnd = true
}
// txType filter
if txType != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.type = ? "
args = append(args, txType)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "tx.item_id >= ? "
} else {
queryStr += "tx.item_id <= ? "
}
args = append(args, fromItem)
nextIsAnd = true
}
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.batch_num IS NOT NULL "
// pagination
queryStr += "ORDER BY tx.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
txsPtrs := []*TxAPI{}
if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
if len(txs) == 0 {
return txs, 0, nil
}
return txs, txs[0].TotalItems - uint64(len(txs)), nil
}
// GetAllExits returns all exit from the DB
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
var exits []*common.ExitInfo
@@ -1110,137 +637,6 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
return db.SlicePtrsToSlice(exits).([]common.ExitInfo), tracerr.Wrap(err)
}
// GetExitAPI returns a exit from the DB
func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, error) {
exit := &ExitAPI{}
err := meddler.QueryRow(
hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
exit_tree.delayed_withdraw_request, exit_tree.delayed_withdrawn,
token.token_id, token.item_id AS token_item_id,
token.eth_block_num AS token_block, token.eth_addr AS token_eth_addr, token.name, token.symbol,
token.decimals, token.usd, token.usd_update
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id
WHERE exit_tree.batch_num = $1 AND exit_tree.account_idx = $2;`, batchNum, idx,
)
return exit, tracerr.Wrap(err)
}
// GetExitsAPI returns a list of exits from the DB and pagination info
func (hdb *HistoryDB) GetExitsAPI(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp, tokenID *common.TokenID,
idx *common.Idx, batchNum *uint, onlyPendingWithdraws *bool,
fromItem, limit *uint, order string,
) ([]ExitAPI, uint64, error) {
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
exit_tree.delayed_withdraw_request, exit_tree.delayed_withdrawn,
token.token_id, token.item_id AS token_item_id,
token.eth_block_num AS token_block, token.eth_addr AS token_eth_addr, token.name, token.symbol,
token.decimals, token.usd, token.usd_update, COUNT(*) OVER() AS total_items
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE account.eth_addr = ? "
nextIsAnd = true
args = append(args, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE account.bjj = ? "
nextIsAnd = true
args = append(args, bjj)
}
// tokenID filter
if tokenID != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "account.token_id = ? "
args = append(args, tokenID)
nextIsAnd = true
}
// idx filter
if idx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.account_idx = ? "
args = append(args, idx)
nextIsAnd = true
}
// batchNum filter
if batchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.batch_num = ? "
args = append(args, batchNum)
nextIsAnd = true
}
// onlyPendingWithdraws
if onlyPendingWithdraws != nil {
if *onlyPendingWithdraws {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "(exit_tree.instant_withdrawn IS NULL AND exit_tree.delayed_withdrawn IS NULL) "
nextIsAnd = true
}
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "exit_tree.item_id >= ? "
} else {
queryStr += "exit_tree.item_id <= ? "
}
args = append(args, fromItem)
// nextIsAnd = true
}
// pagination
queryStr += "ORDER BY exit_tree.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
exits := []*ExitAPI{}
if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(exits) == 0 {
return []ExitAPI{}, 0, nil
}
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
}
// GetAllL1UserTxs returns all L1UserTxs from the DB
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx
@@ -1381,19 +777,6 @@ func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}
// GetBucketUpdates retrieves latest values for each bucket
func (hdb *HistoryDB) GetBucketUpdates() ([]BucketUpdateAPI, error) {
var bucketUpdates []*BucketUpdateAPI
err := meddler.QueryAll(
hdb.db, &bucketUpdates,
`SELECT num_bucket, withdrawals FROM bucket_update
WHERE item_id in(SELECT max(item_id) FROM bucket_update
group by num_bucket)
ORDER BY num_bucket ASC;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
}
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 {
return nil
@@ -1698,274 +1081,22 @@ func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*Coordina
return coordinator, tracerr.Wrap(err)
}
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address,
fromItem, limit *uint, order string,
) ([]CoordinatorAPI, uint64, error) {
var query string
var args []interface{}
queryStr := `SELECT coordinator.*, COUNT(*) OVER() AS total_items
FROM coordinator INNER JOIN (
SELECT MAX(item_id) AS item_id FROM coordinator
GROUP BY bidder_addr
) c ON coordinator.item_id = c.item_id `
// Apply filters
nextIsAnd := false
if bidderAddr != nil {
queryStr += "WHERE bidder_addr = ? "
nextIsAnd = true
args = append(args, bidderAddr)
}
if forgerAddr != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "forger_addr = ? "
nextIsAnd = true
args = append(args, forgerAddr)
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "coordinator.item_id >= ? "
} else {
queryStr += "coordinator.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY coordinator.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
coordinators := []*CoordinatorAPI{}
if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(coordinators) == 0 {
return []CoordinatorAPI{}, 0, nil
}
return db.SlicePtrsToSlice(coordinators).([]CoordinatorAPI),
coordinators[0].TotalItems - uint64(len(coordinators)), nil
}
// AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars))
}
// GetAuctionVars returns auction variables
func (hdb *HistoryDB) GetAuctionVars() (*common.AuctionVariables, error) {
auctionVars := &common.AuctionVariables{}
err := meddler.QueryRow(
hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
)
return auctionVars, tracerr.Wrap(err)
}
// GetAuctionVarsUntilSetSlotNum returns all the updates of the auction vars
// from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNum(slotNum int64, maxItems int) ([]MinBidInfo, error) {
auctionVars := []*MinBidInfo{}
query := `
SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
WHERE default_slot_set_bid_slot_num < $1
ORDER BY default_slot_set_bid_slot_num DESC
LIMIT $2;
`
err := meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
if err != nil {
// GetTokensTest is used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{}
if err := meddler.QueryAll(
hdb.db, &tokens,
"SELECT * FROM TOKEN",
); err != nil {
return nil, tracerr.Wrap(err)
}
return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
}
// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
account := &AccountAPI{}
err := meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
if err != nil {
return nil, tracerr.Wrap(err)
}
return account, nil
}
// GetAccountsAPI returns a list of accounts from the DB and pagination info
func (hdb *HistoryDB) GetAccountsAPI(
tokenIDs []common.TokenID, ethAddr *ethCommon.Address,
bjj *babyjub.PublicKeyComp, fromItem, limit *uint, order string,
) ([]AccountAPI, uint64, error) {
if ethAddr != nil && bjj != nil {
return nil, 0, tracerr.Wrap(errors.New("ethAddr and bjj are incompatible"))
}
var query string
var args []interface{}
queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
COUNT(*) OVER() AS total_items
FROM account INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE account.eth_addr = ? "
nextIsAnd = true
args = append(args, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE account.bjj = ? "
nextIsAnd = true
args = append(args, bjj)
}
// tokenID filter
if len(tokenIDs) > 0 {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "account.token_id IN (?) "
args = append(args, tokenIDs)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "account.item_id >= ? "
} else {
queryStr += "account.item_id <= ? "
}
args = append(args, fromItem)
}
// pagination
queryStr += "ORDER BY account.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query, argsQ, err := sqlx.In(queryStr, args...)
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
query = hdb.db.Rebind(query)
accounts := []*AccountAPI{}
if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(accounts) == 0 {
return []AccountAPI{}, 0, nil
}
return db.SlicePtrsToSlice(accounts).([]AccountAPI),
accounts[0].TotalItems - uint64(len(accounts)), nil
}
// GetMetrics returns metrics
func (hdb *HistoryDB) GetMetrics(lastBatchNum common.BatchNum) (*Metrics, error) {
metricsTotals := &MetricsTotals{}
metrics := &Metrics{}
err := meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
if err != nil {
return nil, tracerr.Wrap(err)
}
seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
// Avoid dividing by 0
if seconds == 0 {
seconds++
}
metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
} else {
metrics.TransactionsPerBatch = float64(0)
}
err = meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
if metricsTotals.TotalBatches > 0 {
metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
} else {
metrics.BatchFrequency = 0
}
if metricsTotals.TotalTransactions > 0 {
metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
metrics.AvgTransactionFee = 0
}
err = meddler.QueryRow(
hdb.db, metrics,
`SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
if err != nil {
return nil, tracerr.Wrap(err)
}
return metrics, nil
}
// GetAvgTxFee returns average transaction fee of the last 1h
func (hdb *HistoryDB) GetAvgTxFee() (float64, error) {
metricsTotals := &MetricsTotals{}
err := meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
if err != nil {
return 0, tracerr.Wrap(err)
}
err = meddler.QueryRow(
hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
if err != nil {
return 0, tracerr.Wrap(err)
}
var avgTransactionFee float64
if metricsTotals.TotalTransactions > 0 {
avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
} else {
avgTransactionFee = 0
}
return avgTransactionFee, nil
if len(tokens) == 0 {
return []TokenWithUSD{}, nil
}
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
}

View File

@@ -22,6 +22,7 @@ import (
)
var historyDB *HistoryDB
var historyDBWithACC *HistoryDB
// In order to run the test you need to run a Postgres DB with
// a database named "history" that is accessible by
@@ -38,10 +39,12 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
historyDB = NewHistoryDB(db)
historyDB = NewHistoryDB(db, nil)
if err != nil {
panic(err)
}
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, apiConnCon)
// Run tests
result := m.Run()
// Close DB
@@ -85,7 +88,7 @@ func TestBlocks(t *testing.T) {
blocks...,
)
// Get all blocks from DB
fetchedBlocks, err := historyDB.GetBlocks(fromBlock, toBlock)
fetchedBlocks, err := historyDB.getBlocks(fromBlock, toBlock)
assert.Equal(t, len(blocks), len(fetchedBlocks))
// Compare generated vs fetched blocks
assert.NoError(t, err)
@@ -245,9 +248,8 @@ func TestTokens(t *testing.T) {
err := historyDB.AddTokens(tokens)
assert.NoError(t, err)
tokens = append([]common.Token{ethToken}, tokens...)
limit := uint(10)
// Fetch tokens
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err := historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger
@@ -267,7 +269,7 @@ func TestTokens(t *testing.T) {
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
}
// Fetch tokens
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err = historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger
@@ -302,9 +304,8 @@ func TestTokensUTF8(t *testing.T) {
assert.NoError(t, err)
// Work with nonUTFTokens as tokens one gets updated and non UTF-8 characters are lost
nonUTFTokens = append([]common.Token{ethToken}, nonUTFTokens...)
limit := uint(10)
// Fetch tokens
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err := historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger
@@ -324,7 +325,7 @@ func TestTokensUTF8(t *testing.T) {
assert.NoError(t, historyDB.UpdateTokenValue(token.Symbol, value))
}
// Fetch tokens
fetchedTokens, _, err = historyDB.GetTokens(nil, nil, "", nil, &limit, OrderAsc)
fetchedTokens, err = historyDB.GetTokensTest()
assert.NoError(t, err)
// Compare fetched tokens vs generated tokens
// All the tokens should have USDUpdate set by the DB trigger
@@ -610,10 +611,10 @@ func TestTxs(t *testing.T) {
assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)
// Tx ID
assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())
// Tx From and To IDx
assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)
@@ -1087,9 +1088,8 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
assert.Equal(t, escapeHatchWithdrawals, dbEscapeHatchWithdrawals)
}
func TestGetMetrics(t *testing.T) {
func TestGetMetricsAPI(t *testing.T) {
test.WipeDB(historyDB.DB())
set := `
Type: Blockchain
@@ -1146,7 +1146,7 @@ func TestGetMetrics(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch)
@@ -1165,7 +1165,7 @@ func TestGetMetrics(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
test.WipeDB(historyDB.DB())
testUsersLen := 3
@@ -1226,7 +1226,7 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.NoError(t, err)
}
res, err := historyDB.GetMetrics(common.BatchNum(numBatches))
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches))
assert.NoError(t, err)
assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001)
@@ -1245,15 +1245,15 @@ func TestGetMetricsMoreThan24Hours(t *testing.T) {
assert.Equal(t, float64(0), res.AvgTransactionFee)
}
func TestGetMetricsEmpty(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDB.GetMetrics(0)
_, err := historyDBWithACC.GetMetricsAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDB.GetAvgTxFee()
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err)
}

db/l2db/apiqueries.go (new file, 85 lines added)
View File

@@ -0,0 +1,85 @@
package l2db
import (
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
return l2db.AddAccountCreationAuth(auth)
}
// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}
// AddTxAPI inserts a tx to the pool
func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
cancel, err := l2db.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}
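Every *API method in this file follows the same pattern: acquire a slot from the connection controller (failing if none frees up within the configured timeout), run the query, release the slot. A minimal caller sketch, with illustrative variable names that are not part of this diff:
tx, err := l2DBWithACC.GetTxAPI(txID)
if err != nil {
	// err may come from Acquire timing out (no free SQL slot within
	// SQLConnectionTimeout) rather than from the SQL query itself.
	return err
}
log.Debug(tx.State)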

View File

@@ -25,17 +25,25 @@ type L2DB struct {
safetyPeriod common.BatchNum
ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool
apiConnCon *db.APIConnectionController
}
// NewL2DB creates a L2DB.
// To create it, it's needed db connection, safety period expressed in batches,
// maxTxs that the DB should have and TTL (time to live) for pending txs.
func NewL2DB(db *sqlx.DB, safetyPeriod common.BatchNum, maxTxs uint32, TTL time.Duration) *L2DB {
func NewL2DB(
db *sqlx.DB,
safetyPeriod common.BatchNum,
maxTxs uint32,
TTL time.Duration,
apiConnCon *db.APIConnectionController,
) *L2DB {
return &L2DB{
db: db,
safetyPeriod: safetyPeriod,
ttl: TTL,
maxTxs: maxTxs,
apiConnCon: apiConnCon,
}
}
@@ -47,7 +55,6 @@ func (l2db *L2DB) DB() *sqlx.DB {
// AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
// return meddler.Insert(l2db.db, "account_creation_auth", auth)
_, err := l2db.db.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`,
@@ -66,16 +73,6 @@ func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.Accoun
))
}
// GetAccountCreationAuthAPI returns an account creation authorization from the DB
func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCreationAuthAPI, error) {
auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr,
))
}
// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
@@ -173,16 +170,6 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx))
}
// selectPoolTxAPI select part of queries to get PoolL2TxRead
const selectPoolTxAPI = `SELECT tx_pool.tx_id, hez_idx(tx_pool.from_idx, token.symbol) AS from_idx, tx_pool.effective_from_eth_addr,
tx_pool.effective_from_bjj, hez_idx(tx_pool.to_idx, token.symbol) AS to_idx, tx_pool.effective_to_eth_addr,
tx_pool.effective_to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, tx_pool.batch_num, hez_idx(tx_pool.rq_from_idx, token.symbol) AS rq_from_idx,
hez_idx(tx_pool.rq_to_idx, token.symbol) AS rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
token.item_id AS token_item_id, token.eth_block_num, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
// selectPoolTxCommon select part of queries to get common.PoolL2Tx
const selectPoolTxCommon = `SELECT tx_pool.tx_id, from_idx, to_idx, tx_pool.to_eth_addr,
tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
@@ -202,16 +189,6 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
))
}
// GetTxAPI return the specified Tx in PoolTxAPI format
func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx,
selectPoolTxAPI+"WHERE tx_id = $1;",
txID,
))
}
// GetPendingTxs return all the pending txs of the L2DB, that have a non NULL AbsoluteFee
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx
@@ -347,8 +324,9 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3) AND batch_num > $4`,
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
common.PoolL2TxStatePending,
common.PoolL2TxStateForging,
common.PoolL2TxStateForged,
common.PoolL2TxStateInvalid,
lastValidBatch,
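Adding the invalid state widens the reset: txs that were invalidated in a rolled-back batch now return to pending as well, instead of staying invalid with a stale batch_num. A rough sketch of the expected effect, in the style of TestReorg below (invalidatedTxID is illustrative):
require.NoError(t, l2DB.Reorg(lastValidBatch))
tx, err := l2DBWithACC.GetTxAPI(invalidatedTxID) // invalidated in a batch > lastValidBatch
require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
assert.Nil(t, tx.BatchNum)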

View File

@@ -21,6 +21,7 @@ import (
)
var l2DB *L2DB
var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB
var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD
@@ -34,9 +35,11 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour)
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db)
historyDB = historydb.NewHistoryDB(db, nil)
// Run tests
result := m.Run()
// Close DB
@@ -267,7 +270,7 @@ func TestStartForging(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -312,7 +315,7 @@ func TestDoneForging(t *testing.T) {
// Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -344,7 +347,7 @@ func TestInvalidate(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -385,7 +388,7 @@ func TestInvalidateOldNonces(t *testing.T) {
assert.NoError(t, err)
// Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
@@ -460,13 +463,13 @@ func TestReorg(t *testing.T) {
err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err)
for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id)
tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
}
for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
}
@@ -537,13 +540,13 @@ func TestReorg2(t *testing.T) {
err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err)
for _, id := range reorgedTxIDs {
tx, err := l2DB.GetTxAPI(id)
tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State)
}
for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DB.GetTxAPI(id)
fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
}

View File

@@ -1,16 +1,19 @@
package db
import (
"context"
"database/sql"
"fmt"
"math/big"
"reflect"
"strings"
"time"
"github.com/gobuffalo/packr/v2"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
)
@@ -84,6 +87,32 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
return db, nil
}
// APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct {
smphr semaphore.Semaphore
timeout time.Duration
}
// NewAPICnnectionController initialize APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{
smphr: semaphore.New(maxConnections),
timeout: timeout,
}
}
// Acquire reserves a SQL connection. If the connection is not acquired
// within the timeout, the function will return an error
func (acc *APIConnectionController) Acquire() (context.CancelFunc, error) {
ctx, cancel := context.WithTimeout(context.Background(), acc.timeout) //nolint:govet
return cancel, acc.smphr.Acquire(ctx, 1)
}
// Release frees a SQL connection
func (acc *APIConnectionController) Release() {
acc.smphr.Release(1)
}
// initMeddler registers tags to be used to read/write from SQL DBs using meddler
func initMeddler() {
meddler.Register("bigint", BigIntMeddler{})
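A minimal usage sketch of the controller, assuming the semaphore blocks Acquire until a unit is free or the request's context expires (the values are illustrative):
acc := NewAPICnnectionController(1, 100*time.Millisecond)
cancelA, err := acc.Acquire() // succeeds immediately: one slot is available
defer cancelA()
// err == nil here
cancelB, err := acc.Acquire() // no slot left: blocks, then errors once the 100ms timeout elapses
defer cancelB()
if err != nil {
	// the API layer should reject the request instead of queueing the query
}
acc.Release() // frees the slot taken by the first Acquire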

View File

@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
}
fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat16(depositAmount)
depositAmountF, err := common.NewFloat40(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountF, err := common.NewFloat16(amount)
amountF, err := common.NewFloat40(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
}
fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat16(depositAmount)
depositAmountF, err := common.NewFloat40(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountF, err := common.NewFloat16(amount)
amountF, err := common.NewFloat40(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -939,9 +939,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
FeeIdxCoordinator: []common.Idx{},
}
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
l1UserTxsData := []byte{}
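For the verifier configuration used in the tests below (nLevels = 32) each L1/L2 tx in the forge calldata now takes (32/8)*2 + 5 + 1 = 14 bytes: 4 bytes per index, 5 bytes for the Float40 amount and 1 byte for the fee, where the Float16 encoding previously gave (32/8)*2 + 2 + 1 = 11 bytes.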
@@ -968,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
}
for i := 0; i < numTxsL1Coord; i++ {
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]

View File

@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
require.NoError(t, err)
numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
for i := 0; i < numTxsL1; i++ {
bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]

go.mod (1 line changed)
View File

@@ -17,6 +17,7 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0

go.sum (3 lines changed)
View File

@@ -415,6 +415,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=

View File

@@ -83,8 +83,15 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err != nil {
return nil, tracerr.Wrap(err)
}
var apiConnCon *dbUtils.APIConnectionController
if cfg.API.Explorer || mode == ModeCoordinator {
apiConnCon = dbUtils.NewAPICnnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
}
historyDB := historydb.NewHistoryDB(db)
historyDB := historydb.NewHistoryDB(db, apiConnCon)
ethClient, err := ethclient.Dial(cfg.Web3.URL)
if err != nil {
@@ -197,6 +204,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon,
)
// Unlock FeeAccount EthAddr in the keystore to generate the

View File

@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
assert.NoError(t, err)
historyDB := historydb.NewHistoryDB(db)
historyDB := historydb.NewHistoryDB(db, nil)
// Clean DB
test.WipeDB(historyDB.DB())
// Populate DB
@@ -46,8 +46,7 @@ func TestPriceUpdater(t *testing.T) {
// Update prices
pu.UpdatePrices(context.Background())
// Check that prices have been updated
limit := uint(10)
fetchedTokens, _, err := historyDB.GetTokens(nil, nil, "", nil, &limit, historydb.OrderAsc)
fetchedTokens, err := historyDB.GetTokensTest()
require.NoError(t, err)
// TokenID 0 (ETH) is always on the DB
assert.Equal(t, 2, len(fetchedTokens))

View File

@@ -314,7 +314,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db)
historyDB := historydb.NewHistoryDB(db, nil)
// Clear DB
test.WipeDB(historyDB.DB())

View File

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
cpy := c.nextBlock().copy()
defer func() { c.revertIfErr(err, cpy) }()
_, err = common.NewFloat16(amount)
_, err = common.NewFloat40(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
_, err = common.NewFloat16(depositAmount)
_, err = common.NewFloat40(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}

View File

@@ -142,10 +142,12 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
// same values than in the js test
users = GenerateJsUsers(t)
depositAmount, err := common.Float40(10400).BigInt()
require.Nil(t, err)
l1UserTxs = []common.L1Tx{
{
FromIdx: 0,
DepositAmount: big.NewInt(16000000),
DepositAmount: depositAmount,
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: users[0].BJJ.Public().Compress(),

View File

@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
})
}
hdb := historydb.NewHistoryDB(db)
hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String())
sendProofAndCheckResp(t, zki)
// batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki)

View File

@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
os.Exit(exitVal)
}
const MaxTx = 376
const MaxTx = 352
const NLevels = 32
const MaxL1Tx = 256
const MaxFeeTx = 64
@@ -61,6 +61,7 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
return
}
log.Infof("sending proof to %s", proofServerURL)
// Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err)

View File

@@ -501,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
tp.zki.OnChain[tp.i] = big.NewInt(1)
// L1Txs
depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
if tx.FromBJJ != common.EmptyBJJComp {
tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)
@@ -515,6 +515,20 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1)
}
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// for the L1Tx types that make use of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
}
switch tx.Type {
@@ -657,6 +671,11 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
tp.zki.OnChain[tp.i] = big.NewInt(0)
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
tp.zki.NewAccount[tp.i] = big.NewInt(0)
// L2Txs
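
For illustration, the guard in ProcessL1Tx above can be read as a lookup table (assuming the TxType constants used in this diff are of type common.TxType): only the listed L1 tx types use the Amount field, so only they populate a ZKInputs.AmountF slot.

package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

// l1TxUsesAmount mirrors the condition in ProcessL1Tx above.
var l1TxUsesAmount = map[common.TxType]bool{
	common.TxTypeForceTransfer:                true,
	common.TxTypeDepositTransfer:              true,
	common.TxTypeCreateAccountDepositTransfer: true,
	common.TxTypeForceExit:                    true,
}

func main() {
	fmt.Println(l1TxUsesAmount[common.TxTypeCreateAccountDeposit]) // false: no AmountF slot needed
}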

View File

@@ -1,8 +1,6 @@
package txprocessor
import (
"encoding/binary"
"encoding/hex"
"io/ioutil"
"math/big"
"os"
@@ -642,17 +640,16 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
users := txsets.GenerateJsUsers(t)
daMaxHex, err := hex.DecodeString("FFFF")
daMaxF40 := common.Float40(0xFFFFFFFFFF)
daMaxBI, err := daMaxF40.BigInt()
require.NoError(t, err)
daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
daMaxBI := daMaxF16.BigInt()
assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())
daMax1Hex, err := hex.DecodeString("FFFE")
daMax1F40 := common.Float40(0xFFFFFFFFFE)
require.NoError(t, err)
daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
daMax1BI := daMax1F16.BigInt()
assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
daMax1BI, err := daMax1F40.BigInt()
require.NoError(t, err)
assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())
l1Txs := []common.L1Tx{
{
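
As a sanity check on the expected strings above, a small standalone decoder reproduces both values under the assumption that Float40 packs a 5-bit base-10 exponent in the high bits and a 35-bit mantissa in the low bits (value = mantissa * 10^exponent); this layout is inferred from the test values, not taken from the common package:

package main

import (
	"fmt"
	"math/big"
)

// decodeFloat40 interprets f as 5 exponent bits (high) and 35 mantissa bits (low).
func decodeFloat40(f uint64) *big.Int {
	mantissa := new(big.Int).SetUint64(f & 0x7FFFFFFFF) // low 35 bits
	exponent := new(big.Int).SetUint64(f >> 35)         // high 5 bits
	scale := new(big.Int).Exp(big.NewInt(10), exponent, nil)
	return mantissa.Mul(mantissa, scale)
}

func main() {
	fmt.Println(decodeFloat40(0xFFFFFFFFFF)) // 343597383670000000000000000000000000000000
	fmt.Println(decodeFloat40(0xFFFFFFFFFE)) // 343597383660000000000000000000000000000000
}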

File diff suppressed because one or more lines are too long

View File

@@ -3,7 +3,6 @@ package txselector
// current: very simple version of TxSelector
import (
"bytes"
"fmt"
"math/big"
"sort"
@@ -236,7 +235,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue
}
@@ -261,7 +261,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -273,7 +275,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -291,18 +294,31 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debug(err)
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
if accAuth != nil && l1CoordinatorTx != nil {
if l1CoordinatorTx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
// contain a valid Ethereum address.
// Otherwise only create the account if we have
// the corresponding authorization
if validL2Tx.ToEthAddr == common.FFAddr {
accAuths = append(accAuths, common.EmptyEthSignature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
} else if accAuth != nil {
accAuths = append(accAuths, accAuth.Signature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}
}
if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx)
}
@@ -314,8 +330,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
"ToIdx", l2Txs[i].ToIdx)
// Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
l2Txs[i].ToIdx)
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+
"ToIdx: %d", l2Txs[i].ToIdx)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -327,7 +343,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
"does not correspond to the Account.EthAddr. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -341,7 +359,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
"does not correspond to the Account.BJJ. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -415,7 +435,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
log.Error(err)
// Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
continue
}
@@ -471,8 +491,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth
if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
// case: ToEthAddr is neither 0x00 nor 0xff
if l2Tx.ToBJJ != common.EmptyBJJComp {
// case: ToBJJ!=0:
@@ -528,8 +547,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
DepositAmount: big.NewInt(0),
Type: common.TxTypeCreateAccountDeposit,
}
} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
l2Tx.ToBJJ != common.EmptyBJJComp {
} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
// if an idx exists for EthAddr&BJJ, use it
_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
l2Tx.TokenID)
@@ -555,7 +573,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
}
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
// L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
}
return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -564,7 +583,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ {
if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
if l1CoordinatorTxs[i].FromEthAddr == addr &&
l1CoordinatorTxs[i].TokenID == tokenID &&
l1CoordinatorTxs[i].FromBJJ == bjj {
return true
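
The switch from bytes.Equal to == above relies on ethCommon.Address and babyjub.PublicKeyComp being fixed-size byte array types, which Go compares element-wise with ==. A minimal standalone illustration (the Address type below is a stand-in, not the go-ethereum type):

package main

import "fmt"

// Address is a stand-in for a fixed-size byte array type such as
// ethCommon.Address ([20]byte) or babyjub.PublicKeyComp.
type Address [20]byte

func main() {
	a := Address{0xff}
	b := Address{0xff}
	// Go arrays (unlike slices) are comparable: == compares every element,
	// so the explicit bytes.Equal calls are not needed.
	fmt.Println(a == b)         // true
	fmt.Println(a == Address{}) // false: differs from the zero address
}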

View File

@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
@@ -48,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
BJJ: coordUser.BJJ.Public().Compress(),
AccountCreationAuth: nil,
}
fmt.Printf("%v", coordAccount)
// fmt.Printf("%v\n", coordAccount)
auth := common.AccountCreationAuth{
EthAddr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(),
@@ -106,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
})
}
hdb := historydb.NewHistoryDB(db)
hdb := historydb.NewHistoryDB(db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
@@ -424,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
// batch2
// prepare the PoolL2Txs
@@ -497,3 +497,134 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestTransferToBjj(t *testing.T) {
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
CreateAccountDeposit(1) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as they will be set by directly
// generating the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
addTokens(t, tc, txsel.l2db.DB())
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to create some accounts with positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// The transfer is ToBJJ to a BJJ-only account that doesn't exist,
// so the coordinator will create it via an L1CoordTx.
batchPoolL2 := `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// Now the BJJ-only account for B is already created, so the transfer
// happens without an L1CoordTx that creates the user account.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// The transfer is now ToBJJ to a BJJ-only account that doesn't exist,
// so the coordinator will create it via an L1CoordTx. Since it's a
// transfer of a token for which the coordinator doesn't have a fee
// account, another L1CoordTx will be created for the coordinator to
// receive the fees.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(1) B-A: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
// for the coordinator to receive the fees, and another one for the
// recipient of the l2tx
assert.Equal(t, 2, len(oL1CoordTxs))
// [0] Coordinator account creation for token 1
assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
// [1] User A BJJ-only account creation for token 1
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}