Compare commits


2 Commits

Author      SHA1        Message                                Date
Eduard S    3a7e18d113  Make proposal                          2021-02-10 13:26:14 +01:00
Arnau B     4661fb196a  Add semaphore for API queries to SQL   2021-02-10 13:10:19 +01:00
65 changed files with 1387 additions and 2743 deletions


@@ -4,10 +4,6 @@ Go implementation of the Hermez node.
## Developing
### Go version
The `hermez-node` has been tested with go version 1.14
### Unit testing
Running the unit tests requires a connection to a PostgreSQL database. You can


@@ -216,7 +216,7 @@ func TestMain(m *testing.M) {
panic(err)
}
}()
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeTxSelector, NLevels: 0})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
if err != nil {
panic(err)
}


@@ -2916,7 +2916,7 @@ components:
example: 101
l1UserTotalBytes:
type: integer
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx).
example: 72
maxL1UserTx:
type: integer
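The 72-byte figure follows directly from the field widths in the new description (and the 78-byte figure from the old one); a quick arithmetic sketch, not part of the diff, that checks both layouts:

```go
package main

import "fmt"

func main() {
	// L1 user tx field widths in bytes, per the API description above.
	common := 20 + 32 + 6 + 4 + 6 // fromEthAddr + fromBjj + fromIdx + tokenId + toIdx
	fmt.Println(common + 2 + 2)   // Float16 amounts: 72
	fmt.Println(common + 5 + 5)   // Float40 amounts: 78
}
```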


@@ -2,7 +2,6 @@ package batchbuilder
import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr"
@@ -29,14 +28,8 @@ type ConfigBatch struct {
// NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
// method
func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchNum common.BatchNum, nLevels uint64) (*BatchBuilder, error) {
localStateDB, err := statedb.NewLocalStateDB(
statedb.Config{
Path: dbpath,
Keep: kvdb.DefaultKeep,
Type: statedb.TypeBatchBuilder,
NLevels: int(nLevels),
},
synchronizerStateDB)
localStateDB, err := statedb.NewLocalStateDB(dbpath, 128, synchronizerStateDB,
statedb.TypeBatchBuilder, int(nLevels))
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -54,7 +47,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
return bb.localStateDB.Reset(batchNum, fromSynchronizer)
}
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch


@@ -15,7 +15,7 @@ func TestBatchBuilder(t *testing.T) {
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
synchDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeBatchBuilder, NLevels: 0})
synchDB, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 0)
assert.Nil(t, err)
bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")


@@ -2,10 +2,6 @@
This is the main cli for the node
## Go version
The `hermez-node` has been tested with go version 1.14
## Usage
```
@@ -69,64 +65,29 @@ when running the coordinator in sync mode
- The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section.
## Building
*All commands assume you are at the `cli/node` directory.*
Building the node requires using the packr utility to bundle the database
migrations inside the resulting binary. Install the packr utility with:
```
cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
```
Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
not be found.
Now build the node executable:
```
cd ../../db && packr2 && cd -
go build .
cd ../../db && packr2 clean && cd -
```
The executable is `node`.
## Usage Examples
The following commands assume you have built the node previously. You can also
run the following examples by replacing `./node` with `go run .` and executing
them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer:
```
./node --mode sync --cfg cfg.buidler.toml run
go run . --mode sync --cfg cfg.buidler.toml run
```
Run the node in mode coordinator:
```
./node --mode coord --cfg cfg.buidler.toml run
go run . --mode coord --cfg cfg.buidler.toml run
```
Import an ethereum private key into the keystore:
```
./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
Generate a new BabyJubJub key pair:
```
./node --mode coord --cfg cfg.buidler.toml genbjj
go run . --mode coord --cfg cfg.buidler.toml genbjj
```
Wipe the entire SQL database (this will destroy all synchronized and pool
data):
Wipe the entire SQL database (this will destroy all synchronized and pool data):
```
./node --mode coord --cfg cfg.buidler.toml wipesql
go run . --mode coord --cfg cfg.buidler.toml wipesql
```
Discard all synchronized blocks and associated state up to a given block
number. This command is useful in case the synchronizer reaches an invalid
state and you want to roll back a few blocks and try again (maybe with some
fixes in the code).
```
./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
```


@@ -41,20 +41,15 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token"
[Coordinator]
ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
L1BatchTimeoutPerc = 0.999
ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s"
ForgeDelay = "10s"
ForgeNoTxsDelay = "0s"
[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -85,13 +80,13 @@ MaxTx = 512
NLevels = 32
[Coordinator.EthClient]
ReceiptTimeout = "60s"
ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms"
Attempts = 4
AttemptsDelay = "500ms"
TxResendTimeout = "2m"
NoReuseNonce = false
MaxGasPrice = "5000000000"
GasPriceIncPerc = 10
CallGasLimit = 300000
GasPriceDiv = 100
[Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore"
@@ -103,4 +98,3 @@ Coordinator = true
[Coordinator.Debug]
BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
LightScrypt = true
# RollupVerifierIndex = 0


@@ -11,8 +11,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr"
@@ -25,7 +23,6 @@ const (
flagMode = "mode"
flagSK = "privatekey"
flagYes = "yes"
flagBlock = "block"
modeSync = "sync"
modeCoord = "coord"
)
@@ -142,47 +139,6 @@ func cmdRun(c *cli.Context) error {
return nil
}
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)
db, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.Port,
cfg.PostgreSQL.Host,
cfg.PostgreSQL.User,
cfg.PostgreSQL.Password,
cfg.PostgreSQL.Name,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
historyDB := historydb.NewHistoryDB(db, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
db,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
return nil
}
// Config is the configuration of the hermez node execution
type Config struct {
mode node.Mode
@@ -283,18 +239,6 @@ func main() {
Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun,
},
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
}
err := app.Run(os.Args)


@@ -17,11 +17,6 @@ const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollu
// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary
type AccountCreationAuth struct {


@@ -27,24 +27,6 @@ type Batch struct {
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}
// BatchNum identifies a batch
type BatchNum int64


@@ -33,8 +33,7 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}
// This result will be negative
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
return -1
}
// SlotBlocks returns the first and the last block numbers included in that slot
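To see what this change does for pre-genesis blocks, here is a minimal sketch; GenesisBlockNum=100 and BlocksPerSlot=40 are illustrative values, not taken from the diff:

```go
package main

import "fmt"

func main() {
	genesis, blocksPerSlot := int64(100), int64(40)
	blockNum := int64(20) // a block before the genesis block

	// Old behaviour: plain division, yielding a negative slot number.
	fmt.Println((blockNum - genesis) / blocksPerSlot) // -2

	// New behaviour: any pre-genesis block maps to the sentinel -1.
	if blockNum < genesis {
		fmt.Println(-1)
	}
}
```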


@@ -24,8 +24,8 @@ const (
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 78
// [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 72
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch

common/float16.go (new file, 131 lines)

@@ -0,0 +1,131 @@
// Package common Float16 provides methods to work with the Hermez custom
// half-float precision (16 bits) codification, internally called Float16,
// which has been adopted to encode large integers. This is done in order
// to save bits when L2 transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
var (
// ErrRoundingLoss is returned when converting a *big.Int to Float16 causes rounding loss
ErrRoundingLoss = errors.New("input value causes rounding loss")
)
// Float16 represents a float in a 16 bit format
type Float16 uint16
// Bytes returns a byte array of length 2 with the Float16 value encoded in BigEndian
func (f16 Float16) Bytes() []byte {
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(f16))
return b[:]
}
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}
// BigInt converts the Float16 to a *big.Int integer
func (f16 *Float16) BigInt() *big.Int {
fl := int64(*f16)
m := big.NewInt(fl & 0x3FF)
e := big.NewInt(fl >> 11)
e5 := (fl >> 10) & 0x01
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
res := m.Mul(m, exp)
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
res.Add(res, exp.Div(exp, big.NewInt(2)))
}
return res
}
// floorFix2Float converts a fix to a float, always rounding down
func floorFix2Float(_f *big.Int) Float16 {
zero := big.NewInt(0)
ten := big.NewInt(10)
e := int64(0)
m := big.NewInt(0)
m.Set(_f)
if m.Cmp(zero) == 0 {
return 0
}
s := big.NewInt(0).Rsh(m, 10)
for s.Cmp(zero) != 0 {
m.Div(m, ten)
s.Rsh(m, 10)
e++
}
return Float16(m.Int64() | e<<11)
}
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
// case of loss during the encoding.
func NewFloat16(f *big.Int) (Float16, error) {
fl1 := floorFix2Float(f)
fi1 := fl1.BigInt()
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
m3 := (fl1 & 0x3FF) + 1
e3 := fl1 >> 11
if m3&0x400 != 0 { // mantissa+1 overflowed 10 bits: 0x400/10 = 0x66, carry into exponent
m3 = 0x66
e3++
}
fl3 := m3 + e3<<11
fi3 := fl3.BigInt()
res := fl1
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
if d.Cmp(d2) == 1 {
res = fl2
d = d2
}
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
if d.Cmp(d3) == 1 {
res = fl3
}
// Do rounding check
if res.BigInt().Cmp(f) == 0 {
return res, nil
}
return res, tracerr.Wrap(ErrRoundingLoss)
}
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
// case of loss during the encoding.
func NewFloat16Floor(f *big.Int) Float16 {
fl1 := floorFix2Float(f)
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
if fi2.Cmp(f) < 1 {
return fl2
}
return fl1
}
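As a reading aid for the encoding above: a Float16 word packs a 5-bit base-10 exponent, a 1-bit "half" flag, and a 10-bit mantissa, so v = m*10^e, plus 10^e/2 when the half flag is set and e is nonzero. A minimal standalone sketch that re-derives two of the test vectors below:

```go
package main

import (
	"fmt"
	"math/big"
)

// decode mirrors Float16.BigInt above: bits [ e:5 | e5:1 | m:10 ].
func decode(f16 uint16) *big.Int {
	fl := int64(f16)
	m := big.NewInt(fl & 0x3FF) // 10-bit mantissa
	e := big.NewInt(fl >> 11)   // 5-bit decimal exponent
	e5 := (fl >> 10) & 0x01     // "half" flag
	exp := new(big.Int).Exp(big.NewInt(10), e, nil)
	res := new(big.Int).Mul(m, exp)
	if e5 != 0 && e.Sign() != 0 {
		res.Add(res, exp.Div(exp, big.NewInt(2)))
	}
	return res
}

func main() {
	fmt.Println(decode(0x307B)) // m=123, e=6: 123000000
	fmt.Println(decode(0x0C01)) // m=1, e=1, e5=1: 10 + 10/2 = 15
}
```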

common/float16_test.go (new file, 132 lines)

@@ -0,0 +1,132 @@
package common
import (
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
)
func TestConversions(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
0xFFFF: "10235000000000000000000000000000000",
0x0000: "0",
0x0400: "0",
0x0001: "1",
0x0401: "1",
0x0800: "0",
0x0c00: "5",
0x0801: "10",
0x0c01: "15",
}
for test := range testVector {
fix := test.BigInt()
assert.Equal(t, fix.String(), testVector[test])
bi := big.NewInt(0)
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.Equal(t, nil, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2Float(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
"87950000000000000": 0x776f,
"87949999999999999": 0x736f,
}
for test := range testVector {
bi := big.NewInt(0)
bi.SetString(test, 10)
testFloat := NewFloat16Floor(bi)
assert.Equal(t, testFloat, testVector[test])
}
}
func TestConversionLosses(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.Equal(t, nil, err)
c := b.BigInt()
assert.Equal(t, c, a)
a = big.NewInt(1024)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32767)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32768)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(65536000)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
}
func BenchmarkFloat16(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Bad big int")
}
return bigInt
}
type pair struct {
Float16 Float16
BigInt *big.Int
}
testVector := []pair{
{0x307B, newBigInt("123000000")},
{0x1DC6, newBigInt("454500")},
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
{0x0000, newBigInt("0")},
{0x0400, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1")},
{0x0800, newBigInt("0")},
{0x0c00, newBigInt("5")},
{0x0801, newBigInt("10")},
{0x0c01, newBigInt("15")},
}
b.Run("floorFix2Float()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
}
})
b.Run("NewFloat16()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float16.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
testVector[i%len(testVector)].Float16.BigInt()
}
})
}


@@ -1,103 +0,0 @@
// Package common Float40 provides methods to work with the Hermez custom
// half-float precision (40 bits) codification, internally called Float40,
// which has been adopted to encode large integers. This is done in order
// to save bits when L2 transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
)
var (
// ErrFloat40Overflow is used when a given Float40 overflows the
// maximum capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
ErrFloat40E31 = errors.New("Float40 error, e > 31")
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as Float40 due to not enough precision
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precission")
)
// Float40 represents a float in a 40 bit format, stored as a uint64
type Float40 uint64
// Bytes returns a byte array of length 5 with the Float40 value encoded in
// BigEndian
func (f40 Float40) Bytes() ([]byte, error) {
if f40 > maxFloat40Value {
return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
}
var f40Bytes [8]byte
binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
var b [5]byte
copy(b[:], f40Bytes[3:])
return b[:], nil
}
// Float40FromBytes returns a Float40 from a byte array of 5 bytes in BigEndian
// representation.
func Float40FromBytes(b []byte) Float40 {
var f40Bytes [8]byte
copy(f40Bytes[3:], b[:])
f40 := binary.BigEndian.Uint64(f40Bytes[:])
return Float40(f40)
}
// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
// [ e | m ]
// [ 5 bits | 35 bits ]
func (f40 Float40) BigInt() (*big.Int, error) {
// take the 5 used bytes (FF * 5)
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, err
}
e := f40Bytes[0] & 0xF8 >> 3 // take first 5 bits
m := f40Uint64 & 0x07_FF_FF_FF_FF // take the remaining 35 bits
exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
return r, nil
}
// NewFloat40 encodes a *big.Int integer as a Float40, returning error in case
// of loss during the encoding.
func NewFloat40(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
if m.Cmp(thres) >= 0 {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
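For comparison with Float16, a Float40 word packs a 5-bit exponent over a 35-bit mantissa with no half flag, so v = m*10^e and 0x800000000 (2^35) is the unit of the exponent field. A standalone sketch re-deriving two vectors from the deleted test file below:

```go
package main

import (
	"fmt"
	"math/big"
)

// decode mirrors Float40.BigInt above: bits [ e:5 | m:35 ].
func decode(f40 uint64) *big.Int {
	e := f40 >> 35         // top 5 bits
	m := f40 & 0x7FFFFFFFF // low 35 bits
	exp := new(big.Int).Exp(big.NewInt(10), new(big.Int).SetUint64(e), nil)
	return new(big.Int).Mul(new(big.Int).SetUint64(m), exp)
}

func main() {
	fmt.Println(decode(6*0x800000000 + 123)) // m=123, e=6: 123000000
	fmt.Println(decode(0xFFFFFFFFFF))        // (2^35-1) * 10^31
}
```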


@@ -1,95 +0,0 @@
package common
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConversionsFloat40(t *testing.T) {
testVector := map[Float40]string{
6*0x800000000 + 123: "123000000",
2*0x800000000 + 4545: "454500",
30*0x800000000 + 10235: "10235000000000000000000000000000000",
0x000000000: "0",
0x800000000: "0",
0x0001: "1",
0x0401: "1025",
0x800000000 + 1: "10",
0xFFFFFFFFFF: "343597383670000000000000000000000000000000",
}
for test := range testVector {
fix, err := test.BigInt()
require.NoError(t, err)
assert.Equal(t, fix.String(), testVector[test])
bi, ok := new(big.Int).SetString(testVector[test], 10)
require.True(t, ok)
fl, err := NewFloat40(bi)
assert.NoError(t, err)
fx2, err := fl.BigInt()
require.NoError(t, err)
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestExpectError(t *testing.T) {
testVector := map[string]error{
"9922334455000000000000000000000000000000": nil,
"9922334455000000000000000000000000000001": ErrFloat40NotEnoughPrecission,
"9922334454999999999999999999999999999999": ErrFloat40NotEnoughPrecission,
"42949672950000000000000000000000000000000": nil,
"99223344556573838487575": ErrFloat40NotEnoughPrecission,
"992233445500000000000000000000000000000000": ErrFloat40E31,
"343597383670000000000000000000000000000000": nil,
"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383700000000000000000000000000000000": ErrFloat40E31,
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], err)
}
}
func BenchmarkFloat40(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Can not convert string to *big.Int")
}
return bigInt
}
type pair struct {
Float40 Float40
BigInt *big.Int
}
testVector := []pair{
{6*0x800000000 + 123, newBigInt("123000000")},
{2*0x800000000 + 4545, newBigInt("454500")},
{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
{0x000000000, newBigInt("0")},
{0x800000000, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1025")},
{0x800000000 + 1, newBigInt("10")},
{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
}
b.Run("NewFloat40()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float40.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = testVector[i%len(testVector)].Float40.BigInt()
}
})
}


@@ -11,11 +11,18 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub"
)
const (
// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
L1UserTxBytesLen = 72
// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
L1CoordinatorTxBytesLen = 101
)
// L1Tx is a struct that represents a L1 tx
type L1Tx struct {
// Stored in DB: mandatory fields
// TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of:
// TxID (12 bytes) for L1Tx is:
// bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type:
@@ -172,38 +179,45 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
// b[0:7] empty: no ToBJJSign, no fee, no nonce
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[11:17], toIdxBytes[:])
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
}
// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ]
// [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
@@ -217,17 +231,13 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
if tx.EffectiveAmount != nil {
amountFloat40, err := NewFloat40(tx.EffectiveAmount)
amountFloat16, err := NewFloat16(tx.EffectiveAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
}
// fee = 0 (as is L1Tx)
// fee = 0 (as is L1Tx) b[10:11]
return b[:], nil
}
@@ -237,7 +247,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength]
amountBytes := b[idxLen*2 : idxLen*2+2]
l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -250,8 +260,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
return nil, tracerr.Wrap(err)
}
l1tx.ToIdx = toIdx
l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
return &l1tx, err
l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt()
return &l1tx, nil
}
// BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -259,7 +269,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [RollupConstL1UserTotalBytes]byte
var b [L1UserTxBytesLen]byte
copy(b[0:20], tx.FromEthAddr.Bytes())
if tx.FromBJJ != EmptyBJJComp {
pkCompL := tx.FromBJJ
@@ -271,33 +281,22 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
return nil, tracerr.Wrap(err)
}
copy(b[52:58], fromIdxBytes[:])
depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
copy(b[58:60], depositAmountFloat16.Bytes())
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[58:63], depositAmountFloat40Bytes)
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[63:68], amountFloat40Bytes)
copy(b[68:72], tx.TokenID.Bytes())
copy(b[60:62], amountFloat16.Bytes())
copy(b[62:66], tx.TokenID.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[72:78], toIdxBytes[:])
copy(b[66:72], toIdxBytes[:])
return b[:], nil
}
@@ -314,7 +313,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
if tx.UserOrigin {
return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
}
var b [RollupConstL1CoordinatorTotalBytes]byte
var b [L1CoordinatorTxBytesLen]byte
v := compressedSignatureBytes[64]
s := compressedSignatureBytes[32:64]
r := compressedSignatureBytes[0:32]
@@ -330,7 +329,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != RollupConstL1UserTotalBytes {
if len(b) != L1UserTxBytesLen {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
}
@@ -348,19 +347,13 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return nil, tracerr.Wrap(err)
}
tx.FromIdx = fromIdx
tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
tx.Amount = Float16FromBytes(b[60:62]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.TokenID, err = TokenIDFromBytes(b[68:72])
tx.TokenID, err = TokenIDFromBytes(b[62:66])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[72:78])
tx.ToIdx, err = IdxFromBytes(b[66:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -375,7 +368,7 @@ func signHash(data []byte) []byte {
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != RollupConstL1CoordinatorTotalBytes {
if len(b) != L1CoordinatorTxBytesLen {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
}
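The buffer-size expressions in BytesDataAvailability above make the savings concrete; a sketch of the per-transaction data-availability length for nLevels=32 under both encodings:

```go
package main

import "fmt"

// daLen mirrors the buffer-size expression in BytesDataAvailability:
// two indices of nLevels/8 bytes each, the amount field, and one fee byte.
func daLen(nLevels, amountBits uint32) uint32 {
	return (nLevels*2 + amountBits + 8) / 8
}

func main() {
	fmt.Println(daLen(32, 16)) // Float16: 4+4+2+1 = 11 bytes
	fmt.Println(daLen(32, 40)) // Float40: 4+4+5+1 = 14 bytes
}
```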


@@ -50,110 +50,64 @@ func TestNewL1CoordinatorTx(t *testing.T) {
}
func TestL1TxCompressedData(t *testing.T) {
// test vectors values generated from javascript implementation (using
// PoolL2Tx values)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
chainID := uint16(0)
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err)
expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = L1Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err)
expectedStr = "7b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
// test vector value generated from javascript implementation
expectedStr := "7307597389635308713748674793997299267459594577423"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestBytesDataAvailability(t *testing.T) {
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
EffectiveAmount: amount,
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err := tx.BytesDataAvailability(16)
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
tx = L1Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
EffectiveAmount: amount,
FromIdx: 2,
ToIdx: 3,
EffectiveAmount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
}
func TestL1TxFromDataAvailability(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
}
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 0,
FromIdx: 0,
EffectiveAmount: big.NewInt(0),
FromIdx: 2,
ToIdx: 3,
EffectiveAmount: big.NewInt(4),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 635,
FromIdx: 296,
EffectiveAmount: big.NewInt(1000000000000000000),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -218,10 +172,12 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
UserOrigin: true,
}
expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
require.NoError(t, err)
expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
encodedData, err := l1Tx.BytesUser()
require.NoError(t, err)
assert.Equal(t, expected, encodedData)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
}
func TestL1CoordinatorTxByteParsers(t *testing.T) {


@@ -89,15 +89,11 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
// TokenID
b = append(b, tx.TokenID.Bytes()[:]...)
// Amount
amountFloat40, err := NewFloat40(tx.Amount)
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return txID, tracerr.Wrap(err)
}
b = append(b, amountFloat40Bytes...)
b = append(b, amountFloat16.Bytes()...)
// Nonce
nonceBytes, err := tx.Nonce.Bytes()
if err != nil {
@@ -174,11 +170,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
}
// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat40 | Fee ]
// [ fromIdx | toIdx | amountFloat16 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
@@ -192,16 +188,13 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
}
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
amountFloat40, err := NewFloat40(tx.Amount)
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
b[idxLen*2+2] = byte(tx.Fee)
return b[:], nil
}
@@ -226,10 +219,7 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err)
}
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt()
tx.Fee = FeeSelector(b[idxLen*2+2])
return tx, nil
}
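The L2 data-availability layout can be checked against the "00000101000001002b16c9" vector used in the tests below (nLevels=32): four bytes of fromIdx, four of toIdx, a two-byte Float16 amount, and one fee byte. A sketch that unpacks it, ignoring the Float16 half flag, which is zero here:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	b, _ := hex.DecodeString("00000101000001002b16c9")
	fromIdx := binary.BigEndian.Uint32(b[0:4]) // 257
	toIdx := binary.BigEndian.Uint32(b[4:8])   // 256
	f16 := binary.BigEndian.Uint16(b[8:10])    // 0x2b16: m=790, e=5
	amount := new(big.Int).Mul(
		big.NewInt(int64(f16&0x3FF)),
		new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(f16>>11)), nil))
	fee := b[10] // 201
	fmt.Println(fromIdx, toIdx, amount, fee) // 257 256 79000000 201
}
```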


@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())
assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())
assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())
assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())
assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())
assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String())
l2Tx = &L2Tx{
FromIdx: 4444,
@@ -90,85 +90,25 @@ func TestNewL2Tx(t *testing.T) {
}
l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String())
}
func TestL2TxByteParsers(t *testing.T) {
// test vectors values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
amount := new(big.Int)
amount.SetString("79000000", 10)
l2Tx := &L2Tx{
ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected := "ffffffffffffffffffff"
encodedData, err := l2Tx.BytesDataAvailability(16)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected = "ffffffffffffffffffffffffffff"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 0,
Amount: big.NewInt(0),
Fee: 0,
}
expected = "0000000000000000000000000000"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 1061,
Amount: big.NewInt(420000000000),
Fee: 127,
}
expected = "000004250000000010fa56ea007f"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 256,
Amount: amount,
FromIdx: 257,
Amount: big.NewInt(79000000),
Fee: 201,
}
expected = "00000101000001000004b571c0c9"
encodedData, err = l2Tx.BytesDataAvailability(32)
// Data from the compatibility test
expected := "00000101000001002b16c9"
encodedData, err := l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
}


@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"`
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16
Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -122,13 +122,18 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
toBJJSign := byte(0)
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -144,18 +149,19 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[11:17], toIdxBytes[:])
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
copy(b[19:25], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -164,9 +170,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int {
var b [29]byte
binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[25:29], SignatureConstantBytes[:])
var b [31]byte
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi
}
@@ -176,24 +182,19 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 40 bits ] amountFloat40 // 5 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil {
tx.Amount = big.NewInt(0)
}
amountFloat40, err := NewFloat40(tx.Amount)
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
var b [25]byte
toBJJSign := byte(0)
if tx.ToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -209,17 +210,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:16], amountFloat40Bytes)
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[16:22], toIdxBytes[:])
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[22:28], fromIdxBytes[:])
copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -235,24 +236,19 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits ] rqUserFee // 1 byte
// [ 40 bits ] rqNonce // 5 bytes
// [ 32 bits ] rqTokenID // 4 bytes
// [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 16 bits ] rqAmountFloat16 // 2 bytes
// [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0)
}
amountFloat40, err := NewFloat40(tx.RqAmount)
amountFloat16, err := NewFloat16(tx.RqAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
var b [25]byte
rqToBJJSign := byte(0)
if tx.RqToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -268,17 +264,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.RqTokenID.Bytes())
copy(b[11:16], amountFloat40Bytes)
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.RqToIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[16:22], toIdxBytes[:])
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.RqFromIdx.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[22:28], fromIdxBytes[:])
copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
@@ -291,22 +287,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
if err != nil {
return nil, tracerr.Wrap(err)
}
// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
var e1B [25]byte
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
copy(e1B[5:25], toEthAddr.Bytes())
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -318,7 +299,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
}
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
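The 31-byte TxCompressedData layout above can be read off directly from one of the test vectors below (FromIdx=2, ToIdx=3, Amount=4, TokenID=5, Nonce=6, chainID=0). A sketch splitting that hex string by field; treating the first two bytes as the toBJJSign and fee bytes is an assumption based on the layout comment, since those copies fall outside the captured hunk:

```go
package main

import "fmt"

func main() {
	fields := [][2]string{
		{"01", "toBJJSign"},
		{"00", "fee (unset)"},
		{"0000000006", "nonce (5 bytes)"},
		{"00000005", "tokenID (4 bytes)"},
		{"0004", "amountFloat16 (4 -> 0x0004)"},
		{"000000000003", "toIdx (6 bytes)"},
		{"000000000002", "fromIdx (6 bytes)"},
		{"0000", "chainID"},
		{"c60be60f", "signature constant"},
	}
	total := 0
	for _, f := range fields {
		fmt.Printf("%-12s %s\n", f[0], f[1])
		total += len(f[0]) / 2
	}
	fmt.Println("total bytes:", total) // 31
}
```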


@@ -21,104 +21,80 @@ func TestNewPoolL2Tx(t *testing.T) {
}
poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err)
assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String())
}
func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
// test vectors values generated from javascript implementation
var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
_, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
func TestTxCompressedData(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{
FromIdx: (1 << 48) - 1,
ToIdx: (1 << 48) - 1,
Amount: amount,
TokenID: (1 << 32) - 1,
Nonce: (1 << 40) - 1,
Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
require.NoError(t, err)
expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err := tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 0,
ToIdx: 0,
Amount: big.NewInt(0),
TokenID: 0,
Nonce: 0,
Fee: 0,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
assert.Equal(t, "0", txCompressedDataV2.String())
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = PoolL2Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
require.NoError(t, err)
expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedDataV2, err = tx.TxCompressedDataV2()
require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
TokenID: 4,
Nonce: 5,
Fee: 6,
ToBJJ: skPositive.Public().Compress(),
Amount: big.NewInt(4),
TokenID: 5,
Nonce: 6,
ToBJJ: sk.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
// using a different chainID
txCompressedData, err = tx.TxCompressedData(uint16(100))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public().Compress(),
}
rqTxCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public().Compress(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestRqTxCompressedDataV2(t *testing.T) {
@@ -137,16 +113,19 @@ func TestRqTxCompressedDataV2(t *testing.T) {
txCompressedData, err := tx.RqTxCompressedDataV2()
assert.NoError(t, err)
// test vector value generated from javascript implementation
expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestHashToSign(t *testing.T) {
chainID := uint16(0)
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err)
tx := PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
@@ -157,7 +136,7 @@ func TestHashToSign(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
}
func TestVerifyTxSignature(t *testing.T) {
@@ -177,7 +156,7 @@ func TestVerifyTxSignature(t *testing.T) {
}
toSign, err := tx.HashToSign(chainID)
assert.NoError(t, err)
assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
sig := sk.SignPoseidon(toSign)
tx.Signature = sig.Compress()

View File

@@ -102,8 +102,6 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"` // uint40 len: [maxTx]
// OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -114,8 +112,8 @@ type ZKInputs struct {
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float40
DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx]
// DepositAmountF encoded as float16
DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
// FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int
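DepositAmountF switches here from float40 back to the Float16 fix-point encoding. For reference, a minimal decoding sketch follows; the layout (10-bit mantissa, half-exponent flag at bit 10, 5-bit exponent, value = m·10^e plus half of 10^e when the flag is set) is an assumption based on the Hermez Float16 spec, not code taken from this diff.

```
package sketch

import "math/big"

// float16ToFix decodes an assumed Float16 layout:
// bits [0..9] mantissa m, bit 10 half-exponent flag, bits [11..15] exponent e.
func float16ToFix(fl uint16) *big.Int {
	m := big.NewInt(int64(fl & 0x3ff))
	e := int64(fl >> 11)
	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(e), nil)
	res := new(big.Int).Mul(m, exp)
	if fl&0x400 != 0 && e > 0 { // half flag adds 5*10^(e-1); invalid for e == 0
		res.Add(res, new(big.Int).Div(exp, big.NewInt(2)))
	}
	return res
}
```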
@@ -328,7 +326,6 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx)
@@ -479,8 +476,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -497,9 +494,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
}
b = append(b, l1TxsDataAvailability...)
// [MAX_TX*(2*NLevels + 48) bits] L2TxsData
// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ {
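The arithmetic behind these constants is worth spelling out. For MAX_NLEVELS = 48, each L1 tx takes 2·48 + 480 = 576 bits = 72 bytes: the two 48-bit indexes plus 480 fixed bits, which presumably break down as fromEthAddr (20 B) + fromBjj-compressed (32 B) + depositAmountFloat16 (2 B) + amountFloat16 (2 B) + tokenID (4 B) = 60 B. For NLevels = 32, each L2 tx takes 2·32 + 24 = 88 bits, the 24 being the 16-bit amountFloat16 plus the 8-bit fee. A quick check, with the parameter values assumed for illustration:

```
package main

import "fmt"

func main() {
	maxLevels, nLevels := 48, 32 // illustrative circuit parameters
	l1TxDataLen := 2*maxLevels + 480
	fmt.Println(l1TxDataLen, l1TxDataLen/8) // 576 bits, 72 bytes per L1 tx
	l2TxDataLen := 2*nLevels + 24
	fmt.Println(l2TxDataLen, l2TxDataLen/8) // 88 bits, 11 bytes per L2 tx
}
```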

View File

@@ -3,7 +3,6 @@ package config
import (
"fmt"
"io/ioutil"
"math/big"
"time"
"github.com/BurntSushi/toml"
@@ -52,42 +51,12 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"`
// ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error
ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"`
@@ -96,33 +65,19 @@ type Coordinator struct {
// SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
// MaxTxs is the number of L2Txs that once reached triggers
// deletion of old L2Txs
MaxTxs uint32 `validate:"required"`
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
// PurgeBatchDelay is the delay between batches to purge outdated transactions
PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark
// invalid transactions due to nonce lower than the account
// nonce.
// InvalidateBatchDelay is the delay between batches to mark invalid transactions
InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
// PurgeBlockDelay is the delay between blocks to purge outdated transactions
PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark
// invalid transactions due to nonce lower than the account
// nonce.
// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"`
TxSelector struct {
@@ -135,6 +90,7 @@ type Coordinator struct {
} `validate:"required"`
ServerProofs []ServerProof `validate:"required"`
Circuit struct {
// VerifierIdx uint8 `validate:"required"`
// MaxTx is the maximum number of txs supported by the circuit
MaxTx int64 `validate:"required"`
// NLevels is the maximum number of merkle tree levels
@@ -142,13 +98,12 @@ type Coordinator struct {
NLevels int64 `validate:"required"`
} `validate:"required"`
EthClient struct {
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int `validate:"required"`
// GasPriceIncPerc is the percentage increase of gas price set
// in an ethereum transaction from the suggested gas price by
// the ethereum node
GasPriceIncPerc int64
// CallGasLimit is the default gas limit set for ethereum
// calls, except for methods where a particular gas limit is
// hardcoded because it's known to be a big value
CallGasLimit uint64 `validate:"required"`
// GasPriceDiv is the gas price division
GasPriceDiv uint64 `validate:"required"`
// CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"`
@@ -158,13 +113,6 @@ type Coordinator struct {
// AttemptsDelay is the delay between attempts to do an eth client
// RPC call
AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept
Keystore struct {
// Path to the keystore
@@ -184,10 +132,6 @@ type Coordinator struct {
// LightScrypt if set, uses light parameters for the ethereum
// keystore encryption algorithm.
LightScrypt bool
// RollupVerifierIndex is the index of the verifier to use in
// the Rollup smart contract. The verifier chosen by index
// must match with the Circuit parameters.
RollupVerifierIndex *int
}
}

View File

@@ -47,8 +47,6 @@ type Debug struct {
MineBlockNum int64
// SendBlockNum is the blockNum when the batch was sent to ethereum
SendBlockNum int64
// ResendNum is the number of times the tx has been resent
ResendNum int
// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
// was scheduled
LastScheduledL1BatchBlockNum int64
@@ -66,17 +64,10 @@ type Debug struct {
// StartToSendDelay is the delay between starting a batch and sending
// it to ethereum, in seconds
StartToSendDelay float64
// StartToMineDelay is the delay between starting a batch and having
// it mined in seconds
StartToMineDelay float64
// SendToMineDelay is the delay between sending a batch tx and having
// it mined in seconds
SendToMineDelay float64
}
// BatchInfo contains the Batch information
type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum
ServerProof prover.Client
ZKInputs *common.ZKInputs
@@ -92,14 +83,7 @@ type BatchInfo struct {
ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo
EthTx *types.Transaction
EthTxErr error
// SendTimestamp the time of batch sent to ethereum
SendTimestamp time.Time
Receipt *types.Receipt
// Fail is true if:
// - The receipt status is failed
// - A previous parent batch is failed
Fail bool
Debug Debug
}

View File

@@ -3,8 +3,8 @@ package coordinator
import (
"context"
"fmt"
"math/big"
"os"
"strings"
"sync"
"time"
@@ -24,8 +24,6 @@ import (
var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
)
const (
@@ -44,64 +42,18 @@ type Config struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though at block 11 we canForge, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though at block 11 we canForge, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
SendBatchBlocksMarginCheck int64
// EthClientAttempts is the number of attempts to do an eth client RPC
// call before giving up
EthClientAttempts int
// ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error
ForgeRetryInterval time.Duration
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay time.Duration
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error
SyncRetryInterval time.Duration
// EthClientAttemptsDelay is the delay between attempts to do an eth client
// RPC call
EthClientAttemptsDelay time.Duration
// EthTxResendTimeout is the timeout after which a non-mined ethereum
// transaction will be resent (reusing the nonce) with a newly
// calculated gas price
EthTxResendTimeout time.Duration
// EthNoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
EthNoReuseNonce bool
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int
// GasPriceIncPerc is the percentage increase of gas price set in an
// ethereum transaction from the suggested gas price by the ethereum
// node
GasPriceIncPerc int64
// TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration
@@ -109,8 +61,6 @@ type Config struct {
// in JSON in every step/update of the pipeline
DebugBatchPath string
Purger PurgerCfg
// VerifierIdx is the index of the verifier contract registered in the
// smart contract
VerifierIdx uint8
TxProcessorConfig txprocessor.Config
}
@@ -124,17 +74,10 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
}
}
type fromBatch struct {
BatchNum common.BatchNum
ForgerAddr ethCommon.Address
StateRoot *big.Int
}
// Coordinator implements the Coordinator type
type Coordinator struct {
// State
pipelineNum int // Pipeline sequential number. The first pipeline is 1
pipelineFromBatch fromBatch // batch from which we started the pipeline
pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
provers []prover.Client
consts synchronizer.SCConsts
vars synchronizer.SCVariables
@@ -154,7 +97,6 @@ type Coordinator struct {
cancel context.CancelFunc
pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum
purger *Purger
txManager *TxManager
@@ -197,12 +139,7 @@ func NewCoordinator(cfg Config,
ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{
pipelineNum: 0,
pipelineFromBatch: fromBatch{
BatchNum: 0,
ForgerAddr: ethCommon.Address{},
StateRoot: big.NewInt(0),
},
pipelineBatchNum: -1,
provers: serverProofs,
consts: *scConsts,
vars: *initSCVars,
@@ -246,9 +183,8 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
}
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
c.pipelineNum++
return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
}
// MsgSyncBlock indicates an update to the Synchronizer stats
@@ -269,9 +205,6 @@ type MsgSyncReorg struct {
// MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct {
Reason string
// FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum
}
// SendMsg is a thread safe method to pass a message to the Coordinator
@@ -282,36 +215,27 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
}
}
func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
if update.Rollup != nil {
vars.Rollup = *update.Rollup
}
if update.Auction != nil {
vars.Auction = *update.Auction
}
if update.WDelayer != nil {
vars.WDelayer = *update.WDelayer
}
}
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&c.vars, vars)
if vars.Rollup != nil {
c.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
c.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
c.vars.WDelayer = *vars.WDelayer
}
}
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
return false
}
var slot *common.Slot
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
slot = currentSlot
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
slot = nextSlot
} else {
log.Warnw("canForge: requested blockNum is outside current and next slot",
log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
"blockNum", blockNum, "currentSlot", currentSlot,
"nextSlot", nextSlot,
)
@@ -320,23 +244,16 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
anyoneForge := false
if !slot.ForgerCommitment &&
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
"block", blockNum)
anyoneForge = true
}
if slot.Forger == addr || anyoneForge {
return true
}
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
return false
}
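Stripped of the slot lookup, the permission rule reduces to two conditions: the address is the slot's assigned forger, or the slot deadline has passed without a forger commitment. A standalone sketch with plain values (names are illustrative, not the real API):

```
// canForgeSketch mirrors the decision above using plain values.
func canForgeSketch(slotForger, addr string, forgerCommitment bool,
	relativeBlock, slotDeadline int64) bool {
	// Anyone may forge once the deadline inside the slot has passed
	// without the assigned forger committing a batch.
	anyoneForge := !forgerCommitment && relativeBlock >= slotDeadline
	return slotForger == addr || anyoneForge
}
```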
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -345,39 +262,21 @@ func (c *Coordinator) canForge() bool {
}
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
nextBlock := c.stats.Eth.LastBlock.Num + 1
canForge := c.canForgeAt(nextBlock)
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
}
canForge := c.canForge()
if c.pipeline == nil {
relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugf("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge {
if canForge {
log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
fromBatch := fromBatch{
BatchNum: stats.Sync.LastBatch.BatchNum,
ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
StateRoot: stats.Sync.LastBatch.StateRoot,
}
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
fromBatch.BatchNum = c.lastNonFailedBatchNum
fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0)
}
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
batchNum := common.BatchNum(stats.Sync.LastBatch)
var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err)
}
c.pipelineFromBatch = fromBatch
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil {
c.pipeline = nil
return tracerr.Wrap(err)
}
c.pipelineBatchNum = batchNum
}
} else {
if !canForge {
@@ -387,12 +286,25 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
}
}
if c.pipeline == nil {
if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
// Mark invalid in Pool due to forged L2Txs
// for _, batch := range batches {
// if err := c.l2DB.InvalidateOldNonces(
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
// return err
// }
// }
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
int64(stats.Sync.LastBatch.BatchNum)); err != nil {
}
_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err)
}
_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err)
}
}
@@ -419,43 +331,33 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
if c.pipeline != nil {
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
}
if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
(c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil ||
c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) {
// There's been a reorg and the batch state root from which the
// pipeline was started has changed (probably because it was in
// a block that was discarded), and it was sent by a different
// coordinator than us. That batch may never be in the main
// chain, so we stop the pipeline (it will be started again
// once the node is in sync).
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
// There's been a reorg and the batch from which the pipeline
// was started was in a block that was discarded. The batch
// may not be in the main chain, so we stop the pipeline as a
// precaution (it will be started again once the node is in
// sync).
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
"sync.LastBatch", c.stats.Sync.LastBatch,
"c.pipelineBatchNum", c.pipelineBatchNum)
if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
return tracerr.Wrap(err)
}
}
return nil
}
// handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
// the next pipeline will start from the last state of the synchronizer,
// otherwise, it will start from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
if c.pipeline != nil {
c.pipeline.Stop(c.ctx)
c.pipeline = nil
}
if err := c.l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(err)
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
// TODO: Check that we are in a slot in which we can't forge
}
c.lastNonFailedBatchNum = batchNum
return nil
}
@@ -471,7 +373,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
}
case MsgStopPipeline:
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
}
default:
@@ -494,7 +396,7 @@ func (c *Coordinator) Start() {
c.wg.Add(1)
go func() {
waitCh := time.After(longWaitDuration)
waitDuration := longWaitDuration
for {
select {
case <-c.ctx.Done():
@@ -506,24 +408,23 @@ func (c *Coordinator) Start() {
continue
} else if err != nil {
log.Errorw("Coordinator.handleMsg", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval)
waitDuration = c.cfg.SyncRetryInterval
continue
}
waitCh = time.After(longWaitDuration)
case <-waitCh:
waitDuration = longWaitDuration
case <-time.After(waitDuration):
if !c.stats.Synced() {
waitCh = time.After(longWaitDuration)
waitDuration = longWaitDuration
continue
}
if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
waitCh = time.After(longWaitDuration)
continue
} else if err != nil {
log.Errorw("Coordinator.syncStats", "err", err)
waitCh = time.After(c.cfg.SyncRetryInterval)
waitDuration = c.cfg.SyncRetryInterval
continue
}
waitCh = time.After(longWaitDuration)
waitDuration = longWaitDuration
}
}
}()

View File

@@ -97,8 +97,7 @@ func newTestModules(t *testing.T) modules {
syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 48})
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
assert.NoError(t, err)
pass := os.Getenv("POSTGRES_PASS")
@@ -261,8 +260,8 @@ func TestCoordinatorFlow(t *testing.T) {
var stats synchronizer.Stats
stats.Eth.LastBlock = *ethClient.CtlLastBlock()
stats.Sync.LastBlock = stats.Eth.LastBlock
stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
stats.Sync.LastBatch = stats.Eth.LastBatch
canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
require.NoError(t, err)
var slot common.Slot
@@ -279,7 +278,7 @@ func TestCoordinatorFlow(t *testing.T) {
// Copy stateDB to synchronizer if there was a new batch
source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
if stats.Sync.LastBatch.BatchNum != 0 {
if stats.Sync.LastBatch != 0 {
if _, err := os.Stat(dest); os.IsNotExist(err) {
log.Infow("Making pebble checkpoint for sync",
"source", source, "dest", dest)

View File

@@ -2,7 +2,6 @@ package coordinator
import (
"context"
"database/sql"
"fmt"
"math/big"
"sync"
@@ -25,29 +24,19 @@ type statsVars struct {
Vars synchronizer.SCVariablesPtr
}
type state struct {
batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
lastSlotForged int64
}
// Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct {
num int
cfg Config
consts synchronizer.SCConsts
// state
state state
batchNum common.BatchNum
lastScheduledL1BatchBlockNum int64
lastForgeL1TxsNum int64
started bool
rw sync.RWMutex
errAtBatchNum common.BatchNum
lastForgeTime time.Time
proversPool *ProversPool
provers []prover.Client
coord *Coordinator
txManager *TxManager
historyDB *historydb.HistoryDB
l2DB *l2db.L2DB
@@ -64,28 +53,14 @@ type Pipeline struct {
cancel context.CancelFunc
}
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
p.rw.Lock()
defer p.rw.Unlock()
p.errAtBatchNum = batchNum
}
func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
p.rw.RLock()
defer p.rw.RUnlock()
return p.errAtBatchNum
}
// NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context,
cfg Config,
num int, // Pipeline sequential number
historyDB *historydb.HistoryDB,
l2DB *l2db.L2DB,
txSelector *txselector.TxSelector,
batchBuilder *batchbuilder.BatchBuilder,
purger *Purger,
coord *Coordinator,
txManager *TxManager,
provers []prover.Client,
scConsts *synchronizer.SCConsts,
@@ -104,7 +79,6 @@ func NewPipeline(ctx context.Context,
return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
}
return &Pipeline{
num: num,
cfg: cfg,
historyDB: historyDB,
l2DB: l2DB,
@@ -113,7 +87,6 @@ func NewPipeline(ctx context.Context,
provers: provers,
proversPool: proversPool,
purger: purger,
coord: coord,
txManager: txManager,
consts: *scConsts,
statsVarsCh: make(chan statsVars, queueLen),
@@ -131,68 +104,33 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum,
stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
p.state = state{
batchNum: batchNum,
lastForgeL1TxsNum: stats.Sync.LastForgeL1TxsNum,
lastScheduledL1BatchBlockNum: 0,
lastSlotForged: -1,
}
p.batchNum = batchNum
p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
p.stats = *stats
p.vars = *vars
p.lastScheduledL1BatchBlockNum = 0
// Reset the StateDB in TxSelector and BatchBuilder from the
// synchronizer only if the checkpoint we reset from either:
// a. Doesn't exist in the TxSelector/BatchBuilder
// b. The batch has already been synced by the synchronizer and has a
// different MTRoot than the BatchBuilder
// Otherwise, reset from the local checkpoint.
// First attempt to reset from local checkpoint if such checkpoint exists
existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
err := p.txSelector.Reset(p.batchNum)
if err != nil {
return tracerr.Wrap(err)
}
fromSynchronizerTxSelector := !existsTxSelector
if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
return tracerr.Wrap(err)
}
existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
err = p.batchBuilder.Reset(p.batchNum, true)
if err != nil {
return tracerr.Wrap(err)
}
fromSynchronizerBatchBuilder := !existsBatchBuilder
if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
return tracerr.Wrap(err)
}
// After reset, check that if the batch exists in the historyDB, the
// stateRoot matches with the local one, if not, force a reset from
// synchronizer
batch, err := p.historyDB.GetBatch(p.state.batchNum)
if tracerr.Unwrap(err) == sql.ErrNoRows {
// nothing to do
} else if err != nil {
return tracerr.Wrap(err)
} else {
localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
if batch.StateRoot.Cmp(localStateRoot) != 0 {
log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
// StateRoot from synchronizer doesn't match StateRoot
// from batchBuilder, force a reset from synchronizer
if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
return tracerr.Wrap(err)
}
}
}
return nil
}
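The reset-source decision on the left-hand version can be summarized in one predicate. A sketch with illustrative helper arguments (not the real API):

```
package sketch

import "math/big"

// resetFromSynchronizer decides whether to rebuild the local StateDB from
// the synchronizer's checkpoint instead of rolling back locally.
func resetFromSynchronizer(checkpointExists, batchInHistoryDB bool,
	historyRoot, localRoot *big.Int) bool {
	if !checkpointExists {
		return true // (a) the checkpoint is missing locally
	}
	if batchInHistoryDB && historyRoot.Cmp(localRoot) != 0 {
		return true // (b) the synced state root disagrees with the local MT root
	}
	return false // otherwise the local checkpoint is good enough
}
```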
func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&p.vars, vars)
if vars.Rollup != nil {
p.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
p.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
p.vars.WDelayer = *vars.WDelayer
}
}
// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
@@ -205,11 +143,8 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
} else if err != nil {
if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
// no log
} else {
log.Errorw("forgeBatch", "err", err)
}
@@ -253,7 +188,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Add(1)
go func() {
waitCh := time.After(zeroDuration)
waitDuration := zeroDuration
for {
select {
case <-p.ctx.Done():
@@ -263,42 +198,18 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
case statsVars := <-p.statsVarsCh:
p.stats = statsVars.Stats
p.syncSCVars(statsVars.Vars)
case <-waitCh:
// Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
waitCh = time.After(p.cfg.ForgeRetryInterval)
case <-time.After(waitDuration):
batchNum = p.batchNum + 1
if batchInfo, err := p.handleForgeBatch(p.ctx, batchNum); err != nil {
waitDuration = p.cfg.SyncRetryInterval
continue
}
batchNum = p.state.batchNum + 1
batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
if p.ctx.Err() != nil {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue
} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
tracerr.Unwrap(err) == errForgeBeforeDelay {
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue
} else if err != nil {
p.setErrAtBatchNum(batchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.handleForgBatch: %v", err),
FailedBatchNum: batchNum,
})
waitCh = time.After(p.cfg.ForgeRetryInterval)
continue
}
p.lastForgeTime = time.Now()
p.state.batchNum = batchNum
} else {
p.batchNum = batchNum
select {
case batchChSentServerProof <- batchInfo:
case <-p.ctx.Done():
}
waitCh = time.After(zeroDuration)
}
}
}
}()
@@ -312,28 +223,16 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
p.wg.Done()
return
case batchInfo := <-batchChSentServerProof:
// Once errAtBatchNum != 0, we stop forging
// batches because there's been an error and we
// wait for the pipeline to be stopped.
if p.getErrAtBatchNum() != 0 {
continue
}
err := p.waitServerProof(p.ctx, batchInfo)
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
batchInfo.ServerProof = nil
if p.ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("waitServerProof", "err", err)
p.setErrAtBatchNum(batchInfo.BatchNum)
p.coord.SendMsg(p.ctx, MsgStopPipeline{
Reason: fmt.Sprintf(
"Pipeline.waitServerProof: %v", err),
FailedBatchNum: batchInfo.BatchNum,
})
continue
}
// We are done with this serverProof, add it back to the pool
p.proversPool.Add(p.ctx, batchInfo.ServerProof)
// batchInfo.ServerProof = nil
p.txManager.AddBatch(p.ctx, batchInfo)
}
}
@@ -383,10 +282,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
if err != nil {
return nil, tracerr.Wrap(err)
}
// Structure to accumulate data and metadata of the batch
now := time.Now()
batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
batchInfo.Debug.StartTimestamp = now
batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
batchInfo.Debug.StartTimestamp = time.Now()
batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
selectionCfg := &txselector.SelectionConfig{
@@ -400,26 +298,22 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
var auths [][]byte
var coordIdxs []common.Idx
// Check if the slot is not yet fulfilled
slotCommitted := false
if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
slotCommitted = true
}
// If we haven't reached the ForgeDelay, skip forging the batch
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
return nil, errForgeBeforeDelay
}
// 1. Decide if we forge L2Tx or L1+L2Tx
if p.shouldL1L2Batch(batchInfo) {
batchInfo.L1Batch = true
if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
defer func() {
// If there's no error, update the parameters related
// to the last L1Batch forged
if err == nil {
p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.lastForgeL1TxsNum++
}
}()
if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
return nil, tracerr.Wrap(errLastL1BatchNotSynced)
}
// 2a: L1+L2 txs
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -438,43 +332,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
l1UserTxsExtra = nil
}
// If there are no txs to forge, no l1UserTxs in the open queue to
// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
// batch.
if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
noTxs := false
if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
if batchInfo.L1Batch {
// Query the L1UserTxs in the queue following
// the one we are trying to forge.
nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, tracerr.Wrap(err)
}
// If there are future L1UserTxs, we forge a
// batch to advance the queues and forge the
// L1UserTxs in the future. Otherwise, skip.
if len(nextL1UserTxs) == 0 {
noTxs = true
}
} else {
noTxs = true
}
}
if noTxs {
if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
return nil, tracerr.Wrap(err)
}
return nil, errForgeNoTxsBeforeDelay
}
}
if batchInfo.L1Batch {
p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
p.state.lastForgeL1TxsNum++
}
// 3. Save metadata from TxSelector output for BatchNum
batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs
@@ -519,8 +376,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
p.cfg.debugBatchStore(batchInfo)
log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)
p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum
return batchInfo, nil
}
@@ -542,12 +397,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
// Take the lastL1BatchBlockNum as the biggest between the last
// scheduled one, and the synchronized one.
lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
}
// Set Debug information
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
batchInfo.Debug.L1BatchBlockScheduleDeadline =

View File

@@ -25,14 +25,6 @@ import (
"github.com/stretchr/testify/require"
)
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestPipelineShouldL1L2Batch(t *testing.T) {
ethClientSetup := test.NewClientSetupExample()
ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -85,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
//
// Scheduled L1Batch
//
pipeline.state.lastScheduledL1BatchBlockNum = startBlock
pipeline.lastScheduledL1BatchBlockNum = startBlock
stats.Sync.LastL1BatchBlock = startBlock - 10
// We are are one block before the timeout range * 0.5
@@ -136,11 +128,6 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
blocks, err := tc.GenerateBlocksFromInstructions(set)
require.NoError(t, err)
require.NotNil(t, blocks)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("0")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")
ethAddTokens(blocks, ethClient)
err = ethClient.CtlAddBlocks(blocks)
@@ -185,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
// users with positive balances
tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
syncStats := sync.Stats()
batchNum := syncStats.Sync.LastBatch.BatchNum
batchNum := common.BatchNum(syncStats.Sync.LastBatch)
syncSCVars := sync.SCVars()
pipeline, err := coord.newPipeline(ctx)

View File

@@ -13,23 +13,13 @@ import (
// PurgerCfg is the purger configuration
type PurgerCfg struct {
// PurgeBatchDelay is the delay between batches to purge outdated
// transactions. Outdated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
// PurgeBatchDelay is the delay between batches to purge outdated transactions
PurgeBatchDelay int64
// InvalidateBatchDelay is the delay between batches to mark invalid
// transactions due to nonce lower than the account nonce.
// InvalidateBatchDelay is the delay between batches to mark invalid transactions
InvalidateBatchDelay int64
// PurgeBlockDelay is the delay between blocks to purge outdated
// transactions. Oudated L2Txs are those that have been forged or
// marked as invalid for longer than the SafetyPeriod and pending L2Txs
// that have been in the pool for longer than TTL once there are
// MaxTxs.
// PurgeBlockDelay is the delay between blocks to purge outdated transactions
PurgeBlockDelay int64
// InvalidateBlockDelay is the delay between blocks to mark invalid
// transactions due to nonce lower than the account nonce.
// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
InvalidateBlockDelay int64
}

View File

@@ -28,14 +28,12 @@ func newStateDB(t *testing.T) *statedb.LocalStateDB {
syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
deleteme = append(deleteme, syncDBPath)
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: syncDBPath, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 48})
syncStateDB, err := statedb.NewStateDB(syncDBPath, 128, statedb.TypeSynchronizer, 48)
assert.NoError(t, err)
stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
require.NoError(t, err)
deleteme = append(deleteme, stateDBPath)
stateDB, err := statedb.NewLocalStateDB(statedb.Config{Path: stateDBPath, Keep: 128,
Type: statedb.TypeTxSelector, NLevels: 0}, syncStateDB)
stateDB, err := statedb.NewLocalStateDB(stateDBPath, 128, syncStateDB, statedb.TypeTxSelector, 0)
require.NoError(t, err)
return stateDB
}

View File

@@ -2,7 +2,6 @@ package coordinator
import (
"context"
"errors"
"fmt"
"math/big"
"time"
@@ -10,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db"
@@ -37,20 +35,12 @@ type TxManager struct {
vars synchronizer.SCVariables
statsVarsCh chan statsVars
discardPipelineCh chan int // int refers to the pipelineNum
minPipelineNum int
queue Queue
queue []*BatchInfo
// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
lastSuccessBatch common.BatchNum
// lastPendingBatch common.BatchNum
// accNonce is the account nonce in the last mined block (due to mined txs)
accNonce uint64
// accNextNonce is the nonce that we should use to send the next tx.
// In some cases this will be a reused nonce of an already pending tx.
accNextNonce uint64
lastSentL1BatchBlockNum int64
lastPendingBatch common.BatchNum
lastSuccessNonce uint64
lastPendingNonce uint64
}
// NewTxManager creates a new TxManager
@@ -64,11 +54,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
if err != nil {
return nil, tracerr.Wrap(err)
}
accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
if err != nil {
return nil, err
}
log.Infow("TxManager started", "nonce", accNonce)
lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
if err != nil {
return nil, err
}
if lastSuccessNonce != lastPendingNonce {
return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
lastSuccessNonce, lastPendingNonce))
}
log.Infow("TxManager started", "nonce", lastSuccessNonce)
return &TxManager{
cfg: *cfg,
ethClient: ethClient,
@@ -76,7 +74,6 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
coord: coord,
batchCh: make(chan *BatchInfo, queueLen),
statsVarsCh: make(chan statsVars, queueLen),
discardPipelineCh: make(chan int, queueLen),
account: accounts.Account{
Address: *address,
},
@@ -85,10 +82,8 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
vars: *initSCVars,
minPipelineNum: 0,
queue: NewQueue(),
accNonce: accNonce,
accNextNonce: accNonce,
lastSuccessNonce: lastSuccessNonce,
lastPendingNonce: lastPendingNonce,
}, nil
}
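The startup check compares the account's nonce at the latest mined block against its pending nonce, which also counts mempool transactions; if they differ, some transaction from this account is still unmined. A sketch of the same check using go-ethereum's ethclient directly (the node goes through its own eth client wrapper):

```
package sketch

import (
	"context"
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// checkNoPendingTxs fails when the account still has unmined txs.
func checkNoPendingTxs(ctx context.Context, client *ethclient.Client,
	addr ethCommon.Address) error {
	mined, err := client.NonceAt(ctx, addr, nil) // nonce at the latest block
	if err != nil {
		return err
	}
	pending, err := client.PendingNonceAt(ctx, addr) // includes mempool txs
	if err != nil {
		return err
	}
	if mined != pending {
		return fmt.Errorf("unmined txs pending: mined nonce %v != pending nonce %v",
			mined, pending)
	}
	return nil
}
```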
@@ -109,17 +104,16 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
}
}
// DiscardPipeline is a thread safe method to notify about a discarded pipeline
// due to a reorg
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
select {
case t.discardPipelineCh <- pipelineNum:
case <-ctx.Done():
}
}
func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
updateSCVars(&t.vars, vars)
if vars.Rollup != nil {
t.vars.Rollup = *vars.Rollup
}
if vars.Auction != nil {
t.vars.Auction = *vars.Auction
}
if vars.WDelayer != nil {
t.vars.WDelayer = *vars.WDelayer
}
}
// NewAuth generates a new auth object for an ethereum transaction
@@ -128,14 +122,10 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
if err != nil {
return nil, tracerr.Wrap(err)
}
if t.cfg.GasPriceIncPerc != 0 {
inc := new(big.Int).Set(gasPrice)
inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc))
// nolint reason: to calculate percentages we use 100
inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
const gasPriceDiv = 100
inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
gasPrice.Add(gasPrice, inc)
}
// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)
auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -144,13 +134,6 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
}
auth.Value = big.NewInt(0) // in wei
// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
// This requires a function that estimates the gas usage of the
// forgeBatch call based on the contents of the ForgeBatch args:
// - length of l2txs
// - length of l1Usertxs
// - length of l1CoordTxs with authorization signature
// - length of l1CoordTxs without authorization signature
// - etc.
auth.GasLimit = 1000000
auth.GasPrice = gasPrice
auth.Nonce = nil
@@ -158,75 +141,29 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
return auth, nil
}
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
nextBlock := t.stats.Eth.LastBlock.Num + 1
if !t.canForgeAt(nextBlock) {
return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
}
if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
}
margin := t.cfg.SendBatchBlocksMarginCheck
if margin != 0 {
if !t.canForgeAt(nextBlock + margin) {
return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
margin, nextBlock))
}
if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
margin, nextBlock))
}
}
return nil
}
func addPerc(v *big.Int, p int64) *big.Int {
r := new(big.Int).Set(v)
r.Mul(r, big.NewInt(p))
// nolint reason: to calculate percentages we divide by 100
r.Div(r, big.NewInt(100)) //nolint:gomnd
return r.Add(v, r)
}
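addPerc leaves its input untouched and returns v + v*p/100 (integer division). For example:

```
// Example (same package as addPerc above): a 10% bump on a 50 gwei gas price.
func exampleAddPerc() {
	gasPrice := big.NewInt(50000000000)
	bumped := addPerc(gasPrice, 10)
	fmt.Println(bumped) // 55000000000; gasPrice itself is unchanged
}
```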
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
// TODO: Check if we can forge in the next blockNum, abort if we can't
batchInfo.Debug.Status = StatusSent
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.SendTimestamp = time.Now()
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
var ethTx *types.Transaction
var err error
auth, err := t.NewAuth(ctx)
if err != nil {
return tracerr.Wrap(err)
}
auth.Nonce = big.NewInt(int64(t.accNextNonce))
if resend {
auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
}
auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
t.lastPendingNonce++
for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
auth.GasPrice, t.cfg.MaxGasPrice))
}
// RollupForgeBatch() calls ethclient.SendTransaction()
ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
if errors.Is(err, core.ErrNonceTooLow) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Add(auth.Nonce, big.NewInt(1))
attempt--
} else if errors.Is(err, core.ErrNonceTooHigh) {
log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
attempt--
} else if errors.Is(err, core.ErrUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if errors.Is(err, core.ErrReplaceUnderpriced) {
log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
auth.GasPrice = addPerc(auth.GasPrice, 10)
attempt--
} else if err != nil {
if err != nil {
// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
// log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
// "block", t.stats.Eth.LastBlock.Num+1)
// return tracerr.Wrap(err)
// }
log.Errorw("TxManager ethClient.RollupForgeBatch",
"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
"batchNum", batchInfo.BatchNum)
@@ -242,29 +179,10 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
if err != nil {
return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
}
if !resend {
t.accNextNonce = auth.Nonce.Uint64() + 1
}
batchInfo.EthTx = ethTx
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
now := time.Now()
batchInfo.SendTimestamp = now
if resend {
batchInfo.Debug.ResendNum++
}
batchInfo.Debug.Status = StatusSent
batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
t.cfg.debugBatchStore(batchInfo)
if !resend {
if batchInfo.L1Batch {
t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
}
}
t.lastPendingBatch = batchInfo.BatchNum
if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
return tracerr.Wrap(err)
}
@@ -307,20 +225,13 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
receipt := batchInfo.Receipt
if receipt != nil {
if batchInfo.EthTx.Nonce()+1 > t.accNonce {
t.accNonce = batchInfo.EthTx.Nonce() + 1
}
if receipt.Status == types.ReceiptStatusFailed {
batchInfo.Debug.Status = StatusFailed
t.cfg.debugBatchStore(batchInfo)
_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
"err", err)
batchInfo.EthTxErr = err
if batchInfo.BatchNum <= t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum - 1
}
t.cfg.debugBatchStore(batchInfo)
return nil, tracerr.Wrap(fmt.Errorf(
"ethereum transaction receipt status is failed: %w", err))
} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -328,17 +239,6 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
batchInfo.Debug.StartBlockNum
if batchInfo.Debug.StartToMineDelay == 0 {
if block, err := t.ethClient.EthBlockByNumber(ctx,
receipt.BlockNumber.Int64()); err != nil {
log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
} else {
batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.SendTimestamp).Seconds()
batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
batchInfo.Debug.StartTimestamp).Seconds()
}
}
t.cfg.debugBatchStore(batchInfo)
if batchInfo.BatchNum > t.lastSuccessBatch {
t.lastSuccessBatch = batchInfo.BatchNum
@@ -350,73 +250,10 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
return nil, nil
}
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
// Queue of BatchInfos
type Queue struct {
list []*BatchInfo
// nonceByBatchNum map[common.BatchNum]uint64
next int
}
// NewQueue returns a new queue
func NewQueue() Queue {
return Queue{
list: make([]*BatchInfo, 0),
// nonceByBatchNum: make(map[common.BatchNum]uint64),
next: 0,
}
}
// Len is the length of the queue
func (q *Queue) Len() int {
return len(q.list)
}
// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
if position >= len(q.list) {
return nil
}
return q.list[position]
}
// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
if len(q.list) == 0 {
return 0, nil
}
defer func() { q.next = (q.next + 1) % len(q.list) }()
return q.next, q.list[q.next]
}
// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
// batchInfo := q.list[position]
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
q.list = append(q.list[:position], q.list[position+1:]...)
if len(q.list) == 0 {
q.next = 0
} else {
q.next = position % len(q.list)
}
}
// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
q.list = append(q.list, batchInfo)
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// nonce, ok := q.nonceByBatchNum[batchNum]
// return nonce, ok
// }
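For reference, the removed Queue cycles through its entries round-robin and keeps `next` pointing at a valid position across removals. A usage sketch against the code above:

```
// exampleQueue walks the round-robin behaviour of Queue.
func exampleQueue() {
	q := NewQueue()
	a, b, c := &BatchInfo{}, &BatchInfo{}, &BatchInfo{}
	q.Push(a)
	q.Push(b)
	q.Push(c)
	q.Next()           // returns (0, a), advances next to 1
	q.Next()           // returns (1, b), advances next to 2
	pos, _ := q.Next() // returns (2, c), wraps next to 0
	q.Remove(pos)      // drops c; next stays at a valid position (0)
}
```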
// Run the TxManager
func (t *TxManager) Run(ctx context.Context) {
waitCh := time.After(longWaitDuration)
next := 0
waitDuration := longWaitDuration
var statsVars statsVars
select {
@@ -426,7 +263,7 @@ func (t *TxManager) Run(ctx context.Context) {
t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars)
log.Infow("TxManager: received initial statsVars",
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
for {
select {
@@ -436,27 +273,8 @@ func (t *TxManager) Run(ctx context.Context) {
case statsVars := <-t.statsVarsCh:
t.stats = statsVars.Stats
t.syncSCVars(statsVars.Vars)
case pipelineNum := <-t.discardPipelineCh:
t.minPipelineNum = pipelineNum + 1
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
}
case batchInfo := <-t.batchCh:
if batchInfo.PipelineNum < t.minPipelineNum {
log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
}
if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
log.Warnw("TxManager: shouldSend", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
continue
}
if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
continue
} else if err != nil {
// If we reach here it's because our ethNode has
@@ -464,20 +282,19 @@ func (t *TxManager) Run(ctx context.Context) {
// ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that
// can't be mined)
log.Warnw("TxManager: forgeBatch send failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch send: %v", err)})
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
continue
}
t.queue.Push(batchInfo)
waitCh = time.After(t.cfg.TxManagerCheckInterval)
case <-waitCh:
queuePosition, batchInfo := t.queue.Next()
if batchInfo == nil {
waitCh = time.After(longWaitDuration)
t.queue = append(t.queue, batchInfo)
waitDuration = t.cfg.TxManagerCheckInterval
case <-time.After(waitDuration):
if len(t.queue) == 0 {
waitDuration = longWaitDuration
continue
}
current := next
next = (current + 1) % len(t.queue)
batchInfo := t.queue[current]
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
continue
} else if err != nil { //nolint:staticcheck
@@ -487,8 +304,7 @@ func (t *TxManager) Run(ctx context.Context) {
// if it was not mined, mined and successful, or
// mined and failed. This could be due to the
// ethNode failure.
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
}
confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -496,106 +312,32 @@ func (t *TxManager) Run(ctx context.Context) {
continue
} else if err != nil { //nolint:staticcheck
// Transaction was rejected
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
t.queue = append(t.queue[:current], t.queue[current+1:]...)
if len(t.queue) == 0 {
next = 0
} else {
next = current % len(t.queue)
}
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
continue
t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
}
now := time.Now()
if !t.cfg.EthNoReuseNonce && confirm == nil &&
now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
continue
} else if err != nil {
// If we reach here it's because our ethNode has
// been unable to send the transaction to
// ethereum. This could be due to the ethNode
// failure, or an invalid transaction (that
// can't be mined)
log.Warnw("TxManager: forgeBatch resend failed", "err", err,
"batch", batchInfo.BatchNum)
t.coord.SendMsg(ctx, MsgStopPipeline{
Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
continue
}
}
if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
log.Debugw("TxManager: forgeBatch tx confirmed",
"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
t.queue.Remove(queuePosition)
log.Debugw("TxManager tx for RollupForgeBatch confirmed",
"batch", batchInfo.BatchNum)
t.queue = append(t.queue[:current], t.queue[current+1:]...)
if len(t.queue) == 0 {
next = 0
} else {
next = current % len(t.queue)
}
}
}
}
}
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
next := 0
for {
batchInfo := t.queue.At(next)
if batchInfo == nil {
break
}
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
return nil
} else if err != nil {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful, or
// mined and failed. This could be due to the
// ethNode failure.
next++
continue
}
confirm, err := t.handleReceipt(ctx, batchInfo)
if ctx.Err() != nil {
return nil
} else if err != nil {
// Transaction was rejected
if t.minPipelineNum <= batchInfo.PipelineNum {
t.minPipelineNum = batchInfo.PipelineNum + 1
}
t.queue.Remove(next)
continue
}
// If tx is pending but is from a cancelled pipeline, remove it
// from the queue
if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum {
t.queue.Remove(next)
continue
}
}
next++
}
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
if err != nil {
return err
}
if !t.cfg.EthNoReuseNonce {
t.accNextNonce = accNonce
}
return nil
}
func (t *TxManager) canForgeAt(blockNum int64) bool {
// nolint reason: this function will be used in the future
//nolint:unused
func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
return canForge(&t.consts.Auction, &t.vars.Auction,
&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
t.cfg.ForgerAddress, blockNum)
}
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
}
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
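A quick numeric check of the threshold above (values are illustrative, not from the repo):

```
// With vars.Rollup.ForgeL1L2BatchTimeout = 10 and the last L1 batch
// forged at block 100:
//   blockNum 108: 108-100 = 8  < 9  -> false
//   blockNum 109: 109-100 = 9 >= 9  -> true (an L1L2 batch is due)
```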

View File

@@ -1,15 +0,0 @@
package coordinator
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddPerc(t *testing.T) {
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}
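From these assertions, `addPerc(v, p)` returns `v` increased by `p` percent with truncating integer division. A minimal sketch consistent with the test vectors, assuming this shape (the real implementation may differ):

```
func addPerc(v *big.Int, p int64) *big.Int {
	// v + v*p/100, truncated: addPerc(10, 20) = 10 + 200/100 = 12
	r := new(big.Int).Mul(v, big.NewInt(p))
	r.Div(r, big.NewInt(100))
	return r.Add(r, v)
}
```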

View File

@@ -164,19 +164,6 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil
}
// GetBatch returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
batchNum,
)
return &batch, err
}
// GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch
@@ -221,18 +208,6 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
return batchNum, tracerr.Wrap(row.Scan(&batchNum))
}
// GetLastBatch returns the last forged batch
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
)
return &batch, err
}
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch

View File

@@ -203,10 +203,6 @@ func TestBatches(t *testing.T) {
fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
assert.NoError(t, err)
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
// Test GetLastL1TxsNum
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
assert.NoError(t, err)
@@ -215,12 +211,6 @@ func TestBatches(t *testing.T) {
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
assert.NoError(t, err)
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}
func TestBids(t *testing.T) {
@@ -621,10 +611,10 @@ func TestTxs(t *testing.T) {
assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)
// Tx ID
assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())
assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
// Tx From and To IDx
assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)

View File

@@ -27,8 +27,6 @@ const (
// PathLast defines the subpath where the checkpoint of the last Batch
// of the StateDB is stored
PathLast = "last"
// DefaultKeep is the default value for the Keep parameter
DefaultKeep = 128
)
var (
@@ -36,18 +34,16 @@ var (
KeyCurrentBatch = []byte("k:currentbatch")
// keyCurrentIdx is used as key in the db to store the CurrentIdx
keyCurrentIdx = []byte("k:idx")
// ErrNoLast is returned when the KVDB has been configured to not have
// a Last checkpoint but a Last method is used
ErrNoLast = fmt.Errorf("no last checkpoint")
)
// KVDB represents the Key-Value DB object
type KVDB struct {
cfg Config
path string
db *pebble.Storage
// CurrentIdx holds the current Idx that the BatchBuilder is using
CurrentIdx common.Idx
CurrentBatch common.BatchNum
keep int
m sync.Mutex
last *Last
}
@@ -65,13 +61,13 @@ func (k *Last) setNew() error {
defer k.rw.Unlock()
if k.db != nil {
k.db.Close()
k.db = nil
}
lastPath := path.Join(k.path, PathLast)
if err := os.RemoveAll(lastPath); err != nil {
err := os.RemoveAll(lastPath)
if err != nil {
return tracerr.Wrap(err)
}
db, err := pebble.NewPebbleStorage(lastPath, false)
db, err := pebble.NewPebbleStorage(path.Join(k.path, lastPath), false)
if err != nil {
return tracerr.Wrap(err)
}
@@ -84,7 +80,6 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
defer k.rw.Unlock()
if k.db != nil {
k.db.Close()
k.db = nil
}
lastPath := path.Join(k.path, PathLast)
if err := kvdb.MakeCheckpointFromTo(batchNum, lastPath); err != nil {
@@ -101,48 +96,26 @@ func (k *Last) set(kvdb *KVDB, batchNum common.BatchNum) error {
func (k *Last) close() {
k.rw.Lock()
defer k.rw.Unlock()
if k.db != nil {
k.db.Close()
k.db = nil
}
}
// Config of the KVDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// At every checkpoint, check that there are no gaps between the
// checkpoints
NoGapsCheck bool
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
}
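For reference, a KVDB is opened from this Config the same way the tests further down do it (`dir` is a placeholder path):

```
// Keep the last 128 checkpoints; gap checking and Last reads enabled.
kv, err := NewKVDB(Config{Path: dir, Keep: 128})
if err != nil {
	return tracerr.Wrap(err)
}
defer kv.Close()
```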
// NewKVDB creates a new KVDB, allowing use of either in-memory or on-disk storage.
// Checkpoints older than the value defined by `keep` will be deleted.
// func NewKVDB(pathDB string, keep int) (*KVDB, error) {
func NewKVDB(cfg Config) (*KVDB, error) {
func NewKVDB(pathDB string, keep int) (*KVDB, error) {
var sto *pebble.Storage
var err error
sto, err = pebble.NewPebbleStorage(path.Join(cfg.Path, PathCurrent), false)
sto, err = pebble.NewPebbleStorage(path.Join(pathDB, PathCurrent), false)
if err != nil {
return nil, tracerr.Wrap(err)
}
var last *Last
if !cfg.NoLast {
last = &Last{
path: cfg.Path,
}
}
kvdb := &KVDB{
cfg: cfg,
path: pathDB,
db: sto,
last: last,
keep: keep,
last: &Last{
path: pathDB,
},
}
// load currentBatch
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
@@ -160,32 +133,29 @@ func NewKVDB(cfg Config) (*KVDB, error) {
}
// LastRead is a thread-safe method to query the last KVDB
func (k *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
if k.last == nil {
return tracerr.Wrap(ErrNoLast)
}
k.last.rw.RLock()
defer k.last.rw.RUnlock()
return fn(k.last.db)
func (kvdb *KVDB) LastRead(fn func(db *pebble.Storage) error) error {
kvdb.last.rw.RLock()
defer kvdb.last.rw.RUnlock()
return fn(kvdb.last.db)
}
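A hedged usage sketch of `LastRead`: the callback runs under a read lock on the `last` checkpoint, so the `*pebble.Storage` must not be retained after the callback returns (the key below is the real `keyCurrentIdx`; the copy-out pattern is the point):

```
var value []byte
err := kv.LastRead(func(db *pebble.Storage) error {
	v, err := db.Get([]byte("k:idx"))
	if err != nil {
		return tracerr.Wrap(err)
	}
	value = append([]byte{}, v...) // copy out; db is only valid inside fn
	return nil
})
```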
// DB returns the *pebble.Storage from the KVDB
func (k *KVDB) DB() *pebble.Storage {
return k.db
func (kvdb *KVDB) DB() *pebble.Storage {
return kvdb.db
}
// StorageWithPrefix returns the db.Storage with the given prefix from the
// current KVDB
func (k *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
return k.db.WithPrefix(prefix)
func (kvdb *KVDB) StorageWithPrefix(prefix []byte) db.Storage {
return kvdb.db.WithPrefix(prefix)
}
// Reset resets the KVDB to the checkpoint at the given batchNum. Reset does
// not delete the checkpoints between old current and the new current, those
// checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them.
func (k *KVDB) Reset(batchNum common.BatchNum) error {
return k.reset(batchNum, true)
func (kvdb *KVDB) Reset(batchNum common.BatchNum) error {
return kvdb.reset(batchNum, true)
}
// reset resets the KVDB to the checkpoint at the given batchNum. Reset does
@@ -193,19 +163,21 @@ func (k *KVDB) Reset(batchNum common.BatchNum) error {
// checkpoints will remain in the storage, and eventually will be deleted when
// MakeCheckpoint overwrites them. `closeCurrent` will close the currently
// opened db before doing the reset.
func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
currentPath := path.Join(k.cfg.Path, PathCurrent)
func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
currentPath := path.Join(kvdb.path, PathCurrent)
if closeCurrent && k.db != nil {
k.db.Close()
k.db = nil
if closeCurrent {
if err := kvdb.db.Pebble().Close(); err != nil {
return tracerr.Wrap(err)
}
}
// remove 'current'
if err := os.RemoveAll(currentPath); err != nil {
err := os.RemoveAll(currentPath)
if err != nil {
return tracerr.Wrap(err)
}
// remove all checkpoints > batchNum
list, err := k.ListCheckpoints()
list, err := kvdb.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
@@ -218,7 +190,7 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
}
}
for _, bn := range list[start:] {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -229,43 +201,39 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
if err != nil {
return tracerr.Wrap(err)
}
k.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0
if k.last != nil {
if err := k.last.setNew(); err != nil {
kvdb.db = sto
kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
kvdb.CurrentBatch = 0
if err := kvdb.last.setNew(); err != nil {
return tracerr.Wrap(err)
}
}
return nil
}
// copy 'batchNum' to 'current'
if err := k.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
if err := kvdb.MakeCheckpointFromTo(batchNum, currentPath); err != nil {
return tracerr.Wrap(err)
}
// copy 'batchNum' to 'last'
if k.last != nil {
if err := k.last.set(k, batchNum); err != nil {
if err := kvdb.last.set(kvdb, batchNum); err != nil {
return tracerr.Wrap(err)
}
}
// open the new 'current'
sto, err := pebble.NewPebbleStorage(currentPath, false)
if err != nil {
return tracerr.Wrap(err)
}
k.db = sto
kvdb.db = sto
// get currentBatch num
k.CurrentBatch, err = k.GetCurrentBatch()
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
if err != nil {
return tracerr.Wrap(err)
}
// idx is obtained from the statedb reset
k.CurrentIdx, err = k.GetCurrentIdx()
kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -275,28 +243,28 @@ func (k *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
// ResetFromSynchronizer performs a reset in the KVDB getting the state from
// synchronizerKVDB for the given batchNum.
func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB *KVDB) error {
if synchronizerKVDB == nil {
return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil"))
}
currentPath := path.Join(k.cfg.Path, PathCurrent)
if k.db != nil {
k.db.Close()
k.db = nil
currentPath := path.Join(kvdb.path, PathCurrent)
if err := kvdb.db.Pebble().Close(); err != nil {
return tracerr.Wrap(err)
}
// remove 'current'
if err := os.RemoveAll(currentPath); err != nil {
err := os.RemoveAll(currentPath)
if err != nil {
return tracerr.Wrap(err)
}
// remove all checkpoints
list, err := k.ListCheckpoints()
list, err := kvdb.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
for _, bn := range list {
if err := k.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -307,14 +275,14 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
if err != nil {
return tracerr.Wrap(err)
}
k.db = sto
k.CurrentIdx = common.RollupConstReservedIDx // 255
k.CurrentBatch = 0
kvdb.db = sto
kvdb.CurrentIdx = common.RollupConstReservedIDx // 255
kvdb.CurrentBatch = 0
return nil
}
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
// copy synchronizer'BatchNumX' to 'BatchNumX'
if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
@@ -322,7 +290,7 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
}
// copy 'BatchNumX' to 'current'
err = k.MakeCheckpointFromTo(batchNum, currentPath)
err = kvdb.MakeCheckpointFromTo(batchNum, currentPath)
if err != nil {
return tracerr.Wrap(err)
}
@@ -332,15 +300,15 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
if err != nil {
return tracerr.Wrap(err)
}
k.db = sto
kvdb.db = sto
// get currentBatch num
k.CurrentBatch, err = k.GetCurrentBatch()
kvdb.CurrentBatch, err = kvdb.GetCurrentBatch()
if err != nil {
return tracerr.Wrap(err)
}
// get currentIdx
k.CurrentIdx, err = k.GetCurrentIdx()
kvdb.CurrentIdx, err = kvdb.GetCurrentIdx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -349,8 +317,8 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
}
// GetCurrentBatch returns the current BatchNum stored in the KVDB
func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) {
cbBytes, err := k.db.Get(KeyCurrentBatch)
func (kvdb *KVDB) GetCurrentBatch() (common.BatchNum, error) {
cbBytes, err := kvdb.db.Get(KeyCurrentBatch)
if tracerr.Unwrap(err) == db.ErrNotFound {
return 0, nil
}
@@ -361,12 +329,12 @@ func (k *KVDB) GetCurrentBatch() (common.BatchNum, error) {
}
// setCurrentBatch stores the current BatchNum in the KVDB
func (k *KVDB) setCurrentBatch() error {
tx, err := k.db.NewTx()
func (kvdb *KVDB) setCurrentBatch() error {
tx, err := kvdb.db.NewTx()
if err != nil {
return tracerr.Wrap(err)
}
err = tx.Put(KeyCurrentBatch, k.CurrentBatch.Bytes())
err = tx.Put(KeyCurrentBatch, kvdb.CurrentBatch.Bytes())
if err != nil {
return tracerr.Wrap(err)
}
@@ -377,9 +345,9 @@ func (k *KVDB) setCurrentBatch() error {
}
// GetCurrentIdx returns the stored Idx from the KVDB, which is the last Idx
// used for an Account in the k.
func (k *KVDB) GetCurrentIdx() (common.Idx, error) {
idxBytes, err := k.db.Get(keyCurrentIdx)
// used for an Account in the KVDB.
func (kvdb *KVDB) GetCurrentIdx() (common.Idx, error) {
idxBytes, err := kvdb.db.Get(keyCurrentIdx)
if tracerr.Unwrap(err) == db.ErrNotFound {
return common.RollupConstReservedIDx, nil // 255, nil
}
@@ -390,10 +358,10 @@ func (k *KVDB) GetCurrentIdx() (common.Idx, error) {
}
// SetCurrentIdx stores Idx in the KVDB
func (k *KVDB) SetCurrentIdx(idx common.Idx) error {
k.CurrentIdx = idx
func (kvdb *KVDB) SetCurrentIdx(idx common.Idx) error {
kvdb.CurrentIdx = idx
tx, err := k.db.NewTx()
tx, err := kvdb.db.NewTx()
if err != nil {
return tracerr.Wrap(err)
}
@@ -413,64 +381,49 @@ func (k *KVDB) SetCurrentIdx(idx common.Idx) error {
// MakeCheckpoint does a checkpoint at the given batchNum in the defined path.
// Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the k.
func (k *KVDB) MakeCheckpoint() error {
// Checkpoint of the current state of the KVDB.
func (kvdb *KVDB) MakeCheckpoint() error {
// advance currentBatch
k.CurrentBatch++
kvdb.CurrentBatch++
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, k.CurrentBatch))
checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, kvdb.CurrentBatch))
if err := k.setCurrentBatch(); err != nil {
if err := kvdb.setCurrentBatch(); err != nil {
return tracerr.Wrap(err)
}
// if checkpoint BatchNum already exist in disk, delete it
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(checkpointPath); err != nil {
if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
err := os.RemoveAll(checkpointPath)
if err != nil {
return tracerr.Wrap(err)
}
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
}
// execute Checkpoint
if err := k.db.Pebble().Checkpoint(checkpointPath); err != nil {
if err := kvdb.db.Pebble().Checkpoint(checkpointPath); err != nil {
return tracerr.Wrap(err)
}
// copy 'CurrentBatch' to 'last'
if k.last != nil {
if err := k.last.set(k, k.CurrentBatch); err != nil {
if err := kvdb.last.set(kvdb, kvdb.CurrentBatch); err != nil {
return tracerr.Wrap(err)
}
}
// delete old checkpoints
if err := k.deleteOldCheckpoints(); err != nil {
if err := kvdb.deleteOldCheckpoints(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// DeleteCheckpoint removes if exist the checkpoint of the given batchNum
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
func (kvdb *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
} else if err != nil {
return tracerr.Wrap(err)
}
return os.RemoveAll(checkpointPath)
@@ -478,8 +431,8 @@ func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
// ListCheckpoints returns the list of batchNums of the checkpoints, sorted.
// If there's a gap in the list of checkpoints, an error is returned.
func (k *KVDB) ListCheckpoints() ([]int, error) {
files, err := ioutil.ReadDir(k.cfg.Path)
func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
files, err := ioutil.ReadDir(kvdb.path)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -496,12 +449,12 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
}
}
sort.Ints(checkpoints)
if !k.cfg.NoGapsCheck && len(checkpoints) > 0 {
if len(checkpoints) > 0 {
first := checkpoints[0]
for _, checkpoint := range checkpoints[1:] {
first++
if checkpoint != first {
log.Errorw("gap between checkpoints", "checkpoints", checkpoints)
log.Errorw("GAP", "checkpoints", checkpoints)
return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint))
}
}
@@ -511,14 +464,14 @@ func (k *KVDB) ListCheckpoints() ([]int, error) {
// deleteOldCheckpoints deletes old checkpoints when there are more than
// the configured `Keep` checkpoints
func (k *KVDB) deleteOldCheckpoints() error {
list, err := k.ListCheckpoints()
func (kvdb *KVDB) deleteOldCheckpoints() error {
list, err := kvdb.ListCheckpoints()
if err != nil {
return tracerr.Wrap(err)
}
if k.cfg.Keep > 0 && len(list) > k.cfg.Keep {
for _, checkpoint := range list[:len(list)-k.cfg.Keep] {
if err := k.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
if len(list) > kvdb.keep {
for _, checkpoint := range list[:len(list)-kvdb.keep] {
if err := kvdb.DeleteCheckpoint(common.BatchNum(checkpoint)); err != nil {
return tracerr.Wrap(err)
}
}
@@ -529,40 +482,43 @@ func (k *KVDB) deleteOldCheckpoints() error {
// MakeCheckpointFromTo makes a checkpoint from the current db at fromBatchNum
// to the dest folder. This method takes a lock, so it can safely be called
// from multiple places at the same time.
func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
func (kvdb *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) error {
source := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, fromBatchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
// if kvdb does not have a checkpoint at batchNum, return err
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
} else if err != nil {
return tracerr.Wrap(err)
}
// By locking we allow calling MakeCheckpointFromTo from multiple
// places at the same time for the same stateDB. This allows the
// synchronizer to do a reset to a batchNum at the same time as the
// pipeline is doing a txSelector.Reset and batchBuilder.Reset from
// synchronizer to the same batchNum.
k.m.Lock()
defer k.m.Unlock()
kvdb.m.Lock()
defer kvdb.m.Unlock()
return pebbleMakeCheckpoint(source, dest)
}
func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(dest); err != nil {
if _, err := os.Stat(dest); !os.IsNotExist(err) {
err := os.RemoveAll(dest)
if err != nil {
return tracerr.Wrap(err)
}
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
}
sto, err := pebble.NewPebbleStorage(source, false)
if err != nil {
return tracerr.Wrap(err)
}
defer sto.Close()
defer func() {
errClose := sto.Pebble().Close()
if errClose != nil {
log.Errorw("Pebble.Close", "err", errClose)
}
}()
// execute Checkpoint
err = sto.Pebble().Checkpoint(dest)
@@ -574,12 +530,7 @@ func pebbleMakeCheckpoint(source, dest string) error {
}
// Close the DB
func (k *KVDB) Close() {
if k.db != nil {
k.db.Close()
k.db = nil
}
if k.last != nil {
k.last.close()
}
func (kvdb *KVDB) Close() {
kvdb.db.Close()
kvdb.last.close()
}
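Putting the checkpoint API together, a lifecycle sketch under the Config-based signature on one side of this diff (error handling collapsed to panic for brevity):

```
kv, err := NewKVDB(Config{Path: dir, Keep: 16})
if err != nil {
	panic(err)
}
defer kv.Close()
if err := kv.MakeCheckpoint(); err != nil { // writes checkpoint BatchNum1
	panic(err)
}
if err := kv.MakeCheckpoint(); err != nil { // writes checkpoint BatchNum2
	panic(err)
}
list, err := kv.ListCheckpoints() // -> [1 2]; errors on gaps unless NoGapsCheck
if err != nil {
	panic(err)
}
_ = list
if err := kv.Reset(1); err != nil { // roll current state back to checkpoint 1
	panic(err)
}
```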

View File

@@ -37,7 +37,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
db, err := NewKVDB(Config{Path: dir, Keep: 128})
db, err := NewKVDB(dir, 128)
require.NoError(t, err)
// add test key-values
@@ -72,7 +72,7 @@ func TestCheckpoints(t *testing.T) {
err = db.Reset(3)
require.NoError(t, err)
printCheckpoints(t, db.cfg.Path)
printCheckpoints(t, db.path)
// check that currentBatch is as expected after Reset
cb, err = db.GetCurrentBatch()
@@ -99,7 +99,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewKVDB(Config{Path: dirLocal, Keep: 128})
ldb, err := NewKVDB(dirLocal, 128)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -120,7 +120,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewKVDB(Config{Path: dirLocal2, Keep: 128})
ldb2, err := NewKVDB(dirLocal2, 128)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -139,9 +139,9 @@ func TestCheckpoints(t *testing.T) {
debug := false
if debug {
printCheckpoints(t, db.cfg.Path)
printCheckpoints(t, ldb.cfg.Path)
printCheckpoints(t, ldb2.cfg.Path)
printCheckpoints(t, db.path)
printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.path)
}
}
@@ -150,7 +150,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
db, err := NewKVDB(Config{Path: dir, Keep: 128})
db, err := NewKVDB(dir, 128)
require.NoError(t, err)
numCheckpoints := 16
@@ -181,7 +181,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep})
db, err := NewKVDB(dir, keep)
require.NoError(t, err)
numCheckpoints := 32
@@ -202,7 +202,7 @@ func TestGetCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
db, err := NewKVDB(Config{Path: dir, Keep: keep})
db, err := NewKVDB(dir, keep)
require.NoError(t, err)
idx, err := db.GetCurrentIdx()
@@ -211,7 +211,7 @@ func TestGetCurrentIdx(t *testing.T) {
db.Close()
db, err = NewKVDB(Config{Path: dir, Keep: keep})
db, err = NewKVDB(dir, keep)
require.NoError(t, err)
idx, err = db.GetCurrentIdx()
@@ -227,7 +227,7 @@ func TestGetCurrentIdx(t *testing.T) {
db.Close()
db, err = NewKVDB(Config{Path: dir, Keep: keep})
db, err = NewKVDB(dir, keep)
require.NoError(t, err)
idx, err = db.GetCurrentIdx()

View File

@@ -324,9 +324,8 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.db.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
WHERE (state = $2 OR state = $3) AND batch_num > $4`,
common.PoolL2TxStatePending,
common.PoolL2TxStateForging,
common.PoolL2TxStateForged,
common.PoolL2TxStateInvalid,
lastValidBatch,

View File

@@ -52,40 +52,19 @@ const (
// TypeBatchBuilder defines a StateDB used by the BatchBuilder, that
// generates the ExitTree and the ZKInput when processing the txs
TypeBatchBuilder = "batchbuilder"
// MaxNLevels is the maximum value of NLevels for the merkle tree,
// which comes from the fact that AccountIdx has 48 bits.
MaxNLevels = 48
)
// TypeStateDB determines the type of StateDB
type TypeStateDB string
// Config of the StateDB
type Config struct {
// Path where the checkpoints will be stored
Path string
// Keep is the number of old checkpoints to keep. If 0, all
// checkpoints are kept.
Keep int
// NoLast skips having an opened DB with a checkpoint to the last
// batchNum for thread-safe reads.
NoLast bool
// Type of StateDB
Type TypeStateDB
// NLevels is the number of merkle tree levels in case the Type uses a
// merkle tree. If the Type doesn't use a merkle tree, NLevels should
// be 0.
NLevels int
// At every checkpoint, check that there are no gaps between the
// checkpoints
noGapsCheck bool
}
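For example, the tests below open a synchronizer StateDB with a 32-level merkle tree like this:

```
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
if err != nil {
	panic(err)
}
defer sdb.Close()
```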
// StateDB represents the StateDB object
type StateDB struct {
cfg Config
path string
Typ TypeStateDB
db *kvdb.KVDB
nLevels int
MT *merkletree.MerkleTree
keep int
}
// Last offers a subset of view methods of the StateDB that can be
@@ -125,40 +104,36 @@ func (s *Last) GetAccounts() ([]common.Account, error) {
// NewStateDB creates a new StateDB, allowing use of either in-memory or
// on-disk storage. Checkpoints older than the value defined by `keep` will
// be deleted.
// func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
func NewStateDB(cfg Config) (*StateDB, error) {
func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
var kv *kvdb.KVDB
var err error
kv, err = kvdb.NewKVDB(kvdb.Config{Path: cfg.Path, Keep: cfg.Keep,
NoGapsCheck: cfg.noGapsCheck, NoLast: cfg.NoLast})
kv, err = kvdb.NewKVDB(pathDB, keep)
if err != nil {
return nil, tracerr.Wrap(err)
}
var mt *merkletree.MerkleTree = nil
if cfg.Type == TypeSynchronizer || cfg.Type == TypeBatchBuilder {
mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), cfg.NLevels)
if typ == TypeSynchronizer || typ == TypeBatchBuilder {
mt, err = merkletree.NewMerkleTree(kv.StorageWithPrefix(PrefixKeyMT), nLevels)
if err != nil {
return nil, tracerr.Wrap(err)
}
}
if cfg.Type == TypeTxSelector && cfg.NLevels != 0 {
if typ == TypeTxSelector && nLevels != 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid StateDB parameters: StateDB type==TypeStateDB can not have nLevels!=0"))
}
return &StateDB{
cfg: cfg,
path: pathDB,
db: kv,
nLevels: nLevels,
MT: mt,
Typ: typ,
keep: keep,
}, nil
}
// Type returns the StateDB configured Type
func (s *StateDB) Type() TypeStateDB {
return s.cfg.Type
}
// LastRead is a thread-safe method to query the last checkpoint of the StateDB
// via the Last type methods
func (s *StateDB) LastRead(fn func(sdbLast *Last) error) error {
@@ -204,7 +179,7 @@ func (s *StateDB) LastGetCurrentBatch() (common.BatchNum, error) {
func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
var root *big.Int
if err := s.LastRead(func(sdb *Last) error {
mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.cfg.NLevels)
mt, err := merkletree.NewMerkleTree(sdb.DB().WithPrefix(PrefixKeyMT), s.nLevels)
if err != nil {
return tracerr.Wrap(err)
}
@@ -220,7 +195,7 @@ func (s *StateDB) LastMTGetRoot() (*big.Int, error) {
// Internally this advances & stores the current BatchNum, and then stores a
// Checkpoint of the current state of the StateDB.
func (s *StateDB) MakeCheckpoint() error {
log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.cfg.Type)
log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch()+1, "type", s.Typ)
return s.db.MakeCheckpoint()
}
@@ -255,8 +230,8 @@ func (s *StateDB) SetCurrentIdx(idx common.Idx) error {
// those checkpoints will remain in the storage, and eventually will be
// deleted when MakeCheckpoint overwrites them.
func (s *StateDB) Reset(batchNum common.BatchNum) error {
log.Debugw("Making StateDB Reset", "batch", batchNum, "type", s.cfg.Type)
if err := s.db.Reset(batchNum); err != nil {
err := s.db.Reset(batchNum)
if err != nil {
return tracerr.Wrap(err)
}
if s.MT != nil {
@@ -267,6 +242,7 @@ func (s *StateDB) Reset(batchNum common.BatchNum) error {
}
s.MT = mt
}
log.Debugw("Making StateDB Reset", "batch", batchNum)
return nil
}
@@ -485,10 +461,9 @@ type LocalStateDB struct {
// NewLocalStateDB returns a new LocalStateDB connected to the given
// synchronizerDB. Checkpoints older than the value defined by `keep` will be
// deleted.
func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error) {
cfg.noGapsCheck = true
cfg.NoLast = true
s, err := NewStateDB(cfg)
func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeStateDB,
nLevels int) (*LocalStateDB, error) {
s, err := NewStateDB(path, keep, typ, nLevels)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -498,24 +473,18 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
}, nil
}
// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer {
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db)
if err != nil {
return tracerr.Wrap(err)
}
// open the MT for the current s.db
if l.MT != nil {
mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT),
l.MT.MaxLevels())
mt, err := merkletree.NewMerkleTree(l.db.StorageWithPrefix(PrefixKeyMT), l.MT.MaxLevels())
if err != nil {
return tracerr.Wrap(err)
}

View File

@@ -45,7 +45,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err)
// test values
@@ -78,7 +78,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
// call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err)
v, err = sdb.db.DB().Get(k0)
assert.NotNil(t, err)
@@ -116,7 +116,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
bn, err := sdb.getCurrentBatch()
require.NoError(t, err)
assert.Equal(t, common.BatchNum(0), bn)
err = sdb.MakeCheckpoint()
err = sdb.db.MakeCheckpoint()
require.NoError(t, err)
bn, err = sdb.getCurrentBatch()
require.NoError(t, err)
@@ -158,7 +158,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
// call NewStateDB which should get the db at the last checkpoint state
// executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
sdb, err = NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err)
bn, err = sdb.getCurrentBatch()
@@ -182,7 +182,7 @@ func TestStateDBWithoutMT(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err)
// create test accounts
@@ -236,7 +236,7 @@ func TestStateDBWithMT(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err)
// create test accounts
@@ -290,7 +290,7 @@ func TestCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err)
err = sdb.Reset(0)
@@ -335,7 +335,7 @@ func TestCheckpoints(t *testing.T) {
assert.Equal(t, common.BatchNum(i+1), cb)
}
// printCheckpoints(t, sdb.cfg.Path)
// printCheckpoints(t, sdb.path)
// reset checkpoint
err = sdb.Reset(3)
@@ -371,7 +371,7 @@ func TestCheckpoints(t *testing.T) {
dirLocal, err := ioutil.TempDir("", "ldb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal))
ldb, err := NewLocalStateDB(Config{Path: dirLocal, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
ldb, err := NewLocalStateDB(dirLocal, 128, sdb, TypeBatchBuilder, 32)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
@@ -392,26 +392,28 @@ func TestCheckpoints(t *testing.T) {
dirLocal2, err := ioutil.TempDir("", "ldb2")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dirLocal2))
ldb2, err := NewLocalStateDB(Config{Path: dirLocal2, Keep: 128, Type: TypeBatchBuilder, NLevels: 32}, sdb)
ldb2, err := NewLocalStateDB(dirLocal2, 128, sdb, TypeBatchBuilder, 32)
require.NoError(t, err)
// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
err = ldb2.Reset(4, true)
require.NoError(t, err)
// check that currentBatch is 4 after the Reset
cb = ldb2.CurrentBatch()
cb, err = ldb2.db.GetCurrentBatch()
require.NoError(t, err)
assert.Equal(t, common.BatchNum(4), cb)
// advance one checkpoint in ldb2
err = ldb2.MakeCheckpoint()
err = ldb2.db.MakeCheckpoint()
require.NoError(t, err)
cb, err = ldb2.db.GetCurrentBatch()
require.NoError(t, err)
cb = ldb2.CurrentBatch()
assert.Equal(t, common.BatchNum(5), cb)
debug := false
if debug {
printCheckpoints(t, sdb.cfg.Path)
printCheckpoints(t, ldb.cfg.Path)
printCheckpoints(t, ldb2.cfg.Path)
printCheckpoints(t, sdb.path)
printCheckpoints(t, ldb.path)
printCheckpoints(t, ldb2.path)
}
}
@@ -419,7 +421,7 @@ func TestStateDBGetAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
require.NoError(t, err)
// create test accounts
@@ -466,7 +468,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err)
ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
@@ -540,7 +542,7 @@ func TestListCheckpoints(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
require.NoError(t, err)
numCheckpoints := 16
@@ -573,7 +575,7 @@ func TestDeleteOldCheckpoints(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err)
numCheckpoints := 32
@@ -594,7 +596,7 @@ func TestCurrentIdx(t *testing.T) {
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err)
idx := sdb.CurrentIdx()
@@ -602,7 +604,7 @@ func TestCurrentIdx(t *testing.T) {
sdb.Close()
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err)
idx = sdb.CurrentIdx()
@@ -616,30 +618,9 @@ func TestCurrentIdx(t *testing.T) {
sdb.Close()
sdb, err = NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
sdb, err = NewStateDB(dir, keep, TypeSynchronizer, 32)
require.NoError(t, err)
idx = sdb.CurrentIdx()
assert.Equal(t, common.Idx(255), idx)
}
func TestResetFromBadCheckpoint(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer require.NoError(t, os.RemoveAll(dir))
keep := 16
sdb, err := NewStateDB(Config{Path: dir, Keep: keep, Type: TypeSynchronizer, NLevels: 32})
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
err = sdb.MakeCheckpoint()
require.NoError(t, err)
// reset from a checkpoint that doesn't exist
err = sdb.Reset(10)
require.Error(t, err)
}

View File

@@ -18,7 +18,7 @@ func TestGetIdx(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := NewStateDB(Config{Path: dir, Keep: 128, Type: TypeTxSelector, NLevels: 0})
sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
assert.NoError(t, err)
var sk babyjub.PrivateKey

View File

@@ -13,9 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
"golang.org/x/sync/semaphore"
)
var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
// APIConnectionController is used to limit the number of open SQL connections used by the API
type APIConnectionController struct {
smphr *semaphore.Weighted
smphr semaphore.Semaphore
timeout time.Duration
}
// NewAPICnnectionController initializes an APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{
smphr: semaphore.NewWeighted(int64(maxConnections)),
smphr: semaphore.New(maxConnections),
timeout: timeout,
}
}
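A sketch of how such a controller would gate a query, assuming the `golang.org/x/sync/semaphore` variant shown in this hunk (`acquire` is a hypothetical helper, not lifted from the diff):

```
// acquire blocks until a connection slot is free or the timeout fires.
func (a *APIConnectionController) acquire(ctx context.Context) (release func(), err error) {
	ctx, cancel := context.WithTimeout(ctx, a.timeout)
	defer cancel()
	if err := a.smphr.Acquire(ctx, 1); err != nil {
		return nil, err // timed out waiting for a free SQL connection
	}
	return func() { a.smphr.Release(1) }, nil
}
```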

View File

@@ -254,7 +254,7 @@ type AuctionInterface interface {
//
AuctionConstants() (*common.AuctionConstants, error)
AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error)
AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error)
AuctionEventInit() (*AuctionEventInitialize, int64, error)
}
@@ -797,22 +797,15 @@ func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, erro
}
// AuctionEventsByBlock returns the events in a block that happened in the
// Auction Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*AuctionEvents, error) {
// Auction Smart Contract and the blockHash where the events happened. If there
// are no events in that block, blockHash is nil.
func (c *AuctionClient) AuctionEventsByBlock(blockNum int64) (*AuctionEvents, *ethCommon.Hash, error) {
var auctionEvents AuctionEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{
BlockHash: blockHash,
FromBlock: blockNumBigInt,
ToBlock: blockNumBigInt,
FromBlock: big.NewInt(blockNum),
ToBlock: big.NewInt(blockNum),
Addresses: []ethCommon.Address{
c.address,
},
@@ -821,16 +814,15 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
logs, err := c.client.client.FilterLogs(context.TODO(), query)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
if len(logs) == 0 {
return nil, nil
if len(logs) > 0 {
blockHash = &logs[0].BlockHash
}
for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash {
if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
}
switch vLog.Topics[0] {
case logAuctionNewBid:
@@ -841,7 +833,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
}
var newBid AuctionEventNewBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewBid, "NewBid", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
newBid.BidAmount = auxNewBid.BidAmount
newBid.Slot = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
@@ -850,19 +842,19 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
case logAuctionNewSlotDeadline:
var newSlotDeadline AuctionEventNewSlotDeadline
if err := c.contractAbi.UnpackIntoInterface(&newSlotDeadline, "NewSlotDeadline", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
auctionEvents.NewSlotDeadline = append(auctionEvents.NewSlotDeadline, newSlotDeadline)
case logAuctionNewClosedAuctionSlots:
var newClosedAuctionSlots AuctionEventNewClosedAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newClosedAuctionSlots, "NewClosedAuctionSlots", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
auctionEvents.NewClosedAuctionSlots = append(auctionEvents.NewClosedAuctionSlots, newClosedAuctionSlots)
case logAuctionNewOutbidding:
var newOutbidding AuctionEventNewOutbidding
if err := c.contractAbi.UnpackIntoInterface(&newOutbidding, "NewOutbidding", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
auctionEvents.NewOutbidding = append(auctionEvents.NewOutbidding, newOutbidding)
case logAuctionNewDonationAddress:
@@ -872,26 +864,26 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
case logAuctionNewBootCoordinator:
var newBootCoordinator AuctionEventNewBootCoordinator
if err := c.contractAbi.UnpackIntoInterface(&newBootCoordinator, "NewBootCoordinator", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
newBootCoordinator.NewBootCoordinator = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.NewBootCoordinator = append(auctionEvents.NewBootCoordinator, newBootCoordinator)
case logAuctionNewOpenAuctionSlots:
var newOpenAuctionSlots AuctionEventNewOpenAuctionSlots
if err := c.contractAbi.UnpackIntoInterface(&newOpenAuctionSlots, "NewOpenAuctionSlots", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
auctionEvents.NewOpenAuctionSlots = append(auctionEvents.NewOpenAuctionSlots, newOpenAuctionSlots)
case logAuctionNewAllocationRatio:
var newAllocationRatio AuctionEventNewAllocationRatio
if err := c.contractAbi.UnpackIntoInterface(&newAllocationRatio, "NewAllocationRatio", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
auctionEvents.NewAllocationRatio = append(auctionEvents.NewAllocationRatio, newAllocationRatio)
case logAuctionSetCoordinator:
var setCoordinator AuctionEventSetCoordinator
if err := c.contractAbi.UnpackIntoInterface(&setCoordinator, "SetCoordinator", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
setCoordinator.BidderAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
setCoordinator.ForgerAddress = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -899,7 +891,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
case logAuctionNewForgeAllocated:
var newForgeAllocated AuctionEventNewForgeAllocated
if err := c.contractAbi.UnpackIntoInterface(&newForgeAllocated, "NewForgeAllocated", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
newForgeAllocated.Bidder = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
newForgeAllocated.Forger = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -912,7 +904,7 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
}
var newDefaultSlotSetBid AuctionEventNewDefaultSlotSetBid
if err := c.contractAbi.UnpackIntoInterface(&auxNewDefaultSlotSetBid, "NewDefaultSlotSetBid", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
newDefaultSlotSetBid.NewInitialMinBid = auxNewDefaultSlotSetBid.NewInitialMinBid
newDefaultSlotSetBid.SlotSet = auxNewDefaultSlotSetBid.SlotSet.Int64()
@@ -925,11 +917,11 @@ func (c *AuctionClient) AuctionEventsByBlock(blockNum int64,
case logAuctionHEZClaimed:
var HEZClaimed AuctionEventHEZClaimed
if err := c.contractAbi.UnpackIntoInterface(&HEZClaimed, "HEZClaimed", vLog.Data); err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
HEZClaimed.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
auctionEvents.HEZClaimed = append(auctionEvents.HEZClaimed, HEZClaimed)
}
}
return &auctionEvents, nil
return &auctionEvents, blockHash, nil
}
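A usage sketch for the two query modes described in the doc comment above (this is the `(blockNum, blockHash)` signature; `num` and `hash` are placeholders):

```
func queryAuctionEvents(c *AuctionClient, num int64, hash ethCommon.Hash) error {
	byNum, err := c.AuctionEventsByBlock(num, nil) // query by block number
	if err != nil {
		return tracerr.Wrap(err)
	}
	byHash, err := c.AuctionEventsByBlock(0, &hash) // blockNum is ignored here
	if err != nil {
		return tracerr.Wrap(err)
	}
	_, _ = byNum, byHash // nil events mean no Auction events in that block
	return nil
}
```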

View File

@@ -88,7 +88,7 @@ func TestAuctionSetSlotDeadline(t *testing.T) {
assert.Equal(t, newSlotDeadline, slotDeadline)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newSlotDeadline, auctionEvents.NewSlotDeadline[0].NewSlotDeadline)
}
@@ -109,7 +109,7 @@ func TestAuctionSetOpenAuctionSlots(t *testing.T) {
assert.Equal(t, newOpenAuctionSlots, openAuctionSlots)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newOpenAuctionSlots, auctionEvents.NewOpenAuctionSlots[0].NewOpenAuctionSlots)
}
@@ -130,7 +130,7 @@ func TestAuctionSetClosedAuctionSlots(t *testing.T) {
assert.Equal(t, newClosedAuctionSlots, closedAuctionSlots)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newClosedAuctionSlots, auctionEvents.NewClosedAuctionSlots[0].NewClosedAuctionSlots)
_, err = auctionClientTest.AuctionSetClosedAuctionSlots(closedAuctionSlots)
@@ -153,7 +153,7 @@ func TestAuctionSetOutbidding(t *testing.T) {
assert.Equal(t, newOutbidding, outbidding)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newOutbidding, auctionEvents.NewOutbidding[0].NewOutbidding)
_, err = auctionClientTest.AuctionSetOutbidding(outbiddingConst)
@@ -176,7 +176,7 @@ func TestAuctionSetAllocationRatio(t *testing.T) {
assert.Equal(t, newAllocationRatio, allocationRatio)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newAllocationRatio, auctionEvents.NewAllocationRatio[0].NewAllocationRatio)
_, err = auctionClientTest.AuctionSetAllocationRatio(allocationRatioConst)
@@ -205,7 +205,7 @@ func TestAuctionSetDonationAddress(t *testing.T) {
assert.Equal(t, &newDonationAddress, donationAddress)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newDonationAddress, auctionEvents.NewDonationAddress[0].NewDonationAddress)
_, err = auctionClientTest.AuctionSetDonationAddress(donationAddressConst)
@@ -224,7 +224,7 @@ func TestAuctionSetBootCoordinator(t *testing.T) {
assert.Equal(t, &newBootCoordinator, bootCoordinator)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, newBootCoordinator, auctionEvents.NewBootCoordinator[0].NewBootCoordinator)
assert.Equal(t, newBootCoordinatorURL, auctionEvents.NewBootCoordinator[0].NewBootCoordinatorURL)
@@ -261,7 +261,7 @@ func TestAuctionChangeDefaultSlotSetBid(t *testing.T) {
assert.Equal(t, minBid, newInitialMinBid)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, slotSet, auctionEvents.NewDefaultSlotSetBid[0].SlotSet)
assert.Equal(t, newInitialMinBid, auctionEvents.NewDefaultSlotSetBid[0].NewInitialMinBid)
@@ -287,7 +287,7 @@ func TestAuctionRegisterCoordinator(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, forgerAddress, auctionEvents.SetCoordinator[0].ForgerAddress)
assert.Equal(t, bidderAddress, auctionEvents.SetCoordinator[0].BidderAddress)
@@ -306,7 +306,7 @@ func TestAuctionBid(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, bidAmount, auctionEvents.NewBid[0].BidAmount)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
@@ -346,7 +346,7 @@ func TestAuctionMultiBid(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, bidderAddress, auctionEvents.NewBid[0].Bidder)
assert.Equal(t, currentSlot+4, auctionEvents.NewBid[0].Slot)
@@ -376,7 +376,7 @@ func TestAuctionClaimHEZ(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := auctionClientTest.client.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum, nil)
auctionEvents, _, err := auctionClientTest.AuctionEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, amount, auctionEvents.HEZClaimed[0].Amount)
assert.Equal(t, governanceAddressConst, auctionEvents.HEZClaimed[0].Owner)

View File

@@ -264,7 +264,7 @@ type RollupInterface interface {
//
RollupConstants() (*common.RollupConstants, error)
RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error)
RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error)
RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
RollupEventInit() (*RollupEventInitialize, int64, error)
}
@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
}
consts, err := c.RollupConstants()
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err))
return nil, tracerr.Wrap(err)
}
c.consts = consts
return c, nil
@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
}
fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount)
depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountF, err := common.NewFloat40(amount)
amountF, err := common.NewFloat16(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
}
fromIdxBig := big.NewInt(fromIdx)
toIdxBig := big.NewInt(toIdx)
depositAmountF, err := common.NewFloat40(depositAmount)
depositAmountF, err := common.NewFloat16(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountF, err := common.NewFloat40(amount)
amountF, err := common.NewFloat16(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
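
Since deposit and transfer amounts now travel as Float16 instead of Float40, a value must be exactly representable in the 2-byte encoding before the L1 tx can be built, which is why both conversions above can fail. A minimal sketch of the conversion, assuming common.NewFloat16 keeps the (*big.Int) (Float16, error) shape used above and that Float16 exposes a BigInt() decoder:

```
package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// 1 token with 18 decimals: mantissa 1, decimal exponent 18, so it
	// fits the 2-byte Float16 encoding exactly.
	amount, _ := new(big.Int).SetString("1000000000000000000", 10)
	amountF, err := common.NewFloat16(amount)
	if err != nil {
		// NewFloat16 fails when the value cannot be encoded without
		// rounding loss (assumption about the error condition).
		panic(err)
	}
	fmt.Printf("encoded: %#04x decoded: %s\n", uint16(amountF), amountF.BigInt())
}
```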
@@ -735,40 +735,31 @@ func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error)
return &rollupInit, int64(vLog.BlockNumber), tracerr.Wrap(err)
}
// RollupEventsByBlock returns the events in a block that happened in the
// Rollup Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *RollupClient) RollupEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*RollupEvents, error) {
// RollupEventsByBlock returns the events in a block that happened in the
// Rollup Smart Contract and the blockHash where the events happened. If
// there are no events in that block, blockHash is nil.
func (c *RollupClient) RollupEventsByBlock(blockNum int64) (*RollupEvents, *ethCommon.Hash, error) {
var rollupEvents RollupEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{
BlockHash: blockHash,
FromBlock: blockNumBigInt,
ToBlock: blockNumBigInt,
FromBlock: big.NewInt(blockNum),
ToBlock: big.NewInt(blockNum),
Addresses: []ethCommon.Address{
c.address,
},
BlockHash: nil,
Topics: [][]ethCommon.Hash{},
}
logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
if len(logs) == 0 {
return nil, nil
if len(logs) > 0 {
blockHash = &logs[0].BlockHash
}
for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash {
if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
}
switch vLog.Topics[0] {
case logHermezL1UserTxEvent:
@@ -776,11 +767,11 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var L1UserTx RollupEventL1UserTx
err := c.contractAbi.UnpackIntoInterface(&L1UserTxAux, "L1UserTxEvent", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
L1Tx, err := common.L1UserTxFromBytes(L1UserTxAux.L1UserTx)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
toForgeL1TxsNum := new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
L1Tx.ToForgeL1TxsNum = &toForgeL1TxsNum
@@ -792,7 +783,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var addToken RollupEventAddToken
err := c.contractAbi.UnpackIntoInterface(&addToken, "AddToken", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
addToken.TokenAddress = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
rollupEvents.AddToken = append(rollupEvents.AddToken, addToken)
@@ -800,7 +791,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var forgeBatch RollupEventForgeBatch
err := c.contractAbi.UnpackIntoInterface(&forgeBatch, "ForgeBatch", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
forgeBatch.BatchNum = new(big.Int).SetBytes(vLog.Topics[1][:]).Int64()
forgeBatch.EthTxHash = vLog.TxHash
@@ -812,7 +803,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
}
err := c.contractAbi.UnpackIntoInterface(&updateForgeL1L2BatchTimeout, "UpdateForgeL1L2BatchTimeout", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
rollupEvents.UpdateForgeL1L2BatchTimeout = append(rollupEvents.UpdateForgeL1L2BatchTimeout,
RollupEventUpdateForgeL1L2BatchTimeout{
@@ -822,7 +813,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var updateFeeAddToken RollupEventUpdateFeeAddToken
err := c.contractAbi.UnpackIntoInterface(&updateFeeAddToken, "UpdateFeeAddToken", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
rollupEvents.UpdateFeeAddToken = append(rollupEvents.UpdateFeeAddToken, updateFeeAddToken)
case logHermezWithdrawEvent:
@@ -840,7 +831,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var updateBucketWithdraw RollupEventUpdateBucketWithdraw
err := c.contractAbi.UnpackIntoInterface(&updateBucketWithdrawAux, "UpdateBucketWithdraw", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
updateBucketWithdraw.Withdrawals = updateBucketWithdrawAux.Withdrawals
updateBucketWithdraw.NumBucket = int(new(big.Int).SetBytes(vLog.Topics[1][:]).Int64())
@@ -851,7 +842,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var withdrawalDelay RollupEventUpdateWithdrawalDelay
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "UpdateWithdrawalDelay", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
rollupEvents.UpdateWithdrawalDelay = append(rollupEvents.UpdateWithdrawalDelay, withdrawalDelay)
case logHermezUpdateBucketsParameters:
@@ -859,7 +850,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var bucketsParameters RollupEventUpdateBucketsParameters
err := c.contractAbi.UnpackIntoInterface(&bucketsParametersAux, "UpdateBucketsParameters", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
for i, bucket := range bucketsParametersAux.ArrayBuckets {
bucketsParameters.ArrayBuckets[i].CeilUSD = bucket[0]
@@ -872,7 +863,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
var tokensExchange RollupEventUpdateTokenExchange
err := c.contractAbi.UnpackIntoInterface(&tokensExchange, "UpdateTokenExchange", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
rollupEvents.UpdateTokenExchange = append(rollupEvents.UpdateTokenExchange, tokensExchange)
case logHermezSafeMode:
@@ -894,7 +885,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
bucketsParameters)
}
}
return &rollupEvents, nil
return &rollupEvents, blockHash, nil
}
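
Because the block hash is now returned to the caller instead of being passed in, reorg detection moves one level up: the synchronizer can compare the hash the logs came from against the header it just fetched. A hypothetical helper sketching that pattern (eventsForSyncedBlock is illustrative, not part of this diff):

```
// eventsForSyncedBlock queries the rollup events for a block and checks
// that they were emitted by the header we expect; a mismatch means a
// reorg happened between fetching the header and fetching the logs.
func eventsForSyncedBlock(c *RollupClient, block *common.Block) (*RollupEvents, error) {
	rollupEvents, blockHash, err := c.RollupEventsByBlock(block.Num)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	// blockHash is nil when the block emitted no rollup events.
	if blockHash != nil && *blockHash != block.Hash {
		return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
	}
	return rollupEvents, nil
}
```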
// RollupForgeBatchArgs returns the arguments used in a ForgeBatch call in the
@@ -902,7 +893,7 @@ func (c *RollupClient) RollupEventsByBlock(blockNum int64,
func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsLen uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) {
tx, _, err := c.client.client.TransactionByHash(context.Background(), ethTxHash)
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("TransactionByHash: %w", err))
return nil, nil, tracerr.Wrap(err)
}
txData := tx.Data()
@@ -939,9 +930,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
FeeIdxCoordinator: []common.Idx{},
}
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
l1UserTxsData := []byte{}
@@ -968,7 +959,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
}
for i := 0; i < numTxsL1Coord; i++ {
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]
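
A quick worked example of the byte-length arithmetic above, under the assumption that the standard verifier uses nLevels = 32: each compressed L1/L2 transaction carries fromIdx and toIdx at nLevels/8 bytes each, a 2-byte Float16 amount, and a 1-byte fee selector.

```
// Assumption: nLevels = 32. Each tx then takes fromIdx (4 bytes) +
// toIdx (4 bytes) + amountFloat16 (2 bytes) + fee (1 byte).
nLevels := int64(32)
lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1) // = 11 bytes per tx
_ = lenL1L2TxsBytes
```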

View File

@@ -91,7 +91,7 @@ func TestRollupAddToken(t *testing.T) {
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, tokenHEZAddressConst, rollupEvents.AddToken[0].TokenAddress)
@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
require.NoError(t, err)
numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
for i := 0; i < numTxsL1; i++ {
bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
var signature []byte
v := bytesL1Coordinator[0]
s := bytesL1Coordinator[1:33]
@@ -174,7 +174,7 @@ func TestRollupForgeBatch(t *testing.T) {
currentBlockNum, err = rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, int64(1), rollupEvents.ForgeBatch[0].BatchNum)
@@ -203,7 +203,7 @@ func TestRollupUpdateForgeL1L2BatchTimeout(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, newForgeL1L2BatchTimeout, rollupEvents.UpdateForgeL1L2BatchTimeout[0].NewForgeL1L2BatchTimeout)
@@ -216,7 +216,7 @@ func TestRollupUpdateFeeAddToken(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, newFeeAddToken, rollupEvents.UpdateFeeAddToken[0].NewFeeAddToken)
@@ -235,7 +235,7 @@ func TestRollupUpdateBucketsParameters(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
blockStampBucket = currentBlockNum
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, bucketsParameters, rollupEvents.UpdateBucketsParameters[0].ArrayBuckets)
}
@@ -246,7 +246,7 @@ func TestRollupUpdateWithdrawalDelay(t *testing.T) {
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, newWithdrawalDelay, int64(rollupEvents.UpdateWithdrawalDelay[0].NewWithdrawalDelay))
}
@@ -263,7 +263,7 @@ func TestRollupUpdateTokenExchange(t *testing.T) {
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, addressArray, rollupEvents.UpdateTokenExchange[0].AddressArray)
assert.Equal(t, valueArray, rollupEvents.UpdateTokenExchange[0].ValueArray)
@@ -292,7 +292,7 @@ func TestRollupL1UserTxETHCreateAccountDeposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -324,7 +324,7 @@ func TestRollupL1UserTxERC20CreateAccountDeposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -356,7 +356,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDeposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.FromBJJ, rollupEvents.L1UserTx[0].L1UserTx.FromBJJ)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
@@ -388,7 +388,7 @@ func TestRollupL1UserTxETHDeposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -418,7 +418,7 @@ func TestRollupL1UserTxERC20Deposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -447,7 +447,7 @@ func TestRollupL1UserTxERC20PermitDeposit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -478,7 +478,7 @@ func TestRollupL1UserTxETHDepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -508,7 +508,7 @@ func TestRollupL1UserTxERC20DepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -538,7 +538,7 @@ func TestRollupL1UserTxERC20PermitDepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -569,7 +569,7 @@ func TestRollupL1UserTxETHCreateAccountDepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -599,7 +599,7 @@ func TestRollupL1UserTxERC20CreateAccountDepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -629,7 +629,7 @@ func TestRollupL1UserTxERC20PermitCreateAccountDepositTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -659,7 +659,7 @@ func TestRollupL1UserTxETHForceTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -688,7 +688,7 @@ func TestRollupL1UserTxERC20ForceTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -717,7 +717,7 @@ func TestRollupL1UserTxERC20PermitForceTransfer(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -747,7 +747,7 @@ func TestRollupL1UserTxETHForceExit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -776,7 +776,7 @@ func TestRollupL1UserTxERC20ForceExit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -807,7 +807,7 @@ func TestRollupL1UserTxERC20PermitForceExit(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, l1Tx.ToIdx, rollupEvents.L1UserTx[0].L1UserTx.ToIdx)
assert.Equal(t, l1Tx.DepositAmount, rollupEvents.L1UserTx[0].L1UserTx.DepositAmount)
@@ -822,7 +822,7 @@ func TestRollupForgeBatch2(t *testing.T) {
require.NoError(t, err)
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, int64(2), rollupEvents.ForgeBatch[0].BatchNum)
@@ -876,7 +876,7 @@ func TestRollupForgeBatch2(t *testing.T) {
currentBlockNum, err = rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err = rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err = rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, int64(3), rollupEvents.ForgeBatch[0].BatchNum)
@@ -928,7 +928,7 @@ func TestRollupWithdrawMerkleProof(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
assert.Equal(t, uint64(fromIdx), rollupEvents.Withdraw[0].Idx)
@@ -951,7 +951,7 @@ func TestRollupSafeMode(t *testing.T) {
currentBlockNum, err := rollupClient.client.EthLastBlock()
require.NoError(t, err)
rollupEvents, err := rollupClient.RollupEventsByBlock(currentBlockNum, nil)
rollupEvents, _, err := rollupClient.RollupEventsByBlock(currentBlockNum)
require.NoError(t, err)
auxEvent := new(RollupEventSafeMode)
assert.Equal(t, auxEvent, &rollupEvents.SafeMode[0])

View File

@@ -134,7 +134,7 @@ type WDelayerInterface interface {
WDelayerWithdrawal(owner, token ethCommon.Address) (*types.Transaction, error)
WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amount *big.Int) (*types.Transaction, error)
WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error)
WDelayerConstants() (*common.WDelayerConstants, error)
WDelayerEventInit() (*WDelayerEventInitialize, int64, error)
}
@@ -424,47 +424,40 @@ func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, e
}
// WDelayerEventsByBlock returns the events in a block that happened in the
// WDelayer Smart Contract.
// To query by blockNum, set blockNum >= 0 and blockHash == nil.
// To query by blockHash set blockHash != nil, and blockNum will be ignored.
// If there are no events in that block the result is nil.
func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*WDelayerEvents, error) {
// WDelayer Smart Contract and the blockHash where the events happened. If
// there are no events in that block, blockHash is nil.
func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64) (*WDelayerEvents, *ethCommon.Hash, error) {
var wdelayerEvents WDelayerEvents
var blockHash *ethCommon.Hash
var blockNumBigInt *big.Int
if blockHash == nil {
blockNumBigInt = big.NewInt(blockNum)
}
query := ethereum.FilterQuery{
BlockHash: blockHash,
FromBlock: blockNumBigInt,
ToBlock: blockNumBigInt,
FromBlock: big.NewInt(blockNum),
ToBlock: big.NewInt(blockNum),
Addresses: []ethCommon.Address{
c.address,
},
BlockHash: nil,
Topics: [][]ethCommon.Hash{},
}
logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
if len(logs) == 0 {
return nil, nil
if len(logs) > 0 {
blockHash = &logs[0].BlockHash
}
for _, vLog := range logs {
if blockHash != nil && vLog.BlockHash != *blockHash {
if vLog.BlockHash != *blockHash {
log.Errorw("Block hash mismatch", "expected", blockHash.String(), "got", vLog.BlockHash.String())
return nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
return nil, nil, tracerr.Wrap(ErrBlockHashMismatchEvent)
}
switch vLog.Topics[0] {
case logWDelayerDeposit:
var deposit WDelayerEventDeposit
err := c.contractAbi.UnpackIntoInterface(&deposit, "Deposit", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
deposit.Owner = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
deposit.Token = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -475,7 +468,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var withdraw WDelayerEventWithdraw
err := c.contractAbi.UnpackIntoInterface(&withdraw, "Withdraw", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
withdraw.Token = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
withdraw.Owner = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -489,7 +482,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var withdrawalDelay WDelayerEventNewWithdrawalDelay
err := c.contractAbi.UnpackIntoInterface(&withdrawalDelay, "NewWithdrawalDelay", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
wdelayerEvents.NewWithdrawalDelay = append(wdelayerEvents.NewWithdrawalDelay, withdrawalDelay)
@@ -497,7 +490,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var escapeHatchWithdrawal WDelayerEventEscapeHatchWithdrawal
err := c.contractAbi.UnpackIntoInterface(&escapeHatchWithdrawal, "EscapeHatchWithdrawal", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
escapeHatchWithdrawal.Who = ethCommon.BytesToAddress(vLog.Topics[1].Bytes())
escapeHatchWithdrawal.To = ethCommon.BytesToAddress(vLog.Topics[2].Bytes())
@@ -508,7 +501,7 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var emergencyCouncil WDelayerEventNewEmergencyCouncil
err := c.contractAbi.UnpackIntoInterface(&emergencyCouncil, "NewEmergencyCouncil", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
wdelayerEvents.NewEmergencyCouncil = append(wdelayerEvents.NewEmergencyCouncil, emergencyCouncil)
@@ -516,10 +509,10 @@ func (c *WDelayerClient) WDelayerEventsByBlock(blockNum int64,
var governanceAddress WDelayerEventNewHermezGovernanceAddress
err := c.contractAbi.UnpackIntoInterface(&governanceAddress, "NewHermezGovernanceAddress", vLog.Data)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, nil, tracerr.Wrap(err)
}
wdelayerEvents.NewHermezGovernanceAddress = append(wdelayerEvents.NewHermezGovernanceAddress, governanceAddress)
}
}
return &wdelayerEvents, nil
return &wdelayerEvents, blockHash, nil
}
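
The WDelayer query gets the same inversion, so a caller syncing one block across several contracts can cross-check the hashes each query returns. A hypothetical sketch (crossCheckHashes is illustrative, not part of this diff):

```
// crossCheckHashes: when one block is synced across contracts, the
// blockHash returned by each events query should agree; a mismatch
// means a reorg landed between the FilterLogs calls and the sync step
// should be retried.
func crossCheckHashes(rollupHash, wdelayerHash *ethCommon.Hash) error {
	if rollupHash != nil && wdelayerHash != nil && *rollupHash != *wdelayerHash {
		return tracerr.Wrap(ErrBlockHashMismatchEvent)
	}
	return nil
}
```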

View File

@@ -52,7 +52,7 @@ func TestWDelayerSetHermezGovernanceAddress(t *testing.T) {
assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewHermezGovernanceAddress[0].NewHermezGovernanceAddress)
_, err = wdelayerClientAux.WDelayerTransferGovernance(governanceAddressConst)
@@ -81,7 +81,7 @@ func TestWDelayerSetEmergencyCouncil(t *testing.T) {
assert.Equal(t, &auxAddressConst, auxAddress)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, auxAddressConst, wdelayerEvents.NewEmergencyCouncil[0].NewEmergencyCouncil)
_, err = wdelayerClientAux.WDelayerTransferEmergencyCouncil(emergencyCouncilAddressConst)
@@ -110,7 +110,7 @@ func TestWDelayerChangeWithdrawalDelay(t *testing.T) {
assert.Equal(t, newWithdrawalDelay, withdrawalDelay)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, uint64(newWithdrawalDelay), wdelayerEvents.NewWithdrawalDelay[0].WithdrawalDelay)
}
@@ -124,7 +124,7 @@ func TestWDelayerDeposit(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -150,7 +150,7 @@ func TestWDelayerWithdrawal(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Withdraw[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Withdraw[0].Owner)
@@ -166,7 +166,7 @@ func TestWDelayerSecondDeposit(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, amount, wdelayerEvents.Deposit[0].Amount)
assert.Equal(t, auxAddressConst, wdelayerEvents.Deposit[0].Owner)
@@ -181,7 +181,7 @@ func TestWDelayerEnableEmergencyMode(t *testing.T) {
assert.Equal(t, true, emergencyMode)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
auxEvent := new(WDelayerEventEmergencyModeEnabled)
assert.Equal(t, auxEvent, &wdelayerEvents.EmergencyModeEnabled[0])
@@ -210,7 +210,7 @@ func TestWDelayerEscapeHatchWithdrawal(t *testing.T) {
require.Nil(t, err)
currentBlockNum, err := wdelayerClientTest.client.EthLastBlock()
require.Nil(t, err)
wdelayerEvents, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum, nil)
wdelayerEvents, _, err := wdelayerClientTest.WDelayerEventsByBlock(currentBlockNum)
require.Nil(t, err)
assert.Equal(t, tokenHEZAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].Token)
assert.Equal(t, governanceAddressConst, wdelayerEvents.EscapeHatchWithdrawal[0].To)

2
go.mod
View File

@@ -17,6 +17,7 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0
@@ -28,6 +29,5 @@ require (
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
gopkg.in/go-playground/validator.v9 v9.29.1
)
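
The new marusama/semaphore dependency backs the counting semaphore that bounds concurrent API queries against SQL. A minimal, self-contained sketch of that pattern, assuming the package's New/Acquire/Release API; the limit of 4 is illustrative:

```
package main

import (
	"context"
	"fmt"

	"github.com/marusama/semaphore/v2"
)

func main() {
	// Allow at most 4 API queries to hit SQL at the same time.
	sem := semaphore.New(4)
	query := func(ctx context.Context, id int) error {
		if err := sem.Acquire(ctx, 1); err != nil {
			return err // ctx was cancelled while waiting for a slot
		}
		defer sem.Release(1)
		fmt.Println("running query", id)
		return nil
	}
	_ = query(context.Background(), 1)
}
```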

13
go.sum
View File

@@ -24,8 +24,6 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -86,8 +84,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -173,8 +169,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -421,6 +415,9 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -602,8 +599,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -622,8 +617,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=

View File

@@ -2,7 +2,6 @@ package node
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
@@ -103,8 +102,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
var keyStore *ethKeystore.KeyStore
if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{
CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv,
CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv,
}
scryptN := ethKeystore.StandardScryptN
@@ -172,12 +171,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(fmt.Errorf("cfg.StateDB.Keep = %v < %v, which is unsafe",
cfg.StateDB.Keep, safeStateDBKeep))
}
stateDB, err := statedb.NewStateDB(statedb.Config{
Path: cfg.StateDB.Path,
Keep: cfg.StateDB.Keep,
Type: statedb.TypeSynchronizer,
NLevels: statedb.MaxNLevels,
})
stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, cfg.StateDB.Keep,
statedb.TypeSynchronizer, 32)
if err != nil {
return nil, tracerr.Wrap(err)
}
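
With the Config struct gone, callers pass the path, the number of checkpoints to keep, the DB type, and the merkle tree depth positionally. A minimal sketch under those assumptions (openSyncStateDB is a hypothetical helper):

```
// openSyncStateDB opens the synchronizer state, assuming the positional
// signature shown in this diff: path, checkpoints to keep, DB type, and
// tree depth (32 levels for the synchronizer).
func openSyncStateDB(path string) (*statedb.StateDB, error) {
	stateDB, err := statedb.NewStateDB(path, 128, statedb.TypeSynchronizer, 32)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	return stateDB, nil
}
```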
@@ -261,9 +256,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
MaxL1Tx: common.RollupConstMaxL1Tx,
}
var verifierIdx int
if cfg.Coordinator.Debug.RollupVerifierIndex == nil {
verifierIdx, err = scConsts.Rollup.FindVerifierIdx(
verifierIdx, err := scConsts.Rollup.FindVerifierIdx(
cfg.Coordinator.Circuit.MaxTx,
cfg.Coordinator.Circuit.NLevels,
)
@@ -271,27 +264,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err)
}
log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
} else {
verifierIdx = *cfg.Coordinator.Debug.RollupVerifierIndex
log.Infow("Using debug verifier index from config", "verifierIdx", verifierIdx)
if verifierIdx >= len(scConsts.Rollup.Verifiers) {
return nil, tracerr.Wrap(
fmt.Errorf("verifierIdx (%v) >= "+
"len(scConsts.Rollup.Verifiers) (%v)",
verifierIdx, len(scConsts.Rollup.Verifiers)))
}
verifier := scConsts.Rollup.Verifiers[verifierIdx]
if verifier.MaxTx != cfg.Coordinator.Circuit.MaxTx ||
verifier.NLevels != cfg.Coordinator.Circuit.NLevels {
return nil, tracerr.Wrap(
fmt.Errorf("Circuit config and verifier params don't match. "+
"circuit.MaxTx = %v, circuit.NLevels = %v, "+
"verifier.MaxTx = %v, verifier.NLevels = %v",
cfg.Coordinator.Circuit.MaxTx, cfg.Coordinator.Circuit.NLevels,
verifier.MaxTx, verifier.NLevels,
))
}
}
coord, err = coordinator.NewCoordinator(
coordinator.Config{
@@ -299,15 +271,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
Purger: coordinator.PurgerCfg{
@@ -494,15 +460,11 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
common.BatchNum(stats.Eth.LastBatch),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
} else {
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
}
}
}
@@ -584,16 +546,10 @@ func (n *Node) StartSynchronizer() {
if n.ctx.Err() != nil {
continue
}
if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
log.Warnw("Synchronizer.Sync", "err", err)
} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
log.Warnw("Synchronizer.Sync", "err", err)
} else {
log.Errorw("Synchronizer.Sync", "err", err)
}
}
}
}
}()
n.wg.Add(1)

View File

@@ -18,19 +18,6 @@ import (
"github.com/hermeznetwork/tracerr"
)
const (
// errStrUnknownBlock is the string returned by geth when querying an
// unknown block
errStrUnknownBlock = "unknown block"
)
var (
// ErrUnknownBlock is the error returned by the Synchronizer when a
// block is queried by hash but the ethereum node doesn't find it due
// to it being discarded from a reorg.
ErrUnknownBlock = fmt.Errorf("unknown block")
)
// Stats of the synchronizer
type Stats struct {
Eth struct {
@@ -38,12 +25,12 @@ type Stats struct {
Updated time.Time
FirstBlockNum int64
LastBlock common.Block
LastBatchNum int64
LastBatch int64
}
Sync struct {
Updated time.Time
LastBlock common.Block
LastBatch common.Batch
LastBatch int64
// LastL1BatchBlock is the last ethereum block in which an
// l1Batch was forged
LastL1BatchBlock int64
@@ -90,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
}
// UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
now := time.Now()
s.rw.Lock()
s.Sync.LastBlock = *lastBlock
if lastBatch != nil {
s.Sync.LastBatch = *lastBatch
s.Sync.LastBatch = int64(*lastBatch)
}
if lastL1BatchBlock != nil {
s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -118,16 +105,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
if err != nil {
return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
return tracerr.Wrap(err)
}
lastBatchNum, err := ethClient.RollupLastForgedBatch()
lastBatch, err := ethClient.RollupLastForgedBatch()
if err != nil {
return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
return tracerr.Wrap(err)
}
s.rw.Lock()
s.Eth.Updated = now
s.Eth.LastBlock = *lastBlock
s.Eth.LastBatchNum = lastBatchNum
s.Eth.LastBatch = lastBatch
s.rw.Unlock()
return nil
}
@@ -152,10 +139,6 @@ func (s *StatsHolder) CopyStats() *Stats {
sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
}
if s.Sync.LastBatch.StateRoot != nil {
sCopy.Sync.LastBatch.StateRoot =
common.CopyBigInt(s.Sync.LastBatch.StateRoot)
}
s.rw.RUnlock()
return &sCopy
}
@@ -169,9 +152,9 @@ func (s *StatsHolder) blocksPerc() float64 {
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
}
func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
return float64(batchNum) * 100.0 /
float64(s.Eth.LastBatchNum)
float64(s.Eth.LastBatch)
}
// StartBlockNums sets the first block used to start tracking the smart
@@ -221,7 +204,6 @@ type Synchronizer struct {
startBlockNum int64
vars SCVariables
stats *StatsHolder
resetStateFailed bool
}
// NewSynchronizer creates a new Synchronizer
@@ -346,25 +328,23 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
return nil
}
// updateCurrentSlot updates the slot with information of the current slot.
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
// hasBatch is true when the last synced block contained at least one batch.
func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
// firstBatchBlockNum is the blockNum of the first batch in that block, if any
func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
slot := common.Slot{
SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum,
ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
}
// We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum)
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
if reset {
// Using this query only to know if there is a batch in this slot
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
} else if tracerr.Unwrap(err) == sql.ErrNoRows {
hasBatch = false
firstBatchBlockNum = nil
} else {
hasBatch = true
firstBatchBlockNum = dbFirstBatchBlockNum
firstBatchBlockNum = &dbFirstBatchBlockNum
}
slot.ForgerCommitment = false
} else if slotNum > slot.SlotNum {
@@ -375,11 +355,11 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil {
return tracerr.Wrap(err)
if err := s.setSlotCoordinator(&slot); err != nil {
return nil, tracerr.Wrap(err)
}
if hasBatch &&
s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
if firstBatchBlockNum != nil &&
s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true
}
@@ -388,61 +368,57 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
// BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
return nil, tracerr.Wrap(err)
}
if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot))
}
// END SANITY CHECK
}
return nil
return &slot, nil
}
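
The commitment flag reduces to comparing the first batch's position inside the slot against the auction's SlotDeadline. A worked example with illustrative numbers (metCommitment is hypothetical):

```
// metCommitment mirrors the check above: with SlotDeadline = 20, a
// first batch at relative block 12 meets the commitment (12 < 20),
// while one at relative block 25 does not.
func metCommitment(relativeBlock, slotDeadline int64) bool {
	return relativeBlock < slotDeadline
}
```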
// updateNextSlot updates the slot with information of the next slot.
// The information about which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
// We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) + 1
slot.SlotNum = slotNum
slot.ForgerCommitment = false
slot := common.Slot{
SlotNum: slotNum,
ForgerCommitment: false,
}
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(slot); err != nil {
return tracerr.Wrap(err)
if err := s.setSlotCoordinator(&slot); err != nil {
return nil, tracerr.Wrap(err)
}
// TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
if err != nil {
return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
return nil, tracerr.Wrap(err)
}
if !canForge {
return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot))
}
// END SANITY CHECK
}
return nil
return &slot, nil
}
// updateCurrentNextSlotIfSync updates the current and next slot. Information
// about which forger address is allowed to forge is only updated if we are
// Synced.
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
current := s.stats.Sync.Auction.CurrentSlot
next := s.stats.Sync.Auction.NextSlot
if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
if err != nil {
return tracerr.Wrap(err)
}
if err := s.updateNextSlot(&next); err != nil {
next, err := s.getNextSlot()
if err != nil {
return tracerr.Wrap(err)
}
s.stats.UpdateCurrentNextSlot(&current, &next)
s.stats.UpdateCurrentNextSlot(current, next)
return nil
}
@@ -469,10 +445,8 @@ func (s *Synchronizer) init() error {
lastBlock = lastSavedBlock
}
if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(err)
}
s.resetStateFailed = false
log.Infow("Sync init block",
"syncLastBlock", s.stats.Sync.LastBlock,
@@ -481,44 +455,23 @@ func (s *Synchronizer) init() error {
"ethLastBlock", s.stats.Eth.LastBlock,
)
log.Infow("Sync init batch",
"syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
"syncLastBatch", s.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
"ethLastBatch", s.stats.Eth.LastBatch,
)
return nil
}
func (s *Synchronizer) resetIntermediateState() error {
lastBlock, err := s.historyDB.GetLastBlock()
if tracerr.Unwrap(err) == sql.ErrNoRows {
lastBlock = &common.Block{}
} else if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBlock: %w", err))
}
if err := s.resetState(lastBlock); err != nil {
s.resetStateFailed = true
return tracerr.Wrap(fmt.Errorf("resetState at block %v: %w", lastBlock.Num, err))
}
s.resetStateFailed = false
return nil
}
// Sync2 attempts to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB.
// If a block is synced, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync2(ctx context.Context,
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil {
return nil, nil, tracerr.Wrap(err)
}
}
func (s *Synchronizer) Sync2(ctx context.Context, lastSavedBlock *common.Block) (*common.BlockData, *int64, error) {
var nextBlockNum int64 // next block number to sync
if lastSavedBlock == nil {
var err error
// Get lastSavedBlock from History DB
lastSavedBlock, err = s.historyDB.GetLastBlock()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
@@ -544,7 +497,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
if tracerr.Unwrap(err) == ethereum.NotFound {
return nil, nil, nil
} else if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
return nil, nil, tracerr.Wrap(err)
}
log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -574,20 +527,6 @@ func (s *Synchronizer) Sync2(ctx context.Context,
}
}
defer func() {
// If there was an error during sync, reset to the last block
// in the historyDB because the historyDB is written last in
// the Sync method and is the source of consistency. This
// allows resetting the stateDB in the case a batch was
// processed but the historyDB block was not committed due to an
// error.
if err != nil {
if err2 := s.resetIntermediateState(); err2 != nil {
log.Errorw("sync revert", "err", err2)
}
}
}()
// Get data from the rollup contract
rollupData, err := s.rollupSync(ethBlock)
if err != nil {
@@ -625,14 +564,14 @@ func (s *Synchronizer) Sync2(ctx context.Context,
}
// Group all the block data into the structs to save into HistoryDB
blockData = &common.BlockData{
blockData := common.BlockData{
Block: *ethBlock,
Rollup: *rollupData,
Auction: *auctionData,
WDelayer: *wDelayerData,
}
err = s.historyDB.AddBlockSCData(blockData)
err = s.historyDB.AddBlockSCData(&blockData)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
@@ -650,31 +589,31 @@ func (s *Synchronizer) Sync2(ctx context.Context,
}
}
s.stats.UpdateSync(ethBlock,
&rollupData.Batches[batchesLen-1].Batch,
&rollupData.Batches[batchesLen-1].Batch.BatchNum,
lastL1BatchBlock, lastForgeL1TxsNum)
}
hasBatch := false
var firstBatchBlockNum *int64
if len(rollupData.Batches) > 0 {
hasBatch = true
firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
}
if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
return nil, nil, tracerr.Wrap(err)
}
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
)
}
log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num,
)
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
"ethLastBatch", s.stats.Eth.LastBatch,
)
}
return blockData, nil, nil
return &blockData, nil, nil
}
// reorg manages a reorg, updating History and State DB as needed. Keeps
@@ -706,15 +645,14 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1)
// Set History DB and State DB to the correct state
if err := s.historyDB.Reorg(block.Num); err != nil {
err := s.historyDB.Reorg(block.Num)
if err != nil {
return 0, tracerr.Wrap(err)
}
if err := s.resetState(block); err != nil {
s.resetStateFailed = true
return 0, tracerr.Wrap(err)
}
s.resetStateFailed = false
return block.Num, nil
}
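The walk-back that precedes these lines is elided by the hunk; roughly, and only as a sketch under assumed code shapes (the real method also bounds how far back it searches), it descends until the on-chain hash matches the one stored in the HistoryDB:

```go
// Sketch of the common-ancestor search (assumed shape, not the exact code).
block := uncleBlock
for {
	ethBlock, err := s.ethClient.EthBlockByNumber(ctx, block.Num-1)
	if err != nil {
		return 0, tracerr.Wrap(err)
	}
	dbBlock, err := s.historyDB.GetBlock(block.Num - 1)
	if err != nil {
		return 0, tracerr.Wrap(err)
	}
	if ethBlock.Hash == dbBlock.Hash {
		break // common ancestor found; everything above it is discarded
	}
	block = dbBlock
}
```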
@@ -723,15 +661,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
return nil, nil, tracerr.Wrap(err)
}
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
return nil, nil, tracerr.Wrap(err)
}
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
return nil, nil, tracerr.Wrap(err)
}
rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
@@ -776,17 +714,12 @@ func (s *Synchronizer) resetState(block *common.Block) error {
s.vars.WDelayer = *wDelayer
}
batch, err := s.historyDB.GetLastBatch()
batchNum, err := s.historyDB.GetLastBatchNum()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
if tracerr.Unwrap(err) == sql.ErrNoRows {
batch = &common.Batch{}
}
err = s.stateDB.Reset(batch.BatchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
batchNum = 0
}
lastL1BatchBlockNum, err := s.historyDB.GetLastL1BatchBlockNum()
@@ -806,9 +739,14 @@ func (s *Synchronizer) resetState(block *common.Block) error {
lastForgeL1TxsNum = &n
}
s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
err = s.stateDB.Reset(batchNum)
if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
}
if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
return tracerr.Wrap(err)
}
return nil
@@ -823,16 +761,19 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
// Get rollup events in the block, and make sure the block hash matches
// the expected one.
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
rollupEvents, blockHash, err := s.ethClient.RollupEventsByBlock(blockNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if rollupEvents == nil {
if blockHash == nil {
return &rollupData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Rollup events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
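The same three-step check (nil hash means no events; a hash mismatch means a reorg slipped in between reading the block and reading its events) repeats below for the Auction and WDelayer contracts. A hypothetical helper showing the shape, with rollupSync then reading `ok, err := checkEventsBlockHash(blockHash, ethBlock)` and returning early when `ok` is false:

```go
// Hypothetical helper; the real code inlines this once per contract.
func checkEventsBlockHash(gotHash *ethCommon.Hash, ethBlock *common.Block) (bool, error) {
	if gotHash == nil {
		return false, nil // no events in this block
	}
	if *gotHash != ethBlock.Hash {
		// a reorg happened between fetching the block and its events
		return false, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
	}
	return true, nil
}
```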
var nextForgeL1TxsNum int64 // forgeL1TxsNum for the next L1Batch
nextForgeL1TxsNumPtr, err := s.historyDB.GetLastL1TxsNum()
@@ -860,7 +801,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
forgeBatchArgs, sender, err := s.ethClient.RollupForgeBatchArgs(evtForgeBatch.EthTxHash,
evtForgeBatch.L1UserTxsLen)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupForgeBatchArgs: %w", err))
return nil, tracerr.Wrap(err)
}
batchNum := common.BatchNum(evtForgeBatch.BatchNum)
@@ -943,16 +884,6 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
if err != nil {
return nil, tracerr.Wrap(err)
}
if s.stateDB.CurrentBatch() != batchNum {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
"evtForgeBatch.BatchNum = (%v)",
s.stateDB.CurrentBatch(), batchNum))
}
if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
"forgeBatchArgs.NewStRoot (%v)",
s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
}
// Transform processed PoolL2 txs to L2 and store in BatchData
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -1135,16 +1066,19 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
var auctionData = common.NewAuctionData()
// Get auction events in the block
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
auctionEvents, blockHash, err := s.ethClient.AuctionEventsByBlock(blockNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if auctionEvents == nil {
if blockHash == nil {
return &auctionData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in Auction events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
// Get bids
for _, evt := range auctionEvents.NewBid {
@@ -1234,16 +1168,19 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
wDelayerData := common.NewWDelayerData()
// Get wDelayer events in the block
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
wDelayerEvents, blockHash, err := s.ethClient.WDelayerEventsByBlock(blockNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
// No events in this block
if wDelayerEvents == nil {
if blockHash == nil {
return &wDelayerData, nil
}
if *blockHash != ethBlock.Hash {
log.Errorw("Block hash mismatch in WDelayer events", "expected", ethBlock.Hash.String(),
"got", blockHash.String())
return nil, tracerr.Wrap(eth.ErrBlockHashMismatchEvent)
}
for _, evt := range wDelayerEvents.Deposit {
wDelayerData.Deposits = append(wDelayerData.Deposits, common.WDelayerTransfer{

View File

@@ -17,6 +17,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/jinzhu/copier"
@@ -306,7 +307,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
require.NoError(t, err)
deleteme = append(deleteme, dir)
stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
stateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
require.NoError(t, err)
// Init History DB
@@ -320,14 +321,6 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
return stateDB, historyDB
}
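This compare reverts the StateDB constructor from a config struct back to positional arguments throughout; both forms side by side (the 128 matches the kvdb.DefaultKeep used by the struct-based callers elsewhere in this diff, i.e. the number of checkpoints kept):

```go
// Struct form (removed by this compare):
sdb, err := statedb.NewStateDB(statedb.Config{
	Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32,
})

// Positional form (restored by this compare):
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
```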
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestSyncGeneral(t *testing.T) {
//
// Setup
@@ -346,6 +339,7 @@ func TestSyncGeneral(t *testing.T) {
s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second,
})
log.Error(err)
require.NoError(t, err)
ctx := context.Background()
@@ -440,22 +434,12 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
// blocks 1 (blockNum=3)
i = 1
require.Equal(t, 3, int(blocks[i].Block.Num))
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
// Generate extra required data
ethAddTokens(blocks, client)
@@ -630,12 +614,6 @@ func TestSyncGeneral(t *testing.T) {
blocks, err = tc.GenerateBlocks(set2)
require.NoError(t, err)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
for i := 0; i < 4; i++ {
client.CtlRollback()
}

View File

@@ -44,7 +44,7 @@ func TestDebugAPI(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
require.Nil(t, err)
err = sdb.MakeCheckpoint() // Make a checkpoint to increment the batchNum
require.Nil(t, err)

View File

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
cpy := c.nextBlock().copy()
defer func() { c.revertIfErr(err, cpy) }()
_, err = common.NewFloat40(amount)
_, err = common.NewFloat16(amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
_, err = common.NewFloat40(depositAmount)
_, err = common.NewFloat16(depositAmount)
if err != nil {
return nil, tracerr.Wrap(err)
}
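Both variants of this guard check that the amounts are exactly representable in the data-availability float format before building the L1 tx; the constructor returns a non-nil error on precision loss. A worked check under the Float16 layout assumed below (value = m·10^e, with an optional half flag adding 5·10^(e−1)):

```go
// 10400 = 104 * 10^2 fits the 10-bit mantissa exactly, so this succeeds:
if _, err := common.NewFloat16(big.NewInt(10400)); err != nil {
	panic(err) // not reached
}
// 10401 has no exact (mantissa, exponent, half) decomposition, so this errors:
if _, err := common.NewFloat16(big.NewInt(10401)); err == nil {
	panic("expected a precision-loss error")
}
```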
@@ -1116,20 +1116,15 @@ func (c *Client) RollupConstants() (*common.RollupConstants, error) {
}
// RollupEventsByBlock returns the events in a block that happened in the Rollup Smart Contract
func (c *Client) RollupEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.RollupEvents, error) {
func (c *Client) RollupEventsByBlock(blockNum int64) (*eth.RollupEvents, *ethCommon.Hash, error) {
c.rw.RLock()
defer c.rw.RUnlock()
block, ok := c.blocks[blockNum]
if !ok {
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.Rollup.Events, nil
return &block.Rollup.Events, &block.Eth.Hash, nil
}
// RollupEventInit returns the initialize event with its corresponding block number
@@ -1578,20 +1573,15 @@ func (c *Client) AuctionConstants() (*common.AuctionConstants, error) {
}
// AuctionEventsByBlock returns the events in a block that happened in the Auction Smart Contract
func (c *Client) AuctionEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.AuctionEvents, error) {
func (c *Client) AuctionEventsByBlock(blockNum int64) (*eth.AuctionEvents, *ethCommon.Hash, error) {
c.rw.RLock()
defer c.rw.RUnlock()
block, ok := c.blocks[blockNum]
if !ok {
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.Auction.Events, nil
return &block.Auction.Events, &block.Eth.Hash, nil
}
// AuctionEventInit returns the initialize event with its corresponding block number
@@ -1799,20 +1789,15 @@ func (c *Client) WDelayerEscapeHatchWithdrawal(to, token ethCommon.Address, amou
}
// WDelayerEventsByBlock returns the events in a block that happened in the WDelayer Contract
func (c *Client) WDelayerEventsByBlock(blockNum int64,
blockHash *ethCommon.Hash) (*eth.WDelayerEvents, error) {
func (c *Client) WDelayerEventsByBlock(blockNum int64) (*eth.WDelayerEvents, *ethCommon.Hash, error) {
c.rw.RLock()
defer c.rw.RUnlock()
block, ok := c.blocks[blockNum]
if !ok {
return nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
return nil, nil, tracerr.Wrap(fmt.Errorf("Block %v doesn't exist", blockNum))
}
if blockHash != nil && *blockHash != block.Eth.Hash {
return nil, tracerr.Wrap(fmt.Errorf("Hash mismatch, requested %v got %v",
blockHash, block.Eth.Hash))
}
return &block.WDelayer.Events, nil
return &block.WDelayer.Events, &block.Eth.Hash, nil
}
// WDelayerConstants returns the Constants of the WDelayer Contract

View File

@@ -130,7 +130,7 @@ func TestClientAuction(t *testing.T) {
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
auctionEvents, err := c.AuctionEventsByBlock(blockNum, nil)
auctionEvents, _, err := c.AuctionEventsByBlock(blockNum)
require.Nil(t, err)
assert.Equal(t, 2, len(auctionEvents.NewBid))
}
@@ -171,7 +171,7 @@ func TestClientRollup(t *testing.T) {
blockNum, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, err := c.RollupEventsByBlock(blockNum, nil)
rollupEvents, _, err := c.RollupEventsByBlock(blockNum)
require.Nil(t, err)
assert.Equal(t, N, len(rollupEvents.L1UserTx))
assert.Equal(t, 1, len(rollupEvents.AddToken))
@@ -192,7 +192,7 @@ func TestClientRollup(t *testing.T) {
blockNumA, err := c.EthLastBlock()
require.Nil(t, err)
rollupEvents, err = c.RollupEventsByBlock(blockNumA, nil)
rollupEvents, hashA, err := c.RollupEventsByBlock(blockNumA)
require.Nil(t, err)
assert.Equal(t, 0, len(rollupEvents.L1UserTx))
assert.Equal(t, 0, len(rollupEvents.AddToken))
@@ -205,14 +205,14 @@ func TestClientRollup(t *testing.T) {
blockNumB, err := c.EthLastBlock()
require.Nil(t, err)
rollupEventsB, err := c.RollupEventsByBlock(blockNumA, nil)
rollupEvents, hashB, err := c.RollupEventsByBlock(blockNumA)
require.Nil(t, err)
assert.Equal(t, 0, len(rollupEventsB.L1UserTx))
assert.Equal(t, 0, len(rollupEventsB.AddToken))
assert.Equal(t, 0, len(rollupEventsB.ForgeBatch))
assert.Equal(t, 0, len(rollupEvents.L1UserTx))
assert.Equal(t, 0, len(rollupEvents.AddToken))
assert.Equal(t, 0, len(rollupEvents.ForgeBatch))
assert.Equal(t, blockNumA, blockNumB)
assert.NotEqual(t, rollupEvents, rollupEventsB)
assert.NotEqual(t, hashA, hashB)
// Forge again
rollupForgeBatchArgs0 := &eth.RollupForgeBatchArgs{
@@ -232,7 +232,7 @@ func TestClientRollup(t *testing.T) {
blockNum, err = c.EthLastBlock()
require.Nil(t, err)
rollupEvents, err = c.RollupEventsByBlock(blockNum, nil)
rollupEvents, _, err = c.RollupEventsByBlock(blockNum)
require.Nil(t, err)
rollupForgeBatchArgs1, sender, err := c.RollupForgeBatchArgs(rollupEvents.ForgeBatch[0].EthTxHash,

View File

@@ -145,7 +145,7 @@ const longWaitDuration = 999 * time.Hour
// const provingDuration = 2 * time.Second
func (s *Mock) runProver(ctx context.Context) {
waitCh := time.After(longWaitDuration)
waitDuration := longWaitDuration
for {
select {
case <-ctx.Done():
@@ -153,21 +153,21 @@ func (s *Mock) runProver(ctx context.Context) {
case msg := <-s.msgCh:
switch msg.value {
case "cancel":
waitCh = time.After(longWaitDuration)
waitDuration = longWaitDuration
s.Lock()
if !s.status.IsReady() {
s.status = prover.StatusCodeAborted
}
s.Unlock()
case "prove":
waitCh = time.After(s.provingDuration)
waitDuration = s.provingDuration
s.Lock()
s.status = prover.StatusCodeBusy
s.Unlock()
}
msg.ackCh <- true
case <-waitCh:
waitCh = time.After(longWaitDuration)
case <-time.After(waitDuration):
waitDuration = longWaitDuration
s.Lock()
if s.status != prover.StatusCodeBusy {
s.Unlock()
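The two versions of this loop are not equivalent: a stored `waitCh` keeps one pending timeout alive across iterations, whereas `case <-time.After(waitDuration)` arms a fresh timer on every pass through the select, so each incoming message restarts the countdown (and orphans the previous timer until it fires). A minimal side-by-side, assuming only a message channel and a duration:

```go
// Fresh timer per iteration: fires only after d of uninterrupted silence.
func freshTimerLoop(msgCh <-chan struct{}, d time.Duration) {
	for {
		select {
		case <-msgCh:
			// handle message; the timeout restarts on the next iteration
		case <-time.After(d):
			return
		}
	}
}

// Stored channel: armed once; messages do not postpone it.
func storedTimerLoop(msgCh <-chan struct{}, d time.Duration) {
	waitCh := time.After(d)
	for {
		select {
		case <-msgCh:
			// handle message; waitCh keeps counting down
		case <-waitCh:
			return
		}
	}
}
```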

View File

@@ -142,12 +142,10 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
// same values than in the js test
users = GenerateJsUsers(t)
depositAmount, err := common.Float40(10400).BigInt()
require.Nil(t, err)
l1UserTxs = []common.L1Tx{
{
FromIdx: 0,
DepositAmount: depositAmount,
DepositAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: users[0].BJJ.Public().Compress(),

View File

@@ -80,8 +80,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 0})
syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 0)
require.NoError(t, err)
txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -311,7 +310,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String())
assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String())
sendProofAndCheckResp(t, zki)
// batch3
@@ -334,7 +333,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData()
require.NoError(t, err)
assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String())
assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki)

View File

@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
os.Exit(exitVal)
}
const MaxTx = 352
const MaxTx = 376
const NLevels = 32
const MaxL1Tx = 256
const MaxFeeTx = 64
@@ -50,7 +50,7 @@ func initStateDB(t *testing.T, typ statedb.TypeStateDB) *statedb.StateDB {
require.NoError(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: typ, NLevels: NLevels})
sdb, err := statedb.NewStateDB(dir, 128, typ, NLevels)
require.NoError(t, err)
return sdb
}
@@ -61,7 +61,6 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
return
}
log.Infof("sending proof to %s", proofServerURL)
// Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err)

View File

@@ -127,7 +127,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
exits := make([]processedExit, nTx)
if tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeBatchBuilder {
tp.zki = common.NewZKInputs(tp.config.ChainID, tp.config.MaxTx, tp.config.MaxL1Tx,
tp.config.MaxFeeTx, tp.config.NLevels, (tp.s.CurrentBatch() + 1).BigInt())
tp.zki.OldLastIdx = tp.s.CurrentIdx().BigInt()
@@ -137,7 +137,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
// TBD if ExitTree is only in memory or stored on disk; for the moment
// it is only needed in memory
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
if err != nil {
return nil, tracerr.Wrap(err)
@@ -166,7 +166,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if err != nil {
return nil, tracerr.Wrap(err)
}
if tp.s.Type() == statedb.TypeSynchronizer {
if tp.s.Typ == statedb.TypeSynchronizer {
if createdAccount != nil {
createdAccounts = append(createdAccounts, *createdAccount)
l1usertxs[i].EffectiveFromIdx = createdAccount.Idx
@@ -195,7 +195,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
}
}
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{
exit: true,
@@ -217,7 +217,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if exitIdx != nil {
log.Error("Unexpected Exit in L1CoordinatorTx")
}
if tp.s.Type() == statedb.TypeSynchronizer {
if tp.s.Typ == statedb.TypeSynchronizer {
if createdAccount != nil {
createdAccounts = append(createdAccounts, *createdAccount)
l1coordinatortxs[i].EffectiveFromIdx = createdAccount.Idx
@@ -276,7 +276,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
// collectedFees will contain the amount of fee collected for each
// TokenID
var collectedFees map[common.TokenID]*big.Int
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
collectedFees = make(map[common.TokenID]*big.Int)
for tokenID := range coordIdxsMap {
collectedFees[tokenID] = big.NewInt(0)
@@ -317,7 +317,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}
}
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer || tp.s.Typ == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[tp.i] = processedExit{
exit: true,
@@ -401,13 +401,12 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}
if tp.s.Type() == statedb.TypeTxSelector {
if tp.s.Typ == statedb.TypeTxSelector {
return nil, nil
}
if tp.s.Type() == statedb.TypeSynchronizer {
// once all txs processed (exitTree root frozen), for each
// Exit, generate common.ExitInfo data
// once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data
var exitInfos []common.ExitInfo
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
for i := 0; i < nTx; i++ {
@@ -437,9 +436,9 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}
// return exitInfos, createdAccounts and collectedFees, so
// Synchronizer will be able to store it into HistoryDB for the
// concrete BatchNum
if tp.s.Typ == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum
return &ProcessTxOutput{
ZKInputs: nil,
ExitInfos: exitInfos,
@@ -502,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
tp.zki.OnChain[tp.i] = big.NewInt(1)
// L1Txs
depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
if tx.FromBJJ != common.EmptyBJJComp {
tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)
@@ -516,20 +515,6 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1)
}
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// in the cases where at L1Tx there is usage of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
}
switch tx.Type {
@@ -603,7 +588,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
}
var createdAccount *common.Account
if tp.s.Type() == statedb.TypeSynchronizer &&
if tp.s.Typ == statedb.TypeSynchronizer &&
(tx.Type == common.TxTypeCreateAccountDeposit ||
tx.Type == common.TxTypeCreateAccountDepositTransfer) {
var err error
@@ -627,8 +612,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
var err error
// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
if tp.s.Type() == statedb.TypeSynchronizer {
// thisTypeould never be reached
if tp.s.Typ == statedb.TypeSynchronizer {
// this should never be reached
log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
return nil, nil, false, tracerr.Wrap(fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0"))
}
@@ -672,11 +657,6 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
tp.zki.OnChain[tp.i] = big.NewInt(0)
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
tp.zki.NewAccount[tp.i] = big.NewInt(0)
// L2Txs
@@ -696,8 +676,8 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
}
// if StateDB type==TypeSynchronizer, will need to add Nonce
if tp.s.Type() == statedb.TypeSynchronizer {
// as tType==TypeSynchronizer, always tx.ToIdx!=0
if tp.s.Typ == statedb.TypeSynchronizer {
// as type==TypeSynchronizer, always tx.ToIdx!=0
acc, err := tp.s.GetAccount(tx.FromIdx)
if err != nil {
log.Errorw("GetAccount", "fromIdx", tx.FromIdx, "err", err)
@@ -909,8 +889,8 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
accumulated := tp.AccumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)
if tp.s.Type() == statedb.TypeSynchronizer ||
tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer ||
tp.s.Typ == statedb.TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}
@@ -1114,8 +1094,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
accumulated := tp.AccumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)
if tp.s.Type() == statedb.TypeSynchronizer ||
tp.s.Type() == statedb.TypeBatchBuilder {
if tp.s.Typ == statedb.TypeSynchronizer ||
tp.s.Typ == statedb.TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}

View File

@@ -1,6 +1,8 @@
package txprocessor
import (
"encoding/binary"
"encoding/hex"
"io/ioutil"
"math/big"
"os"
@@ -34,8 +36,7 @@ func TestComputeEffectiveAmounts(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
assert.NoError(t, err)
set := `
@@ -211,8 +212,7 @@ func TestProcessTxsBalances(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
assert.NoError(t, err)
chainID := uint16(0)
@@ -358,8 +358,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
assert.NoError(t, err)
chainID := uint16(0)
@@ -490,8 +489,7 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
assert.NoError(t, err)
chainID := uint16(0)
@@ -582,8 +580,7 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
assert.NoError(t, err)
// same values than in the js test
@@ -634,22 +631,22 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
defer assert.NoError(t, os.RemoveAll(dir))
nLevels := 16
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: nLevels})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, nLevels)
assert.NoError(t, err)
users := txsets.GenerateJsUsers(t)
daMaxF40 := common.Float40(0xFFFFFFFFFF)
daMaxBI, err := daMaxF40.BigInt()
daMaxHex, err := hex.DecodeString("FFFF")
require.NoError(t, err)
assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())
daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
daMaxBI := daMaxF16.BigInt()
assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
daMax1F40 := common.Float40(0xFFFFFFFFFE)
daMax1Hex, err := hex.DecodeString("FFFE")
require.NoError(t, err)
daMax1BI, err := daMax1F40.BigInt()
require.NoError(t, err)
assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())
daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
daMax1BI := daMax1F16.BigInt()
assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
l1Txs := []common.L1Tx{
{
@@ -703,8 +700,7 @@ func initTestMultipleCoordIdxForTokenID(t *testing.T) (*TxProcessor, *til.Contex
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, 32)
assert.NoError(t, err)
chainID := uint16(1)
@@ -802,8 +798,7 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 32)
assert.NoError(t, err)
chainID := uint16(1)
@@ -870,8 +865,7 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir2))
sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
sdb2, err := statedb.NewStateDB(dir2, 128, statedb.TypeSynchronizer, 32)
assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)

File diff suppressed because one or more lines are too long

View File

@@ -3,13 +3,13 @@ package txselector
// current: very simple version of TxSelector
import (
"bytes"
"fmt"
"math/big"
"sort"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/kvdb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
@@ -62,14 +62,8 @@ type TxSelector struct {
// NewTxSelector returns a *TxSelector
func NewTxSelector(coordAccount *CoordAccount, dbpath string,
synchronizerStateDB *statedb.StateDB, l2 *l2db.L2DB) (*TxSelector, error) {
localAccountsDB, err := statedb.NewLocalStateDB(
statedb.Config{
Path: dbpath,
Keep: kvdb.DefaultKeep,
Type: statedb.TypeTxSelector,
NLevels: 0,
},
synchronizerStateDB) // without merkletree
localAccountsDB, err := statedb.NewLocalStateDB(dbpath, 128,
synchronizerStateDB, statedb.TypeTxSelector, 0) // without merkletree
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -88,8 +82,12 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
// Reset tells the TxSelector to get its internal AccountsDB
// from the required `batchNum`
func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
err := txsel.localAccountsDB.Reset(batchNum, true)
if err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
@@ -231,8 +229,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue
}
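Worked through with the numbers this file's tests use: with `MaxL1UserTxs: 5` and 4 L1 user txs already queued for the batch, only 5 − 4 = 1 L1CoordinatorTx slot remains, so a second L2 tx needing an account-creation L1CoordinatorTx in the same batch is discarded with this message.

```go
// Capacity check with the test's numbers (illustrative values):
maxL1UserTxs := 5 // selectionConfig.MaxL1UserTxs
nL1UserTxs := 4   // len(l1UserTxs) in the batch
nL1CoordTxs := 1  // len(l1CoordinatorTxs) created so far
if nL1CoordTxs >= maxL1UserTxs-nL1UserTxs { // 1 >= 1: batch is full
	// the dependent L2 tx is moved to discardedL2Txs
}
```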
@@ -257,9 +254,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -271,8 +266,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs
// array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -290,31 +284,18 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
log.Debug(err)
// Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
err.Error())
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
if l1CoordinatorTx != nil {
// If ToEthAddr == 0xff.. this means that we
// are handling a TransferToBJJ, which doesn't
// require an authorization because it doesn't
// contain a valid ethereum address.
// Otherwise only create the account if we have
// the corresponding authorization
if validL2Tx.ToEthAddr == common.FFAddr {
accAuths = append(accAuths, common.EmptyEthSignature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
} else if accAuth != nil {
if accAuth != nil && l1CoordinatorTx != nil {
accAuths = append(accAuths, accAuth.Signature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}
}
if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx)
}
@@ -326,8 +307,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
"ToIdx", l2Txs[i].ToIdx)
// Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+
"ToIdx: %d", l2Txs[i].ToIdx)
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
l2Txs[i].ToIdx)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
}
@@ -339,9 +320,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
"does not correspond to the Account.EthAddr. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -355,9 +334,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info
// parameter of the tx, and add it to
// the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
"does not correspond to the Account.BJJ. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue
@@ -431,7 +408,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
log.Error(err)
// Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
continue
}
@@ -487,7 +464,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
// case: ToEthAddr is neither 0x00 nor 0xff
if l2Tx.ToBJJ != common.EmptyBJJComp {
// case: ToBJJ!=0:
@@ -543,7 +521,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
DepositAmount: big.NewInt(0),
Type: common.TxTypeCreateAccountDeposit,
}
} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
l2Tx.ToBJJ != common.EmptyBJJComp {
// if an idx exists for EthAddr&BJJ, use it
_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
l2Tx.TokenID)
@@ -569,8 +548,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
}
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
// L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
}
return &l2Tx, l1CoordinatorTx, accAuth, nil
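Condensing the destination-resolution branches shown above into one schematic (fee-account and slot checks omitted; this is not the full logic):

```go
// Schematic of processTxToEthAddrBJJ's case analysis (illustrative).
switch {
case l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr:
	// Regular address: reuse an existing idx for the receiver, or build an
	// account-creation L1CoordinatorTx, which requires an
	// AccountCreationAuth from the receiver (or ToBJJ == 0).
case l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp:
	// BJJ-only destination (0xff..ff sentinel): reuse the idx for
	// (FFAddr, ToBJJ) or create it without an authorization, since the
	// sentinel is not a real ethereum address.
default:
	// Neither a usable address nor a BJJ key: the tx cannot be selected.
}
```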
@@ -579,7 +557,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr &&
if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
l1CoordinatorTxs[i].TokenID == tokenID &&
l1CoordinatorTxs[i].FromBJJ == bjj {
return true

View File

@@ -34,8 +34,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
syncStateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeTxSelector, NLevels: 0})
syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeTxSelector, 0)
require.NoError(t, err)
txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -48,7 +47,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
BJJ: coordUser.BJJ.Public().Compress(),
AccountCreationAuth: nil,
}
// fmt.Printf("%v\n", coordAccount)
fmt.Printf("%v", coordAccount)
auth := common.AccountCreationAuth{
EthAddr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(),
@@ -424,9 +423,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
// batch2
// prepare the PoolL2Txs
@@ -497,134 +496,3 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestTransferToBjj(t *testing.T) {
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
CreateAccountDeposit(1) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as they will be set when directly generating
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
addTokens(t, tc, txsel.l2db.DB())
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to create some accounts with positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx.
batchPoolL2 := `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// Now the BJJ-only account for B is already created, so the transfer
// happens without an L1CoordTx that creates the user account.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// The transfer now is ToBJJ to a BJJ-only account that doesn't exist
// and the coordinator will create it via L1CoordTx. Since it's a
// transfer of a token for which the coordinator doesn't have a fee
// account, another L1CoordTx will be created for the coordinator to
// receive the fees.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(1) B-A: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
// to receive the fees by the coordinator and another one for the
// recipient of the l2tx
assert.Equal(t, 2, len(oL1CoordTxs))
// [0] Coordinator account creation for token 1
assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
// [1] User A BJJ-only account creation for token 1
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}