Compare commits


5 Commits

Author SHA1 Message Date
arnaucube
6a01c0ac14 Update ZKInputs test vectors with float40 & other
- Add AmountF new parameter to ZKInputs
- Update ZKInputs test vectors with float40 checked with circom circuits
- Small fix at eth/rollup.go with lenL1L2TxsBytes with new length of
Float40
2021-02-12 16:40:36 +01:00
arnaucube
63151a285c Migrate all packages to use Float40
Migrate all packages to use Float40 & Add more test vectors at common
2021-02-12 14:27:07 +01:00
arnaucube
52d4197330 Add Float40 methods
This commit adds Float40 related methods, and keeps the Float16 version
which will be deleted in a near future once the Float40 migration is
ready.
2021-02-12 14:27:07 +01:00
arnau
ea63cba62a Merge pull request #538 from hermeznetwork/fix/txselTransferToBJJ
Fix txselector TransferToBJJ behaviour
2021-02-12 12:02:13 +01:00
Eduard S
c7e6267189 Fix txselector TransferToBJJ behaviour 2021-02-11 18:19:29 +01:00
45 changed files with 1137 additions and 1245 deletions

View File

@@ -2916,7 +2916,7 @@ components:
example: 101 example: 101
l1UserTotalBytes: l1UserTotalBytes:
type: integer type: integer
description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx). description: Number of bytes that a L1 user transaction has ([20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx).
example: 72 example: 72
maxL1UserTx: maxL1UserTx:
type: integer type: integer

View File

@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise // copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy. // it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer)) return bb.localStateDB.Reset(batchNum, fromSynchronizer)
} }
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch // BuildBatch takes the transactions and returns the common.ZKInputs of the next batch

View File

@@ -41,15 +41,12 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token" TokenHEZName = "Hermez Network Token"
[Coordinator] [Coordinator]
ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator # ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3" # ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
# ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563" # ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10 ConfirmBlocks = 10
L1BatchTimeoutPerc = 0.6 L1BatchTimeoutPerc = 0.999
StartSlotBlocksDelay = 2
ScheduleBatchBlocksAheadCheck = 3
SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s" ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms" ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s" SyncRetryInterval = "1s"
@@ -88,11 +85,8 @@ ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms" CheckLoopInterval = "500ms"
Attempts = 4 Attempts = 4
AttemptsDelay = "500ms" AttemptsDelay = "500ms"
TxResendTimeout = "2m"
NoReuseNonce = false
CallGasLimit = 300000 CallGasLimit = 300000
GasPriceDiv = 100 GasPriceDiv = 100
MaxGasPrice = "5000000000"
[Coordinator.EthClient.Keystore] [Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore" Path = "/tmp/iden3-test/hermez/ethkeystore"

View File

@@ -17,6 +17,11 @@ const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollu
// EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem // EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
const EthMsgPrefix = "\x19Ethereum Signed Message:\n" const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
var (
// EmptyEthSignature is an ethereum signature of all zeroes
EmptyEthSignature = make([]byte, 65)
)
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for // AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary // account creations when necessary
type AccountCreationAuth struct { type AccountCreationAuth struct {
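The 65-byte zero value mirrors the Ethereum ECDSA signature layout of r (32 bytes) || s (32 bytes) || v (1 byte), the same layout that BytesCoordinatorTx unpacks in l1tx.go further below.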

View File

@@ -27,24 +27,6 @@ type Batch struct {
TotalFeesUSD *float64 `meddler:"total_fees_usd"` TotalFeesUSD *float64 `meddler:"total_fees_usd"`
} }
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}
// BatchNum identifies a batch // BatchNum identifies a batch
type BatchNum int64 type BatchNum int64
@@ -93,23 +75,3 @@ func NewBatchData() *BatchData {
Batch: Batch{}, Batch: Batch{},
} }
} }
// BatchSync is a subset of Batch that contains fields needed for the
// synchronizer and coordinator
// type BatchSync struct {
// BatchNum BatchNum `meddler:"batch_num"`
// EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
// ForgerAddr ethCommon.Address `meddler:"forger_addr"`
// StateRoot *big.Int `meddler:"state_root,bigint"`
// SlotNum int64 `meddler:"slot_num"` // Slot in which the batch is forged
// }
//
// func NewBatchSync() *BatchSync {
// return &BatchSync{
// BatchNum: 0,
// EthBlockNum: 0,
// ForgerAddr: ethCommon.Address,
// StateRoot: big.NewInt(0),
// SlotNum: 0,
// }
// }

View File

@@ -33,8 +33,7 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum { if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot) return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
// This result will be negative return -1
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
} }
// SlotBlocks returns the first and the last block numbers included in that slot // SlotBlocks returns the first and the last block numbers included in that slot
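A minimal usage sketch of the simplified SlotNum, assuming the repository's common package import path and using the field names shown above (GenesisBlockNum, BlocksPerSlot) with illustrative values rather than real deployment parameters:

package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// Illustrative constants; only the two fields used by SlotNum are set.
	c := common.AuctionConstants{GenesisBlockNum: 100, BlocksPerSlot: 40}
	fmt.Println(c.SlotNum(99))  // -1: any block before genesis maps to the sentinel slot
	fmt.Println(c.SlotNum(100)) // 0: the first slot starts at the genesis block
	fmt.Println(c.SlotNum(180)) // 2: (180 - 100) / 40
}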

View File

@@ -24,8 +24,8 @@ const (
// RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature // RollupConstL1CoordinatorTotalBytes [4 bytes] token + [32 bytes] babyjub + [65 bytes] compressedSignature
RollupConstL1CoordinatorTotalBytes = 101 RollupConstL1CoordinatorTotalBytes = 101
// RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx + // RollupConstL1UserTotalBytes [20 bytes] fromEthAddr + [32 bytes] fromBjj-compressed + [6 bytes] fromIdx +
// [2 bytes] depositAmountFloat16 + [2 bytes] amountFloat16 + [4 bytes] tokenId + [6 bytes] toIdx // [5 bytes] depositAmountFloat40 + [5 bytes] amountFloat40 + [4 bytes] tokenId + [6 bytes] toIdx
RollupConstL1UserTotalBytes = 72 RollupConstL1UserTotalBytes = 78
// RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch // RollupConstMaxL1UserTx Maximum L1-user transactions allowed to be queued in a batch
RollupConstMaxL1UserTx = 128 RollupConstMaxL1UserTx = 128
// RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch // RollupConstMaxL1Tx Maximum L1 transactions allowed to be queued in a batch
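The new value follows directly from summing the per-field sizes in the comment; a compile-time restatement of that arithmetic (hypothetical constant names, for illustration only):

const (
	// 20 fromEthAddr + 32 fromBjj + 6 fromIdx + 5 depositAmountFloat40 +
	// 5 amountFloat40 + 4 tokenId + 6 toIdx = 78
	l1UserTotalBytes = 20 + 32 + 6 + 5 + 5 + 4 + 6
	// 4 token + 32 babyjub + 65 compressedSignature = 101 (unchanged)
	l1CoordinatorTotalBytes = 4 + 32 + 65
)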

View File

@@ -30,6 +30,7 @@ func (f16 Float16) Bytes() []byte {
// Float16FromBytes returns a Float16 from a byte array of 2 bytes. // Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 { func Float16FromBytes(b []byte) *Float16 {
// WARNING: b[:2] panics when len(b) < 2
f16 := Float16(binary.BigEndian.Uint16(b[:2])) f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16 return &f16
} }
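The warning exists because binary.BigEndian.Uint16 indexes b[0] and b[1] unconditionally, so a slice shorter than 2 bytes panics. A defensive variant, sketched here with a hypothetical name inside the same package (assuming fmt is imported), would surface the problem as an error instead:

// float16FromBytesSafe is a hypothetical, bounds-checked alternative.
func float16FromBytesSafe(b []byte) (*Float16, error) {
	if len(b) < 2 {
		return nil, fmt.Errorf("expected at least 2 bytes, got %d", len(b))
	}
	f16 := Float16(binary.BigEndian.Uint16(b[:2]))
	return &f16, nil
}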

View File

@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestConversions(t *testing.T) { func TestConversionsFloat16(t *testing.T) {
testVector := map[Float16]string{ testVector := map[Float16]string{
0x307B: "123000000", 0x307B: "123000000",
0x1DC6: "454500", 0x1DC6: "454500",
@@ -32,14 +32,14 @@ func TestConversions(t *testing.T) {
bi.SetString(testVector[test], 10) bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi) fl, err := NewFloat16(bi)
assert.Equal(t, nil, err) assert.NoError(t, err)
fx2 := fl.BigInt() fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test]) assert.Equal(t, fx2.String(), testVector[test])
} }
} }
func TestFloorFix2Float(t *testing.T) { func TestFloorFix2FloatFloat16(t *testing.T) {
testVector := map[string]Float16{ testVector := map[string]Float16{
"87999990000000000": 0x776f, "87999990000000000": 0x776f,
"87950000000000001": 0x776f, "87950000000000001": 0x776f,
@@ -57,10 +57,10 @@ func TestFloorFix2Float(t *testing.T) {
} }
} }
func TestConversionLosses(t *testing.T) { func TestConversionLossesFloat16(t *testing.T) {
a := big.NewInt(1000) a := big.NewInt(1000)
b, err := NewFloat16(a) b, err := NewFloat16(a)
assert.Equal(t, nil, err) assert.NoError(t, err)
c := b.BigInt() c := b.BigInt()
assert.Equal(t, c, a) assert.Equal(t, c, a)

105
common/float40.go Normal file
View File

@@ -0,0 +1,105 @@
// Package common Float40 provides methods to work with the Hermez custom
// half-float precision, 40-bit codification, internally called Float40, which
// has been adopted to encode large integers. This is done in order to save
// bits when L2 transactions are published.
//nolint:gomnd
package common
import (
"bytes"
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
// Float40BytesLength defines the length of the Float40 values
// represented as byte arrays
Float40BytesLength = 5
)
var (
// ErrFloat40Overflow is used when a given value overflows the maximum
// capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when e > 31 while trying to convert a
// *big.Int to Float40
ErrFloat40E31 = errors.New("Float40 error, e > 31")
// ErrFloat40NotEnoughPrecission is used when the given *big.Int can
// not be represented as a Float40 due to not enough precision
ErrFloat40NotEnoughPrecission = errors.New("Float40 error, not enough precision")
)
// Float40 represents a float in a 40 bit format, internally stored as a uint64
type Float40 uint64
// Bytes returns a byte array of length 5 with the Float40 value encoded in
// BigEndian
func (f40 Float40) Bytes() ([]byte, error) {
if f40 > maxFloat40Value {
return []byte{}, tracerr.Wrap(ErrFloat40Overflow)
}
var f40Bytes [8]byte
binary.BigEndian.PutUint64(f40Bytes[:], uint64(f40))
var b [5]byte
copy(b[:], f40Bytes[3:])
return b[:], nil
}
// Float40FromBytes returns a Float40 from a byte array of 5 bytes in BigEndian
// representation.
func Float40FromBytes(b []byte) Float40 {
var f40Bytes [8]byte
copy(f40Bytes[3:], b[:])
f40 := binary.BigEndian.Uint64(f40Bytes[:])
return Float40(f40)
}
// BigInt converts the Float40 to a *big.Int v, where v = m * 10^e, being:
// [ e | m ]
// [ 5 bits | 35 bits ]
func (f40 Float40) BigInt() (*big.Int, error) {
// take the 5 used bytes (FF * 5)
var f40Uint64 uint64 = uint64(f40) & 0x00_00_00_FF_FF_FF_FF_FF
f40Bytes, err := f40.Bytes()
if err != nil {
return nil, err
}
e := f40Bytes[0] & 0xF8 >> 3 // take the first 5 bits
m := f40Uint64 & 0x07_FF_FF_FF_FF // take the other 35 bits
exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(e)), nil)
r := new(big.Int).Mul(big.NewInt(int64(m)), exp)
return r, nil
}
// NewFloat40 encodes a *big.Int integer as a Float40, returning an error in
// case of loss during the encoding.
func NewFloat40(f *big.Int) (Float40, error) {
m := f
e := big.NewInt(0)
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
!bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
new(big.Int).Mul(e, thres))
return Float40(r.Uint64()), nil
}
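A round-trip sketch of the new API (assuming the repository's common package import path); the expected values match the test vectors in the file below:

package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// 123000000 fits in the 35-bit mantissa, so NewFloat40 keeps e=0.
	f40, err := common.NewFloat40(big.NewInt(123000000))
	if err != nil {
		panic(err)
	}
	b, err := f40.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", b) // 000754d4c0 (5 bytes, BigEndian)

	v, err := common.Float40FromBytes(b).BigInt()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 123000000

	// The maximum encodable value sets e=31 and m=2^35-1.
	max, err := common.Float40(0xFFFFFFFFFF).BigInt()
	if err != nil {
		panic(err)
	}
	fmt.Println(max) // 343597383670000000000000000000000000000000
}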

95
common/float40_test.go Normal file
View File

@@ -0,0 +1,95 @@
package common
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConversionsFloat40(t *testing.T) {
testVector := map[Float40]string{
6*0x800000000 + 123: "123000000",
2*0x800000000 + 4545: "454500",
30*0x800000000 + 10235: "10235000000000000000000000000000000",
0x000000000: "0",
0x800000000: "0",
0x0001: "1",
0x0401: "1025",
0x800000000 + 1: "10",
0xFFFFFFFFFF: "343597383670000000000000000000000000000000",
}
for test := range testVector {
fix, err := test.BigInt()
require.NoError(t, err)
assert.Equal(t, fix.String(), testVector[test])
bi, ok := new(big.Int).SetString(testVector[test], 10)
require.True(t, ok)
fl, err := NewFloat40(bi)
assert.NoError(t, err)
fx2, err := fl.BigInt()
require.NoError(t, err)
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestExpectError(t *testing.T) {
testVector := map[string]error{
"9922334455000000000000000000000000000000": nil,
"9922334455000000000000000000000000000001": ErrFloat40NotEnoughPrecission,
"9922334454999999999999999999999999999999": ErrFloat40NotEnoughPrecission,
"42949672950000000000000000000000000000000": nil,
"99223344556573838487575": ErrFloat40NotEnoughPrecission,
"992233445500000000000000000000000000000000": ErrFloat40E31,
"343597383670000000000000000000000000000000": nil,
"343597383680000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383690000000000000000000000000000000": ErrFloat40NotEnoughPrecission,
"343597383700000000000000000000000000000000": ErrFloat40E31,
}
for test := range testVector {
bi, ok := new(big.Int).SetString(test, 10)
require.True(t, ok)
_, err := NewFloat40(bi)
assert.Equal(t, testVector[test], err)
}
}
func BenchmarkFloat40(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Can not convert string to *big.Int")
}
return bigInt
}
type pair struct {
Float40 Float40
BigInt *big.Int
}
testVector := []pair{
{6*0x800000000 + 123, newBigInt("123000000")},
{2*0x800000000 + 4545, newBigInt("454500")},
{30*0x800000000 + 10235, newBigInt("10235000000000000000000000000000000")},
{0x000000000, newBigInt("0")},
{0x800000000, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1025")},
{0x800000000 + 1, newBigInt("10")},
{0xFFFFFFFFFF, newBigInt("343597383670000000000000000000000000000000")},
}
b.Run("NewFloat40()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat40(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float40.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = testVector[i%len(testVector)].Float40.BigInt()
}
})
}
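The table doubles as a specification of the edge cases: both 0x000000000 and 0x800000000 decode to zero, because a zero mantissa makes the exponent irrelevant, and 0xFFFFFFFFFF is the largest encodable value (e=31, m=2^35-1). The benchmarks run with the standard tooling, e.g. go test -bench Float40 ./common.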

View File

@@ -11,18 +11,11 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
const (
// L1UserTxBytesLen is the length of the byte array that represents the L1Tx
L1UserTxBytesLen = 72
// L1CoordinatorTxBytesLen is the length of the byte array that represents the L1CoordinatorTx
L1CoordinatorTxBytesLen = 101
)
// L1Tx is a struct that represents a L1 tx // L1Tx is a struct that represents a L1 tx
type L1Tx struct { type L1Tx struct {
// Stored in DB: mandatory fields // Stored in DB: mandatory fields
// TxID (12 bytes) for L1Tx is: // TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of:
// bytes: | 1 | 8 | 2 | 1 | // bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) | // values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type: // where type:
@@ -179,45 +172,38 @@ func (tx L1Tx) Tx() Tx {
// [ 8 bits ] empty (userFee) // 1 byte // [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes // [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes // [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation // Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount) var b [29]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
// b[0:7] empty: no ToBJJSign, no fee, no nonce // b[0:7] empty: no ToBJJSign, no fee, no nonce
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[13:19], toIdxBytes[:]) copy(b[11:17], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[19:25], fromIdxBytes[:]) copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID) binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[27:31], SignatureConstantBytes[:]) copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
} }
// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability // BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat16 | Fee ] // [ fromIdx | toIdx | amountFloat40 | Fee ]
func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -231,13 +217,17 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
if tx.EffectiveAmount != nil { if tx.EffectiveAmount != nil {
amountFloat16, err := NewFloat16(tx.EffectiveAmount) amountFloat40, err := NewFloat40(tx.EffectiveAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes()) amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
} }
// fee = 0 (as is L1Tx) b[10:11] copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
}
// fee = 0 (as is L1Tx)
return b[:], nil return b[:], nil
} }
@@ -247,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen] fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2] toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+2] amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength]
l1tx := L1Tx{} l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6)) fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -260,8 +250,8 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
l1tx.ToIdx = toIdx l1tx.ToIdx = toIdx
l1tx.EffectiveAmount = Float16FromBytes(amountBytes).BigInt() l1tx.EffectiveAmount, err = Float40FromBytes(amountBytes).BigInt()
return &l1tx, nil return &l1tx, err
} }
// BytesGeneric returns the generic representation of a L1Tx. This method is // BytesGeneric returns the generic representation of a L1Tx. This method is
@@ -269,7 +259,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
// for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case). // for L1CoordinatorTxs & L1UserTxs (for the ZKInputs case).
func (tx *L1Tx) BytesGeneric() ([]byte, error) { func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [L1UserTxBytesLen]byte var b [RollupConstL1UserTotalBytes]byte
copy(b[0:20], tx.FromEthAddr.Bytes()) copy(b[0:20], tx.FromEthAddr.Bytes())
if tx.FromBJJ != EmptyBJJComp { if tx.FromBJJ != EmptyBJJComp {
pkCompL := tx.FromBJJ pkCompL := tx.FromBJJ
@@ -281,22 +271,33 @@ func (tx *L1Tx) BytesGeneric() ([]byte, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[52:58], fromIdxBytes[:]) copy(b[52:58], fromIdxBytes[:])
depositAmountFloat16, err := NewFloat16(tx.DepositAmount)
depositAmountFloat40, err := NewFloat40(tx.DepositAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[58:60], depositAmountFloat16.Bytes()) depositAmountFloat40Bytes, err := depositAmountFloat40.Bytes()
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[60:62], amountFloat16.Bytes()) copy(b[58:63], depositAmountFloat40Bytes)
copy(b[62:66], tx.TokenID.Bytes())
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(b[63:68], amountFloat40Bytes)
copy(b[68:72], tx.TokenID.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[66:72], toIdxBytes[:]) copy(b[72:78], toIdxBytes[:])
return b[:], nil return b[:], nil
} }
@@ -313,7 +314,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
if tx.UserOrigin { if tx.UserOrigin {
return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx")) return nil, tracerr.Wrap(fmt.Errorf("Can not calculate BytesCoordinatorTx() for a L1UserTx"))
} }
var b [L1CoordinatorTxBytesLen]byte var b [RollupConstL1CoordinatorTotalBytes]byte
v := compressedSignatureBytes[64] v := compressedSignatureBytes[64]
s := compressedSignatureBytes[32:64] s := compressedSignatureBytes[32:64]
r := compressedSignatureBytes[0:32] r := compressedSignatureBytes[0:32]
@@ -329,7 +330,7 @@ func (tx *L1Tx) BytesCoordinatorTx(compressedSignatureBytes []byte) ([]byte, err
// L1UserTxFromBytes decodes a L1Tx from []byte // L1UserTxFromBytes decodes a L1Tx from []byte
func L1UserTxFromBytes(b []byte) (*L1Tx, error) { func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
if len(b) != L1UserTxBytesLen { if len(b) != RollupConstL1UserTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1Tx bytes, expected length %d, current: %d", 68, len(b)))
} }
@@ -347,13 +348,19 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.FromIdx = fromIdx tx.FromIdx = fromIdx
tx.DepositAmount = Float16FromBytes(b[58:60]).BigInt() tx.DepositAmount, err = Float40FromBytes(b[58:63]).BigInt()
tx.Amount = Float16FromBytes(b[60:62]).BigInt()
tx.TokenID, err = TokenIDFromBytes(b[62:66])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.ToIdx, err = IdxFromBytes(b[66:72]) tx.Amount, err = Float40FromBytes(b[63:68]).BigInt()
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.TokenID, err = TokenIDFromBytes(b[68:72])
if err != nil {
return nil, tracerr.Wrap(err)
}
tx.ToIdx, err = IdxFromBytes(b[72:78])
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
@@ -368,7 +375,7 @@ func signHash(data []byte) []byte {
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) { func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != L1CoordinatorTxBytesLen { if len(b) != RollupConstL1CoordinatorTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
} }
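Read off the copy offsets above, the new 78-byte user-transaction layout is:

b[0:20]   fromEthAddr
b[20:52]  fromBJJ (compressed)
b[52:58]  fromIdx
b[58:63]  depositAmountFloat40
b[63:68]  amountFloat40
b[68:72]  tokenID
b[72:78]  toIdx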

View File

@@ -50,64 +50,110 @@ func TestNewL1CoordinatorTx(t *testing.T) {
} }
func TestL1TxCompressedData(t *testing.T) { func TestL1TxCompressedData(t *testing.T) {
// test vector values generated from javascript implementation (using
// PoolL2Tx values)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
FromIdx: 2, FromIdx: (1 << 48) - 1,
ToIdx: 3, ToIdx: (1 << 48) - 1,
Amount: big.NewInt(4), Amount: amount,
TokenID: 5, TokenID: (1 << 32) - 1,
} }
chainID := uint16(0) txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err) assert.NoError(t, err)
expectedStr := "ffffffffffffffffffffffffffffffffffffc60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
// test vector value generated from javascript implementation tx = L1Tx{
expectedStr := "7307597389635308713748674793997299267459594577423" FromIdx: 0,
assert.Equal(t, expectedStr, txCompressedData.String()) ToIdx: 0,
assert.Equal(t, "0500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes())) Amount: big.NewInt(0),
TokenID: 0,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
amount, ok = new(big.Int).SetString("63000000000000000", 10)
require.True(t, ok)
tx = L1Tx{
FromIdx: 324,
ToIdx: 256,
Amount: amount,
TokenID: 123,
}
txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err)
expectedStr = "7b0000000001000000000001440001c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = L1Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err)
expectedStr = "030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
} }
func TestBytesDataAvailability(t *testing.T) { func TestBytesDataAvailability(t *testing.T) {
// test vector values generated from javascript implementation
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := L1Tx{ tx := L1Tx{
FromIdx: 2, ToIdx: (1 << 16) - 1,
ToIdx: 3, FromIdx: (1 << 16) - 1,
Amount: big.NewInt(4), EffectiveAmount: amount,
TokenID: 5,
} }
txCompressedData, err := tx.BytesDataAvailability(32) txCompressedData, err := tx.BytesDataAvailability(16)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData)) assert.Equal(t, "ffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err := L1TxFromDataAvailability(txCompressedData, 16)
tx = L1Tx{
FromIdx: 2,
ToIdx: 3,
EffectiveAmount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
}
func TestL1TxFromDataAvailability(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
}
txCompressedData, err := tx.BytesDataAvailability(32)
assert.NoError(t, err)
l1tx, err := L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx) assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{ tx = L1Tx{
FromIdx: 2, ToIdx: (1 << 32) - 1,
ToIdx: 3, FromIdx: (1 << 32) - 1,
EffectiveAmount: big.NewInt(4), EffectiveAmount: amount,
} }
txCompressedData, err = tx.BytesDataAvailability(32) txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "ffffffffffffffffffffffffff00", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 0,
FromIdx: 0,
EffectiveAmount: big.NewInt(0),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "0000000000000000000000000000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
assert.Equal(t, tx.ToIdx, l1tx.ToIdx)
assert.Equal(t, tx.EffectiveAmount, l1tx.EffectiveAmount)
tx = L1Tx{
ToIdx: 635,
FromIdx: 296,
EffectiveAmount: big.NewInt(1000000000000000000),
}
txCompressedData, err = tx.BytesDataAvailability(32)
assert.NoError(t, err)
assert.Equal(t, "000001280000027b42540be40000", hex.EncodeToString(txCompressedData))
l1tx, err = L1TxFromDataAvailability(txCompressedData, 32) l1tx, err = L1TxFromDataAvailability(txCompressedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tx.FromIdx, l1tx.FromIdx) assert.Equal(t, tx.FromIdx, l1tx.FromIdx)
@@ -172,12 +218,10 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
UserOrigin: true, UserOrigin: true,
} }
expected, err := utils.HexDecode("85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db8be880f00020039c0000053cb88d")
require.NoError(t, err)
encodedData, err := l1Tx.BytesUser() encodedData, err := l1Tx.BytesUser()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, encodedData) expected := "85dab5b9e2e361d0c208d77be90efcc0439b0a530dd02deb2c81068e7a0f7e327df80b4ab79ee1f41a7def613e73a20c32eece5a000001c638db52540be400459682f0000020039c0000053cb88d"
assert.Equal(t, expected, hex.EncodeToString(encodedData))
} }
func TestL1CoordinatorTxByteParsers(t *testing.T) { func TestL1CoordinatorTxByteParsers(t *testing.T) {

View File

@@ -89,11 +89,15 @@ func (tx L2Tx) CalculateTxID() ([TxIDLen]byte, error) {
// TokenID // TokenID
b = append(b, tx.TokenID.Bytes()[:]...) b = append(b, tx.TokenID.Bytes()[:]...)
// Amount // Amount
amountFloat16, err := NewFloat16(tx.Amount) amountFloat40, err := NewFloat40(tx.Amount)
if err != nil { if err != nil {
return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount)) return txID, tracerr.Wrap(fmt.Errorf("%s: %d", err, tx.Amount))
} }
b = append(b, amountFloat16.Bytes()...) amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return txID, tracerr.Wrap(err)
}
b = append(b, amountFloat40Bytes...)
// Nonce // Nonce
nonceBytes, err := tx.Nonce.Bytes() nonceBytes, err := tx.Nonce.Bytes()
if err != nil { if err != nil {
@@ -170,11 +174,11 @@ func TxIDsFromL2Txs(txs []L2Tx) []TxID {
} }
// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability // BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
// [ fromIdx | toIdx | amountFloat16 | Fee ] // [ fromIdx | toIdx | amountFloat40 | Fee ]
func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd b := make([]byte, ((nLevels*2)+40+8)/8) //nolint:gomnd
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
@@ -188,13 +192,16 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
} }
copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:]) copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
amountFloat16, err := NewFloat16(tx.Amount) amountFloat40, err := NewFloat40(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
amountFloat40Bytes, err := amountFloat40.Bytes()
copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes()) if err != nil {
b[idxLen*2+2] = byte(tx.Fee) return nil, tracerr.Wrap(err)
}
copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
return b[:], nil return b[:], nil
} }
@@ -219,7 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount = Float16FromBytes(b[idxLen*2 : idxLen*2+2]).BigInt() tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt()
tx.Fee = FeeSelector(b[idxLen*2+2]) if err != nil {
return nil, tracerr.Wrap(err)
}
tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
return tx, nil return tx, nil
} }
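With the 5-byte amount the DA encoding now takes (nLevels*2 + 40 + 8)/8 bytes, e.g. 14 bytes for nLevels=32 (two 4-byte indices, a 5-byte amount, a 1-byte fee). A sketch reproducing one of the vectors from the test file below, assuming the repository's common package import path:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	tx := common.L2Tx{
		FromIdx: 257,
		ToIdx:   256,
		Amount:  big.NewInt(79000000), // encodes as Float40 0x0004b571c0
		Fee:     201,
	}
	b, err := tx.BytesDataAvailability(32)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(b)) // 00000101000001000004b571c0c9
}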

View File

@@ -19,7 +19,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err := NewL2Tx(l2Tx) l2Tx, err := NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", l2Tx.TxID.String()) assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -30,7 +30,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x0276114a8f666fa1ff7dbf34b4a9da577808dc501e3b2760d01fe3ef5473f5737f", l2Tx.TxID.String()) assert.Equal(t, "0x029e7499a830f8f5eb17c07da48cf91415710f1bcbe0169d363ff91e81faf92fc2", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -42,7 +42,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x025afb63126d3067f61f633d13e5a51da0551af3a4567a9af2db5321ed04214ff4", l2Tx.TxID.String()) assert.Equal(t, "0x0255c70ed20e1b8935232e1b9c5884dbcc88a6e1a3454d24f2d77252eb2bb0b64e", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 87654, FromIdx: 87654,
@@ -54,7 +54,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02cf390157041c3b1b59f0aaed4da464f0d0d48f1d026e46fd89c7fe1e5aed7fcf", l2Tx.TxID.String()) assert.Equal(t, "0x0206b372f967061d1148bbcff679de38120e075141a80a07326d0f514c2efc6ca9", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 1, FromIdx: 1,
@@ -66,7 +66,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x020ec18eaae67fcd545998841a9c4be09ee3083e12db6ae5e5213a2ecaaa52d5cf", l2Tx.TxID.String()) assert.Equal(t, "0x0236f7ea5bccf78ba60baf56c058d235a844f9b09259fd0efa4f5f72a7d4a26618", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 999, FromIdx: 999,
@@ -78,7 +78,7 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02f036223e79fac776de107f50822552cc964ee9fc4caa304613285f6976bcc940", l2Tx.TxID.String()) assert.Equal(t, "0x02ac122f5b709ce190129fecbbe35bfd30c70e6433dbd85a8eb743d110906a1dc1", l2Tx.TxID.String())
l2Tx = &L2Tx{ l2Tx = &L2Tx{
FromIdx: 4444, FromIdx: 4444,
@@ -90,25 +90,85 @@ func TestNewL2Tx(t *testing.T) {
} }
l2Tx, err = NewL2Tx(l2Tx) l2Tx, err = NewL2Tx(l2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x029c8aef9ef24531e4cf84e78cbab1018ba1626a5a10afb6b7c356be1b5c28e92c", l2Tx.TxID.String()) assert.Equal(t, "0x02c674951a81881b7bc50db3b9e5efd97ac88550c7426ac548720e5057cfba515a", l2Tx.TxID.String())
} }
func TestL2TxByteParsers(t *testing.T) { func TestL2TxByteParsers(t *testing.T) {
amount := new(big.Int) // test vector values generated from javascript implementation
amount.SetString("79000000", 10) amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
l2Tx := &L2Tx{ l2Tx := &L2Tx{
ToIdx: 256, ToIdx: (1 << 16) - 1,
FromIdx: (1 << 16) - 1,
Amount: amount, Amount: amount,
FromIdx: 257, Fee: (1 << 8) - 1,
Fee: 201,
} }
// Data from the compatibility test expected := "ffffffffffffffffffff"
expected := "00000101000001002b16c9" encodedData, err := l2Tx.BytesDataAvailability(16)
encodedData, err := l2Tx.BytesDataAvailability(32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData)) assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err := L2TxFromBytesDataAvailability(encodedData, 32) decodedData, err := L2TxFromBytesDataAvailability(encodedData, 16)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: (1 << 32) - 1,
FromIdx: (1 << 32) - 1,
Amount: amount,
Fee: (1 << 8) - 1,
}
expected = "ffffffffffffffffffffffffffff"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 0,
Amount: big.NewInt(0),
Fee: 0,
}
expected = "0000000000000000000000000000"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 0,
FromIdx: 1061,
Amount: big.NewInt(420000000000),
Fee: 127,
}
expected = "000004250000000010fa56ea007f"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData)
l2Tx = &L2Tx{
ToIdx: 256,
FromIdx: 257,
Amount: big.NewInt(79000000),
Fee: 201,
}
expected = "00000101000001000004b571c0c9"
encodedData, err = l2Tx.BytesDataAvailability(32)
require.NoError(t, err)
assert.Equal(t, expected, hex.EncodeToString(encodedData))
decodedData, err = L2TxFromBytesDataAvailability(encodedData, 32)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, l2Tx, decodedData) assert.Equal(t, l2Tx, decodedData)
} }

View File

@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"` ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"` ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float16 Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float40
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"` State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"` RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"` RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"` RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float16 RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float40
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"` RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"` AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -122,18 +122,13 @@ func (tx *PoolL2Tx) SetID() error {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes // [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation // Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount) var b [29]byte
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [31]byte
toBJJSign := byte(0) toBJJSign := byte(0)
pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ) pkSign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -149,19 +144,18 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[13:19], toIdxBytes[:]) copy(b[11:17], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[19:25], fromIdxBytes[:]) copy(b[17:23], fromIdxBytes[:])
binary.BigEndian.PutUint16(b[25:27], chainID) binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[27:31], SignatureConstantBytes[:]) copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -170,9 +164,9 @@ func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
// TxCompressedDataEmpty calculates the TxCompressedData of an empty // TxCompressedDataEmpty calculates the TxCompressedData of an empty
// transaction // transaction
func TxCompressedDataEmpty(chainID uint16) *big.Int { func TxCompressedDataEmpty(chainID uint16) *big.Int {
var b [31]byte var b [29]byte
binary.BigEndian.PutUint16(b[25:27], chainID) binary.BigEndian.PutUint16(b[23:25], chainID)
copy(b[27:31], SignatureConstantBytes[:]) copy(b[25:29], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi return bi
} }
@@ -182,7 +176,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 8 bits ] userFee // 1 byte // [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes // [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes // [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes // [ 40 bits ] amountFloat40 // 5 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation // Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
@@ -190,11 +184,16 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil { if tx.Amount == nil {
tx.Amount = big.NewInt(0) tx.Amount = big.NewInt(0)
} }
amountFloat16, err := NewFloat16(tx.Amount) amountFloat40, err := NewFloat40(tx.Amount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
var b [25]byte amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
toBJJSign := byte(0) toBJJSign := byte(0)
if tx.ToBJJ != EmptyBJJComp { if tx.ToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.ToBJJ) sign, _ := babyjub.UnpackSignY(tx.ToBJJ)
@@ -210,17 +209,17 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes()) copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes()) copy(b[11:16], amountFloat40Bytes)
toIdxBytes, err := tx.ToIdx.Bytes() toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[13:19], toIdxBytes[:]) copy(b[16:22], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes() fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[19:25], fromIdxBytes[:]) copy(b[22:28], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -236,7 +235,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 8 bits ] rqUserFee // 1 byte // [ 8 bits ] rqUserFee // 1 byte
// [ 40 bits ] rqNonce // 5 bytes // [ 40 bits ] rqNonce // 5 bytes
// [ 32 bits ] rqTokenID // 4 bytes // [ 32 bits ] rqTokenID // 4 bytes
// [ 16 bits ] rqAmountFloat16 // 2 bytes // [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 48 bits ] rqToIdx // 6 bytes // [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes // [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation // Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
@@ -244,11 +243,16 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil { if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0) tx.RqAmount = big.NewInt(0)
} }
amountFloat16, err := NewFloat16(tx.RqAmount) amountFloat40, err := NewFloat40(tx.RqAmount)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
var b [25]byte amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
var b [28]byte
rqToBJJSign := byte(0) rqToBJJSign := byte(0)
if tx.RqToBJJ != EmptyBJJComp { if tx.RqToBJJ != EmptyBJJComp {
sign, _ := babyjub.UnpackSignY(tx.RqToBJJ) sign, _ := babyjub.UnpackSignY(tx.RqToBJJ)
@@ -264,17 +268,17 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
} }
copy(b[2:7], nonceBytes[:]) copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.RqTokenID.Bytes()) copy(b[7:11], tx.RqTokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes()) copy(b[11:16], amountFloat40Bytes)
toIdxBytes, err := tx.RqToIdx.Bytes() toIdxBytes, err := tx.RqToIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[13:19], toIdxBytes[:]) copy(b[16:22], toIdxBytes[:])
fromIdxBytes, err := tx.RqFromIdx.Bytes() fromIdxBytes, err := tx.RqFromIdx.Bytes()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[19:25], fromIdxBytes[:]) copy(b[22:28], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:]) bi := new(big.Int).SetBytes(b[:])
return bi, nil return bi, nil
@@ -287,7 +291,22 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
// e1: [5 bytes AmountFloat40 | 20 bytes ToEthAddr]
var e1B [25]byte
amountFloat40, err := NewFloat40(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
}
amountFloat40Bytes, err := amountFloat40.Bytes()
if err != nil {
return nil, tracerr.Wrap(err)
}
copy(e1B[0:5], amountFloat40Bytes)
toEthAddr := EthAddrToBigInt(tx.ToEthAddr) toEthAddr := EthAddrToBigInt(tx.ToEthAddr)
copy(e1B[5:25], toEthAddr.Bytes())
e1 := new(big.Int).SetBytes(e1B[:])
rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr) rqToEthAddr := EthAddrToBigInt(tx.RqToEthAddr)
_, toBJJY := babyjub.UnpackSignY(tx.ToBJJ) _, toBJJY := babyjub.UnpackSignY(tx.ToBJJ)
@@ -299,7 +318,7 @@ func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
_, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ) _, rqToBJJY := babyjub.UnpackSignY(tx.RqToBJJ)
return poseidon.Hash([]*big.Int{toCompressedData, toEthAddr, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY}) return poseidon.Hash([]*big.Int{toCompressedData, e1, toBJJY, rqTxCompressedDataV2, rqToEthAddr, rqToBJJY})
} }
// VerifySignature returns true if the signature verification is correct for the given PublicKeyComp // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
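The signed hash now folds the amount into the first Poseidon input: e1 is the 25-byte big-endian concatenation [5 bytes amountFloat40 | 20 bytes toEthAddr], i.e. numerically e1 = amountF40 * 2^160 + toEthAddr. A standalone restatement of that packing (illustrative values only, not the package's API):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	amountF40 := big.NewInt(0x0004b571c0) // 79000000 encoded as Float40
	toEthAddr := big.NewInt(0x1234)       // illustrative address value
	// e1 = amountF40 << 160 + toEthAddr, matching the [25]byte copy above.
	e1 := new(big.Int).Add(new(big.Int).Lsh(amountF40, 160), toEthAddr)
	fmt.Printf("%x\n", e1)
}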

View File

@@ -21,80 +21,104 @@ func TestNewPoolL2Tx(t *testing.T) {
} }
poolL2Tx, err := NewPoolL2Tx(poolL2Tx) poolL2Tx, err := NewPoolL2Tx(poolL2Tx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0x02fb52b5d0b9ef2626c11701bb751b2720c76d59946b9a48146ac153bb6e63bf6a", poolL2Tx.TxID.String()) assert.Equal(t, "0x022669acda59b827d20ef5354a3eebd1dffb3972b0a6bf89d18bfd2efa0ab9f41e", poolL2Tx.TxID.String())
} }
func TestTxCompressedData(t *testing.T) { func TestTxCompressedDataAndTxCompressedDataV2JSVectors(t *testing.T) {
chainID := uint16(0) // test vector values generated from javascript implementation
var sk babyjub.PrivateKey var skPositive babyjub.PrivateKey // 'Positive' refers to the sign
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001")) _, err := hex.Decode(skPositive[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.NoError(t, err) assert.NoError(t, err)
var skNegative babyjub.PrivateKey // 'Negative' refers to the sign
_, err = hex.Decode(skNegative[:], []byte("0001020304050607080900010203040506070809000102030405060708090002"))
assert.NoError(t, err)
amount, ok := new(big.Int).SetString("343597383670000000000000000000000000000000", 10)
require.True(t, ok)
tx := PoolL2Tx{ tx := PoolL2Tx{
FromIdx: 2, FromIdx: (1 << 48) - 1,
ToIdx: 3, ToIdx: (1 << 48) - 1,
Amount: big.NewInt(4), Amount: amount,
TokenID: 5, TokenID: (1 << 32) - 1,
Nonce: 6, Nonce: (1 << 40) - 1,
ToBJJ: sk.Public().Compress(), Fee: (1 << 3) - 1,
ToBJJ: skPositive.Public().Compress(),
} }
txCompressedData, err := tx.TxCompressedData(chainID) txCompressedData, err := tx.TxCompressedData(uint16((1 << 16) - 1))
assert.NoError(t, err) require.NoError(t, err)
// test vector value generated from javascript implementation expectedStr := "0107ffffffffffffffffffffffffffffffffffffffffffffffc60be60f"
expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575" assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes())) txCompressedDataV2, err := tx.TxCompressedDataV2()
// using a different chainID require.NoError(t, err)
txCompressedData, err = tx.TxCompressedData(uint16(100)) expectedStr = "0107ffffffffffffffffffffffffffffffffffffffffffffffffffff"
assert.NoError(t, err) assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
txCompressedData, err = tx.TxCompressedData(uint16(65535))
assert.NoError(t, err)
expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{ tx = PoolL2Tx{
RqFromIdx: 7, FromIdx: 0,
RqToIdx: 8, ToIdx: 0,
RqAmount: big.NewInt(9), Amount: big.NewInt(0),
RqTokenID: 10, TokenID: 0,
RqNonce: 11, Nonce: 0,
RqFee: 12, Fee: 0,
RqToBJJ: sk.Public().Compress(), ToBJJ: skNegative.Public().Compress(),
} }
rqTxCompressedData, err := tx.RqTxCompressedDataV2() txCompressedData, err = tx.TxCompressedData(uint16(0))
assert.NoError(t, err) require.NoError(t, err)
// test vector value generated from javascript implementation expectedStr = "c60be60f"
expectedStr = "6571340879233176732837827812956721483162819083004853354503" assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, expectedStr, rqTxCompressedData.String())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
}
func TestTxCompressedDataV2(t *testing.T) { txCompressedDataV2, err = tx.TxCompressedDataV2()
var sk babyjub.PrivateKey require.NoError(t, err)
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001")) assert.Equal(t, "0", txCompressedDataV2.String())
assert.NoError(t, err)
tx := PoolL2Tx{ amount, ok = new(big.Int).SetString("63000000000000000", 10)
FromIdx: 7, require.True(t, ok)
ToIdx: 8, tx = PoolL2Tx{
Amount: big.NewInt(9), FromIdx: 324,
TokenID: 10, ToIdx: 256,
Nonce: 11, Amount: amount,
Fee: 12, TokenID: 123,
ToBJJ: sk.Public().Compress(), Nonce: 76,
Fee: 214,
ToBJJ: skNegative.Public().Compress(),
} }
txCompressedData, err := tx.TxCompressedDataV2() txCompressedData, err = tx.TxCompressedData(uint16(1))
assert.NoError(t, err) require.NoError(t, err)
// test vector value generated from javascript implementation expectedStr = "d6000000004c0000007b0000000001000000000001440001c60be60f"
expectedStr := "6571340879233176732837827812956721483162819083004853354503" assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes()) txCompressedDataV2, err = tx.TxCompressedDataV2()
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes())) require.NoError(t, err)
expectedStr = "d6000000004c0000007b3977825f00000000000100000000000144"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedDataV2.Bytes()))
tx = PoolL2Tx{
FromIdx: 1,
ToIdx: 2,
TokenID: 3,
Nonce: 4,
Fee: 5,
ToBJJ: skNegative.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "050000000004000000030000000000020000000000010000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
FromIdx: 2,
ToIdx: 3,
TokenID: 4,
Nonce: 5,
Fee: 6,
ToBJJ: skPositive.Public().Compress(),
}
txCompressedData, err = tx.TxCompressedData(uint16(0))
require.NoError(t, err)
expectedStr = "01060000000005000000040000000000030000000000020000c60be60f"
assert.Equal(t, expectedStr, hex.EncodeToString(txCompressedData.Bytes()))
} }
 func TestRqTxCompressedDataV2(t *testing.T) {
@@ -113,19 +137,16 @@ func TestRqTxCompressedDataV2(t *testing.T) {
 	txCompressedData, err := tx.RqTxCompressedDataV2()
 	assert.NoError(t, err)
 	// test vector value generated from javascript implementation
-	expectedStr := "6571340879233176732837827812956721483162819083004853354503"
+	expectedStr := "110248805340524920412994530176819463725852160917809517418728390663"
 	assert.Equal(t, expectedStr, txCompressedData.String())
 	expected, ok := new(big.Int).SetString(expectedStr, 10)
 	assert.True(t, ok)
 	assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
-	assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
+	assert.Equal(t, "010c000000000b0000000a0000000009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
 }

 func TestHashToSign(t *testing.T) {
 	chainID := uint16(0)
-	var sk babyjub.PrivateKey
-	_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
-	assert.NoError(t, err)
 	tx := PoolL2Tx{
 		FromIdx: 2,
 		ToIdx:   3,
@@ -136,7 +157,7 @@ func TestHashToSign(t *testing.T) {
 	}
 	toSign, err := tx.HashToSign(chainID)
 	assert.NoError(t, err)
-	assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
+	assert.Equal(t, "2d49ce1d4136e06f64e3eb1f79a346e6ee3e93ceeac909a57806a8d87005c263", hex.EncodeToString(toSign.Bytes()))
 }

 func TestVerifyTxSignature(t *testing.T) {
@@ -156,7 +177,7 @@ func TestVerifyTxSignature(t *testing.T) {
 	}
 	toSign, err := tx.HashToSign(chainID)
 	assert.NoError(t, err)
-	assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
+	assert.Equal(t, "1571327027383224465388301747239444557034990637650927918405777653988509342917", toSign.String())
 	sig := sk.SignPoseidon(toSign)
 	tx.Signature = sig.Compress()
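
A note on the vectors above: the new TxCompressedData vectors are self-describing enough to read the field layout off the hex. Interpreting the 28-byte vector for the {FromIdx: 324, ToIdx: 256, TokenID: 123, Nonce: 76, Fee: 214, chainID: 1} case big-endian gives [fee 1B | nonce 5B | tokenID 4B | toIdx 6B | fromIdx 6B | chainID 2B | signature constant 0xc60be60f 4B], with vectors built from skPositive carrying one extra leading 0x01 byte for the toBJJ sign. The decoding sketch below is my own illustration of that inferred layout, not code from the repository:

package main

import (
    "encoding/hex"
    "fmt"
)

func main() {
    // 28-byte vector from the test above: FromIdx 324, ToIdx 256,
    // TokenID 123, Nonce 76, Fee 214, chainID 1. With skPositive an
    // extra leading 0x01 byte (the toBJJ sign) would precede the fee.
    b, err := hex.DecodeString("d6000000004c0000007b0000000001000000000001440001c60be60f")
    if err != nil {
        panic(err)
    }
    fmt.Printf("fee=%x\n", b[0])         // d6 = 214
    fmt.Printf("nonce=%x\n", b[1:6])     // 000000004c = 76
    fmt.Printf("tokenID=%x\n", b[6:10])  // 0000007b = 123
    fmt.Printf("toIdx=%x\n", b[10:16])   // 000000000100 = 256
    fmt.Printf("fromIdx=%x\n", b[16:22]) // 000000000144 = 324
    fmt.Printf("chainID=%x\n", b[22:24]) // 0001
    fmt.Printf("const=%x\n", b[24:28])   // c60be60f (signature constant)
}

Each printed field matches the struct literal in the test, which also shows what moved: the amount no longer lives in TxCompressedData (it is carried by TxCompressedDataV2 as float40), and the chainID sits next to the signature constant.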


@@ -102,6 +102,8 @@ type ZKInputs struct {
 	ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
 	// ToEthAddr
 	ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
+	// AmountF encoded as float40
+	AmountF []*big.Int `json:"amountF"`
 	// OnChain determines if is L1 (1/true) or L2 (0/false)
 	OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -112,8 +114,8 @@ type ZKInputs struct {
 	// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
 	// account (fromIdx==0)
 	NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
-	// DepositAmountF encoded as float16
-	DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
+	// DepositAmountF encoded as float40
+	DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx]
 	// FromEthAddr
 	FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
 	// FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -326,6 +328,7 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
 	zki.AuxToIdx = newSlice(maxTx)
 	zki.ToBJJAy = newSlice(maxTx)
 	zki.ToEthAddr = newSlice(maxTx)
+	zki.AmountF = newSlice(maxTx)
 	zki.OnChain = newSlice(maxTx)
 	zki.NewAccount = newSlice(maxTx)
@@ -477,7 +480,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 	b = append(b, newExitRoot...)
 	// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
-	l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
+	l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
 	l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
 	l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
 	for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -496,7 +499,7 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 	// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
 	var l2TxsData []byte
-	l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
+	l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
 	l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
 	expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
 	for i := 0; i < len(z.Metadata.L2TxsData); i++ {
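
The two constants changed in ToHashGlobalData follow directly from widening each encoded amount from float16 (16 bits) to float40 (40 bits), i.e. 24 extra bits per amount field. An L1 tx carries two such fields (depositAmountF and amountF), so its fixed part grows by 48 bits from 480 to 528; an L2 tx carries one, so its per-tx tail grows from 24 bits (a 16-bit amount plus an 8-bit fee) to 48 (40-bit amount plus 8-bit fee). A sketch of the bookkeeping, with names of my own; only the resulting constants appear in the code above:

const (
    float16Bits = 16
    float40Bits = 40

    l1AmountFields = 2 // depositAmountF + amountF
    l2AmountFields = 1 // amountF

    l1Extra = l1AmountFields * (float40Bits - float16Bits) // 48: 480 -> 528
    l2Extra = l2AmountFields * (float40Bits - float16Bits) // 24: 24 -> 48
)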


@@ -3,7 +3,6 @@ package config
 import (
 	"fmt"
 	"io/ioutil"
-	"math/big"
 	"time"

 	"github.com/BurntSushi/toml"
@@ -52,27 +51,6 @@ type Coordinator struct {
 	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
 	// timeout that will trigger a schedule to forge an L1Batch
 	L1BatchTimeoutPerc float64 `validate:"required"`
-	// StartSlotBlocksDelay is the number of blocks of delay to wait before
-	// starting the pipeline when we reach a slot in which we can forge.
-	StartSlotBlocksDelay int64
-	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
-	// the forger address is checked to be allowed to forge (apart from
-	// checking the next block), used to decide when to stop scheduling new
-	// batches (by stopping the pipeline).
-	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
-	// is 5, eventhough at block 11 we canForge, the pipeline will be
-	// stopped if we can't forge at block 15.
-	// This value should be the expected number of blocks it takes between
-	// scheduling a batch and having it mined.
-	ScheduleBatchBlocksAheadCheck int64
-	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
-	// which the coordinator is also checked to be allowed to forge, apart
-	// from the next block; used to decide when to stop sending batches to
-	// the smart contract.
-	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
-	// 5, eventhough at block 11 we canForge, the batch will be discarded
-	// if we can't forge at block 15.
-	SendBatchBlocksMarginCheck int64
 	// ProofServerPollInterval is the waiting interval between polling the
 	// ProofServer while waiting for a particular status
 	ProofServerPollInterval Duration `validate:"required"`
@@ -123,9 +101,6 @@ type Coordinator struct {
 	// calls, except for methods where a particular gas limit is
 	// harcoded because it's known to be a big value
 	CallGasLimit uint64 `validate:"required"`
-	// MaxGasPrice is the maximum gas price allowed for ethereum
-	// transactions
-	MaxGasPrice *big.Int `validate:"required"`
 	// GasPriceDiv is the gas price division
 	GasPriceDiv uint64 `validate:"required"`
 	// CheckLoopInterval is the waiting interval between receipt
@@ -137,13 +112,6 @@ type Coordinator struct {
 	// AttemptsDelay is delay between attempts do do an eth client
 	// RPC call
 	AttemptsDelay Duration `validate:"required"`
-	// TxResendTimeout is the timeout after which a non-mined
-	// ethereum transaction will be resent (reusing the nonce) with
-	// a newly calculated gas price
-	TxResendTimeout Duration `validate:"required"`
-	// NoReuseNonce disables reusing nonces of pending transactions for
-	// new replacement transactions
-	NoReuseNonce bool
 	// Keystore is the ethereum keystore where private keys are kept
 	Keystore struct {
 		// Path to the keystore


@@ -47,8 +47,6 @@ type Debug struct {
 	MineBlockNum int64
 	// SendBlockNum is the blockNum when the batch was sent to ethereum
 	SendBlockNum int64
-	// ResendNum is the number of times the tx has been resent
-	ResendNum int
 	// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
 	// was scheduled
 	LastScheduledL1BatchBlockNum int64
@@ -66,17 +64,10 @@ type Debug struct {
 	// StartToSendDelay is the delay between starting a batch and sending
 	// it to ethereum, in seconds
 	StartToSendDelay float64
-	// StartToMineDelay is the delay between starting a batch and having
-	// it mined in seconds
-	StartToMineDelay float64
-	// SendToMineDelay is the delay between sending a batch tx and having
-	// it mined in seconds
-	SendToMineDelay float64
 }

 // BatchInfo contans the Batch information
 type BatchInfo struct {
-	PipelineNum int
 	BatchNum    common.BatchNum
 	ServerProof prover.Client
 	ZKInputs    *common.ZKInputs
@@ -92,13 +83,7 @@ type BatchInfo struct {
 	ForgeBatchArgs *eth.RollupForgeBatchArgs
 	// FeesInfo
 	EthTx *types.Transaction
-	// SendTimestamp the time of batch sent to ethereum
-	SendTimestamp time.Time
 	Receipt *types.Receipt
-	// Fail is true if:
-	// - The receipt status is failed
-	// - A previous parent batch is failed
-	Fail bool
 	Debug Debug
 }


@@ -3,8 +3,8 @@ package coordinator
 import (
 	"context"
 	"fmt"
-	"math/big"
 	"os"
+	"strings"
 	"sync"
 	"time"
@@ -42,29 +42,6 @@ type Config struct {
 	// L1BatchTimeoutPerc is the portion of the range before the L1Batch
 	// timeout that will trigger a schedule to forge an L1Batch
 	L1BatchTimeoutPerc float64
-	// StartSlotBlocksDelay is the number of blocks of delay to wait before
-	// starting the pipeline when we reach a slot in which we can forge.
-	StartSlotBlocksDelay int64
-	// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
-	// the forger address is checked to be allowed to forge (apart from
-	// checking the next block), used to decide when to stop scheduling new
-	// batches (by stopping the pipeline).
-	// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
-	// is 5, eventhough at block 11 we canForge, the pipeline will be
-	// stopped if we can't forge at block 15.
-	// This value should be the expected number of blocks it takes between
-	// scheduling a batch and having it mined.
-	ScheduleBatchBlocksAheadCheck int64
-	// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
-	// which the coordinator is also checked to be allowed to forge, apart
-	// from the next block; used to decide when to stop sending batches to
-	// the smart contract.
-	// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
-	// 5, eventhough at block 11 we canForge, the batch will be discarded
-	// if we can't forge at block 15.
-	// This value should be the expected number of blocks it takes between
-	// sending a batch and having it mined.
-	SendBatchBlocksMarginCheck int64
 	// EthClientAttempts is the number of attempts to do an eth client RPC
 	// call before giving up
 	EthClientAttempts int
@@ -77,16 +54,6 @@ type Config struct {
 	// EthClientAttemptsDelay is delay between attempts do do an eth client
 	// RPC call
 	EthClientAttemptsDelay time.Duration
-	// EthTxResendTimeout is the timeout after which a non-mined ethereum
-	// transaction will be resent (reusing the nonce) with a newly
-	// calculated gas price
-	EthTxResendTimeout time.Duration
-	// EthNoReuseNonce disables reusing nonces of pending transactions for
-	// new replacement transactions
-	EthNoReuseNonce bool
-	// MaxGasPrice is the maximum gas price allowed for ethereum
-	// transactions
-	MaxGasPrice *big.Int
 	// TxManagerCheckInterval is the waiting interval between receipt
 	// checks of ethereum transactions in the TxManager
 	TxManagerCheckInterval time.Duration
@@ -94,8 +61,6 @@ type Config struct {
 	// in JSON in every step/update of the pipeline
 	DebugBatchPath string
 	Purger         PurgerCfg
-	// VerifierIdx is the index of the verifier contract registered in the
-	// smart contract
 	VerifierIdx       uint8
 	TxProcessorConfig txprocessor.Config
 }
@@ -109,17 +74,10 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
 	}
 }

-type fromBatch struct {
-	BatchNum   common.BatchNum
-	ForgerAddr ethCommon.Address
-	StateRoot  *big.Int
-}
-
 // Coordinator implements the Coordinator type
 type Coordinator struct {
 	// State
-	pipelineNum       int       // Pipeline sequential number. The first pipeline is 1
-	pipelineFromBatch fromBatch // batch from which we started the pipeline
+	pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline
 	provers          []prover.Client
 	consts           synchronizer.SCConsts
 	vars             synchronizer.SCVariables
@@ -139,7 +97,6 @@ type Coordinator struct {
 	cancel context.CancelFunc

 	pipeline *Pipeline
-	lastNonFailedBatchNum common.BatchNum

 	purger    *Purger
 	txManager *TxManager
@@ -182,12 +139,7 @@ func NewCoordinator(cfg Config,
 	ctx, cancel := context.WithCancel(context.Background())
 	c := Coordinator{
-		pipelineNum: 0,
-		pipelineFromBatch: fromBatch{
-			BatchNum:   0,
-			ForgerAddr: ethCommon.Address{},
-			StateRoot:  big.NewInt(0),
-		},
+		pipelineBatchNum: -1,
 		provers: serverProofs,
 		consts:  *scConsts,
 		vars:    *initSCVars,
@@ -231,9 +183,8 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
 }

 func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
-	c.pipelineNum++
-	return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
-		c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
+	return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
+		c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
 }

 // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -254,9 +205,6 @@ type MsgSyncReorg struct {
 // MsgStopPipeline indicates a signal to reset the pipeline
 type MsgStopPipeline struct {
 	Reason string
-	// FailedBatchNum indicates the first batchNum that faile in the
-	// pipeline. If FailedBatchNum is 0, it should be ignored.
-	FailedBatchNum common.BatchNum
 }

 // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -267,36 +215,27 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
 	}
 }

-func updateSCVars(vars *synchronizer.SCVariables, update synchronizer.SCVariablesPtr) {
-	if update.Rollup != nil {
-		vars.Rollup = *update.Rollup
-	}
-	if update.Auction != nil {
-		vars.Auction = *update.Auction
-	}
-	if update.WDelayer != nil {
-		vars.WDelayer = *update.WDelayer
-	}
-}
-
 func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	updateSCVars(&c.vars, vars)
+	if vars.Rollup != nil {
+		c.vars.Rollup = *vars.Rollup
+	}
+	if vars.Auction != nil {
+		c.vars.Auction = *vars.Auction
+	}
+	if vars.WDelayer != nil {
+		c.vars.WDelayer = *vars.WDelayer
+	}
 }

 func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
 	currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
-	if blockNum < auctionConstants.GenesisBlockNum {
-		log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
-			"genesis", auctionConstants.GenesisBlockNum)
-		return false
-	}
 	var slot *common.Slot
 	if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
 		slot = currentSlot
 	} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
 		slot = nextSlot
 	} else {
-		log.Warnw("canForge: requested blockNum is outside current and next slot",
+		log.Warnw("Coordinator: requested blockNum for canForge is outside slot",
 			"blockNum", blockNum, "currentSlot", currentSlot,
 			"nextSlot", nextSlot,
 		)
@@ -305,23 +244,16 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
 	anyoneForge := false
 	if !slot.ForgerCommitment &&
 		auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
-		log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
+		log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)",
 			"block", blockNum)
 		anyoneForge = true
 	}
 	if slot.Forger == addr || anyoneForge {
 		return true
 	}
-	log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
 	return false
 }

-func (c *Coordinator) canForgeAt(blockNum int64) bool {
-	return canForge(&c.consts.Auction, &c.vars.Auction,
-		&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
-		c.cfg.ForgerAddress, blockNum)
-}
-
 func (c *Coordinator) canForge() bool {
 	blockNum := c.stats.Eth.LastBlock.Num + 1
 	return canForge(&c.consts.Auction, &c.vars.Auction,
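
The auction rule that survives this refactor is unchanged: the slot owner can always forge, and once the slot deadline has passed with no forger commitment, anyone can. Below is a minimal self-contained model of that decision, with simplified stand-ins of my own for the common package types; in particular, the real RelativeBlock is computed from the auction genesis block, not from the slot start as simplified here:

package main

import "fmt"

// slot is a simplified stand-in for common.Slot.
type slot struct {
    forger           string
    forgerCommitment bool
    startBlock       int64
}

// canForgeSimple: the slot owner may always forge; once the deadline
// has passed with no commitment in the slot, anyone may.
func canForgeSimple(s slot, slotDeadline int64, addr string, blockNum int64) bool {
    relativeBlock := blockNum - s.startBlock // simplified RelativeBlock
    anyoneForge := !s.forgerCommitment && relativeBlock >= slotDeadline
    return s.forger == addr || anyoneForge
}

func main() {
    s := slot{forger: "owner", forgerCommitment: false, startBlock: 100}
    fmt.Println(canForgeSimple(s, 20, "owner", 105)) // true: owner, any block
    fmt.Println(canForgeSimple(s, 20, "other", 105)) // false: before deadline
    fmt.Println(canForgeSimple(s, 20, "other", 125)) // true: deadline passed, no commitment
}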
@@ -330,24 +262,12 @@ func (c *Coordinator) canForge() bool {
 }

 func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
-	nextBlock := c.stats.Eth.LastBlock.Num + 1
-	canForge := c.canForgeAt(nextBlock)
-	if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
-		canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
-	}
+	canForge := c.canForge()
 	if c.pipeline == nil {
-		relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
-		if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
-			log.Debugw("Coordinator: delaying pipeline start due to "+
-				"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
-				relativeBlock, c.cfg.StartSlotBlocksDelay)
-		} else if canForge {
+		if canForge {
 			log.Infow("Coordinator: forging state begin", "block",
-				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
-			batchNum := stats.Sync.LastBatch.BatchNum
-			if c.lastNonFailedBatchNum > batchNum {
-				batchNum = c.lastNonFailedBatchNum
-			}
+				stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch)
+			batchNum := common.BatchNum(stats.Sync.LastBatch)
 			var err error
 			if c.pipeline, err = c.newPipeline(ctx); err != nil {
 				return tracerr.Wrap(err)
@@ -356,7 +276,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 				c.pipeline = nil
 				return tracerr.Wrap(err)
 			}
-			// c.pipelineBatchNum = batchNum
+			c.pipelineBatchNum = batchNum
 		}
 	} else {
 		if !canForge {
@@ -373,17 +293,18 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 		//		return err
 		//	}
 		// }
-		// if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)) {
-		//	if err := c.txSelector.Reset(stats.Sync.LastBatch.BatchNum); err != nil {
-		//		return tracerr.Wrap(err)
-		//	}
-		// }
-		if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
-			stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
-			return tracerr.Wrap(err)
-		}
-		if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
-			int64(stats.Sync.LastBatch.BatchNum)); err != nil {
+		if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
+			if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
+				return tracerr.Wrap(err)
+			}
+		}
+		_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
+			stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
+		if err != nil {
+			return tracerr.Wrap(err)
+		}
+		_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
+		if err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
@@ -410,42 +331,33 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
 	if c.pipeline != nil {
 		c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
 	}
-	if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
-		c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0 {
-		// There's been a reorg and the batch state root from which the
-		// pipeline was started has changed (probably because it was in
-		// a block that was discarded), and it was sent by a different
-		// coordinator than us.  That batch may never be in the main
-		// chain, so we stop the pipeline (it will be started again
-		// once the node is in sync).
-		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
-			"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
-			"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
-			"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
-		c.txManager.DiscardPipeline(ctx, c.pipelineNum)
-		if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
+	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
+		// There's been a reorg and the batch from which the pipeline
+		// was started was in a block that was discarded.  The batch
+		// may not be in the main chain, so we stop the pipeline as a
+		// precaution (it will be started again once the node is in
+		// sync).
+		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
+			"sync.LastBatch", c.stats.Sync.LastBatch,
+			"c.pipelineBatchNum", c.pipelineBatchNum)
+		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
 			return tracerr.Wrap(err)
 		}
 	}
 	return nil
 }

-// handleStopPipeline handles stopping the pipeline.  If failedBatchNum is 0,
-// the next pipeline will start from the last state of the synchronizer,
-// otherwise, it will state from failedBatchNum-1.
-func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
-	batchNum := c.stats.Sync.LastBatch.BatchNum
-	if failedBatchNum != 0 {
-		batchNum = failedBatchNum - 1
-	}
+func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
 	if c.pipeline != nil {
 		c.pipeline.Stop(c.ctx)
 		c.pipeline = nil
 	}
-	if err := c.l2DB.Reorg(batchNum); err != nil {
+	if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
 		return tracerr.Wrap(err)
 	}
-	c.lastNonFailedBatchNum = batchNum
+	if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck
+		// TODO: Check that we are in a slot in which we can't forge
+	}
 	return nil
 }

@@ -461,7 +373,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
 		}
 	case MsgStopPipeline:
 		log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
-		if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
+		if err := c.handleStopPipeline(ctx, msg.Reason); err != nil {
 			return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
 		}
 	default:

@@ -2,7 +2,6 @@ package coordinator
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"math/big"
@@ -12,7 +11,6 @@ import (
 	"time"

 	ethCommon "github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/hermeznetwork/hermez-node/batchbuilder"
 	"github.com/hermeznetwork/hermez-node/common"
 	dbUtils "github.com/hermeznetwork/hermez-node/db"
@@ -263,8 +261,8 @@ func TestCoordinatorFlow(t *testing.T) {
 			var stats synchronizer.Stats
 			stats.Eth.LastBlock = *ethClient.CtlLastBlock()
 			stats.Sync.LastBlock = stats.Eth.LastBlock
-			stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
-			stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
+			stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
+			stats.Sync.LastBatch = stats.Eth.LastBatch
 			canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
 			require.NoError(t, err)
 			var slot common.Slot
@@ -281,7 +279,7 @@ func TestCoordinatorFlow(t *testing.T) {
 		// Copy stateDB to synchronizer if there was a new batch
 		source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
 		dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
-		if stats.Sync.LastBatch.BatchNum != 0 {
+		if stats.Sync.LastBatch != 0 {
 			if _, err := os.Stat(dest); os.IsNotExist(err) {
 				log.Infow("Making pebble checkpoint for sync",
 					"source", source, "dest", dest)
@@ -568,8 +566,3 @@ func TestCoordinatorStress(t *testing.T) {
 // TODO: Test forgeBatch
 // TODO: Test waitServerProof
 // TODO: Test handleReorg
-
-func TestFoo(t *testing.T) {
-	a := tracerr.Wrap(fmt.Errorf("AAA: %w", core.ErrNonceTooLow))
-	fmt.Println(errors.Is(a, core.ErrNonceTooLow))
-}


@@ -2,7 +2,6 @@ package coordinator
 import (
 	"context"
-	"database/sql"
 	"fmt"
 	"math/big"
 	"sync"
@@ -25,30 +24,19 @@ type statsVars struct {
 	Vars  synchronizer.SCVariablesPtr
 }

-type state struct {
-	batchNum                     common.BatchNum
-	lastScheduledL1BatchBlockNum int64
-	lastForgeL1TxsNum            int64
-}
-
 // Pipeline manages the forging of batches with parallel server proofs
 type Pipeline struct {
-	num    int
 	cfg    Config
 	consts synchronizer.SCConsts

 	// state
-	state state
-	// batchNum common.BatchNum
-	// lastScheduledL1BatchBlockNum int64
-	// lastForgeL1TxsNum int64
+	batchNum                     common.BatchNum
+	lastScheduledL1BatchBlockNum int64
+	lastForgeL1TxsNum            int64
 	started bool
-	rw sync.RWMutex
-	errAtBatchNum common.BatchNum

 	proversPool *ProversPool
 	provers     []prover.Client
-	coord       *Coordinator
 	txManager   *TxManager
 	historyDB   *historydb.HistoryDB
 	l2DB        *l2db.L2DB
@@ -65,28 +53,14 @@ type Pipeline struct {
 	cancel context.CancelFunc
 }

-func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
-	p.rw.Lock()
-	defer p.rw.Unlock()
-	p.errAtBatchNum = batchNum
-}
-
-func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
-	p.rw.RLock()
-	defer p.rw.RUnlock()
-	return p.errAtBatchNum
-}
-
 // NewPipeline creates a new Pipeline
 func NewPipeline(ctx context.Context,
 	cfg Config,
-	num int, // Pipeline sequential number
 	historyDB *historydb.HistoryDB,
 	l2DB *l2db.L2DB,
 	txSelector *txselector.TxSelector,
 	batchBuilder *batchbuilder.BatchBuilder,
 	purger *Purger,
-	coord *Coordinator,
 	txManager *TxManager,
 	provers []prover.Client,
 	scConsts *synchronizer.SCConsts,
@@ -105,7 +79,6 @@ func NewPipeline(ctx context.Context,
 		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
 	}
 	return &Pipeline{
-		num:         num,
 		cfg:         cfg,
 		historyDB:   historyDB,
 		l2DB:        l2DB,
@@ -114,7 +87,6 @@ func NewPipeline(ctx context.Context,
 		provers:     provers,
 		proversPool: proversPool,
 		purger:      purger,
-		coord:       coord,
 		txManager:   txManager,
 		consts:      *scConsts,
 		statsVarsCh: make(chan statsVars, queueLen),
@@ -132,67 +104,33 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
 // reset pipeline state
 func (p *Pipeline) reset(batchNum common.BatchNum,
 	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
-	p.state = state{
-		batchNum:                     batchNum,
-		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
-		lastScheduledL1BatchBlockNum: 0,
-	}
+	p.batchNum = batchNum
+	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
 	p.stats = *stats
 	p.vars = *vars
+	p.lastScheduledL1BatchBlockNum = 0

-	// Reset the StateDB in TxSelector and BatchBuilder from the
-	// synchronizer only if the checkpoint we reset from either:
-	// a. Doesn't exist in the TxSelector/BatchBuilder
-	// b. The batch has already been synced by the synchronizer and has a
-	// different MTRoot than the BatchBuilder
-	// Otherwise, reset from the local checkpoint.
-	// First attempt to reset from local checkpoint if such checkpoint exists
-	existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
+	err := p.txSelector.Reset(p.batchNum)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	fromSynchronizerTxSelector := !existsTxSelector
-	if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
-		return tracerr.Wrap(err)
-	}
-	existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
+	err = p.batchBuilder.Reset(p.batchNum, true)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	fromSynchronizerBatchBuilder := !existsBatchBuilder
-	if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
-		return tracerr.Wrap(err)
-	}
-	// After reset, check that if the batch exists in the historyDB, the
-	// stateRoot matches with the local one, if not, force a reset from
-	// synchronizer
-	batch, err := p.historyDB.GetBatch(p.state.batchNum)
-	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		// nothing to do
-	} else if err != nil {
-		return tracerr.Wrap(err)
-	} else {
-		localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
-		if batch.StateRoot.Cmp(localStateRoot) != 0 {
-			log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
-				"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
-			// StateRoot from synchronizer doesn't match StateRoot
-			// from batchBuilder, force a reset from synchronizer
-			if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
-				return tracerr.Wrap(err)
-			}
-			if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
-				return tracerr.Wrap(err)
-			}
-		}
-	}
 	return nil
 }

 func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	updateSCVars(&p.vars, vars)
+	if vars.Rollup != nil {
+		p.vars.Rollup = *vars.Rollup
+	}
+	if vars.Auction != nil {
+		p.vars.Auction = *vars.Auction
+	}
+	if vars.WDelayer != nil {
+		p.vars.WDelayer = *vars.WDelayer
+	}
 }
 // handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
@@ -205,7 +143,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
 	} else if err != nil {
 		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
 			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
-				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
+				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
 				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
 		} else {
 			log.Errorw("forgeBatch", "err", err)
@@ -261,32 +199,15 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				p.stats = statsVars.Stats
 				p.syncSCVars(statsVars.Vars)
 			case <-time.After(waitDuration):
-				// Once errAtBatchNum != 0, we stop forging
-				// batches because there's been an error and we
-				// wait for the pipeline to be stopped.
-				if p.getErrAtBatchNum() != 0 {
-					waitDuration = p.cfg.ForgeRetryInterval
-					continue
-				}
-				batchNum = p.state.batchNum + 1
+				batchNum = p.batchNum + 1
 				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
 				if p.ctx.Err() != nil {
 					continue
-				} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
-					waitDuration = p.cfg.ForgeRetryInterval
-					continue
 				} else if err != nil {
-					p.setErrAtBatchNum(batchNum)
-					waitDuration = p.cfg.ForgeRetryInterval
-					p.coord.SendMsg(p.ctx, MsgStopPipeline{
-						Reason: fmt.Sprintf(
-							"Pipeline.handleForgBatch: %v", err),
-						FailedBatchNum: batchNum,
-					})
+					waitDuration = p.cfg.SyncRetryInterval
 					continue
 				}
-				p.state.batchNum = batchNum
+				p.batchNum = batchNum
 				select {
 				case batchChSentServerProof <- batchInfo:
 				case <-p.ctx.Done():
@@ -304,28 +225,16 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				p.wg.Done()
 				return
 			case batchInfo := <-batchChSentServerProof:
-				// Once errAtBatchNum != 0, we stop forging
-				// batches because there's been an error and we
-				// wait for the pipeline to be stopped.
-				if p.getErrAtBatchNum() != 0 {
-					continue
-				}
 				err := p.waitServerProof(p.ctx, batchInfo)
-				// We are done with this serverProof, add it back to the pool
-				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
-				batchInfo.ServerProof = nil
 				if p.ctx.Err() != nil {
 					continue
 				} else if err != nil {
 					log.Errorw("waitServerProof", "err", err)
-					p.setErrAtBatchNum(batchInfo.BatchNum)
-					p.coord.SendMsg(p.ctx, MsgStopPipeline{
-						Reason: fmt.Sprintf(
-							"Pipeline.waitServerProof: %v", err),
-						FailedBatchNum: batchInfo.BatchNum,
-					})
 					continue
 				}
+				// We are done with this serverProof, add it back to the pool
+				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
+				// batchInfo.ServerProof = nil
 				p.txManager.AddBatch(p.ctx, batchInfo)
 			}
 		}
@@ -375,8 +284,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	// Structure to accumulate data and metadata of the batch
-	batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
+	batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch

 	batchInfo.Debug.StartTimestamp = time.Now()
 	batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1
@@ -391,19 +300,22 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 	var auths [][]byte
 	var coordIdxs []common.Idx

-	// TODO: If there are no txs and we are behind the timeout, skip
-	// forging a batch and return a particular error that can be handleded
-	// in the loop where handleForgeBatch is called to retry after an
-	// interval
-
 	// 1. Decide if we forge L2Tx or L1+L2Tx
 	if p.shouldL1L2Batch(batchInfo) {
 		batchInfo.L1Batch = true
-		if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
+		defer func() {
+			// If there's no error, update the parameters related
+			// to the last L1Batch forged
+			if err == nil {
+				p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
+				p.lastForgeL1TxsNum++
+			}
+		}()
+		if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
 			return nil, tracerr.Wrap(errLastL1BatchNotSynced)
 		}
 		// 2a: L1+L2 txs
-		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
+		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
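
The defer introduced in this hunk leans on forgeBatch's named return value err: the closure executes on every exit path, but only advances the L1-batch bookkeeping when the function is returning nil. A stripped-down, runnable illustration of the pattern (my own example, not repo code):

package main

import (
    "errors"
    "fmt"
)

var lastForgeL1TxsNum int64

// forge mirrors the defer-on-named-return pattern above: the deferred
// closure sees the final value of err, so the counter is only advanced
// on the success path.
func forge(fail bool) (err error) {
    defer func() {
        if err == nil {
            lastForgeL1TxsNum++ // commit state updates only on success
        }
    }()
    if fail {
        return errors.New("errLastL1BatchNotSynced")
    }
    return nil
}

func main() {
    _ = forge(true)              // counter unchanged
    _ = forge(false)             // counter -> 1
    fmt.Println(lastForgeL1TxsNum)
}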
@@ -412,9 +324,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 		if err != nil {
 			return nil, tracerr.Wrap(err)
 		}
-		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
-		p.state.lastForgeL1TxsNum++
 	} else {
 		// 2b: only L2 txs
 		coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
@@ -490,12 +399,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
 func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
 	// Take the lastL1BatchBlockNum as the biggest between the last
 	// scheduled one, and the synchronized one.
-	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
+	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
 	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
 		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
 	}
 	// Set Debug information
-	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
+	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
 	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
 	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
 	batchInfo.Debug.L1BatchBlockScheduleDeadline =


@@ -77,7 +77,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
 	//
 	// Scheduled L1Batch
 	//
-	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
+	pipeline.lastScheduledL1BatchBlockNum = startBlock
 	stats.Sync.LastL1BatchBlock = startBlock - 10

 	// We are are one block before the timeout range * 0.5
@@ -172,7 +172,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
 	// users with positive balances
 	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
 	syncStats := sync.Stats()
-	batchNum := syncStats.Sync.LastBatch.BatchNum
+	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
 	syncSCVars := sync.SCVars()
 	pipeline, err := coord.newPipeline(ctx)


@@ -2,7 +2,6 @@ package coordinator
 import (
 	"context"
-	"errors"
 	"fmt"
 	"math/big"
 	"time"
@@ -10,7 +9,6 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
@@ -37,22 +35,12 @@ type TxManager struct {
 	vars        synchronizer.SCVariables
 	statsVarsCh chan statsVars

-	discardPipelineCh chan int // int refers to the pipelineNum
-
-	minPipelineNum int
-	queue          Queue
+	queue []*BatchInfo
 	// lastSuccessBatch stores the last BatchNum that who's forge call was confirmed
 	lastSuccessBatch common.BatchNum
-	// lastPendingBatch common.BatchNum
-	// accNonce is the account nonce in the last mined block (due to mined txs)
-	accNonce uint64
-	// accNextNonce is the nonce that we should use to send the next tx.
-	// In some cases this will be a reused nonce of an already pending tx.
-	accNextNonce uint64
-	// accPendingNonce is the pending nonce of the account due to pending txs
-	// accPendingNonce uint64
-
-	lastSentL1BatchBlockNum int64
+	lastPendingBatch common.BatchNum
+	lastSuccessNonce uint64
+	lastPendingNonce uint64
 }

 // NewTxManager creates a new TxManager
@@ -66,19 +54,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
+	lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
 	if err != nil {
 		return nil, err
 	}
-	// accPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
-	// if err != nil {
-	// 	return nil, err
-	// }
-	// if accNonce != accPendingNonce {
-	// 	return nil, tracerr.Wrap(fmt.Errorf("currentNonce (%v) != accPendingNonce (%v)",
-	// 		accNonce, accPendingNonce))
-	// }
-	log.Infow("TxManager started", "nonce", accNonce)
+	lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
+	if err != nil {
+		return nil, err
+	}
+	if lastSuccessNonce != lastPendingNonce {
+		return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
+			lastSuccessNonce, lastPendingNonce))
+	}
+	log.Infow("TxManager started", "nonce", lastSuccessNonce)
 	return &TxManager{
 		cfg:       *cfg,
 		ethClient: ethClient,
@@ -86,7 +74,6 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 		coord:       coord,
 		batchCh:     make(chan *BatchInfo, queueLen),
 		statsVarsCh: make(chan statsVars, queueLen),
-		discardPipelineCh: make(chan int, queueLen),
 		account: accounts.Account{
 			Address: *address,
 		},
@@ -95,11 +82,8 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
 		vars: *initSCVars,

-		minPipelineNum: 0,
-		queue:          NewQueue(),
-		accNonce:       accNonce,
-		accNextNonce:   accNonce,
-		// accPendingNonce: accPendingNonce,
+		lastSuccessNonce: lastSuccessNonce,
+		lastPendingNonce: lastPendingNonce,
 	}, nil
 }
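
The constructor now refuses to start while the forger account still has transactions in the mempool, which is exactly when the latest and pending nonces disagree. Written against the stock go-ethereum ethclient API, the same startup check would look roughly like this (a sketch of mine; the repo goes through its own eth.ClientInterface wrappers instead):

package main

import (
    "context"
    "fmt"
    "math/big"

    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/ethclient"
)

// ensureNoPendingTxs mirrors the startup check above: the latest nonce
// and the pending nonce only match when no tx from the account is
// still waiting in the mempool.
func ensureNoPendingTxs(ctx context.Context, c *ethclient.Client, addr ethCommon.Address) error {
    var latestBlock *big.Int // nil = latest mined block
    latest, err := c.NonceAt(ctx, addr, latestBlock)
    if err != nil {
        return err
    }
    pending, err := c.PendingNonceAt(ctx, addr)
    if err != nil {
        return err
    }
    if latest != pending {
        return fmt.Errorf("account has %d pending tx(s)", pending-latest)
    }
    return nil
}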
@@ -120,17 +104,16 @@ func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.St
 	}
 }

-// DiscardPipeline is a thread safe method to notify about a discarded pipeline
-// due to a reorg
-func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
-	select {
-	case t.discardPipelineCh <- pipelineNum:
-	case <-ctx.Done():
-	}
-}
-
 func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	updateSCVars(&t.vars, vars)
+	if vars.Rollup != nil {
+		t.vars.Rollup = *vars.Rollup
+	}
+	if vars.Auction != nil {
+		t.vars.Auction = *vars.Auction
+	}
+	if vars.WDelayer != nil {
+		t.vars.WDelayer = *vars.WDelayer
+	}
 }

 // NewAuth generates a new auth object for an ethereum transaction
@@ -140,7 +123,6 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	inc := new(big.Int).Set(gasPrice)
-	// TODO: Replace this by a value of percentage
 	const gasPriceDiv = 100
 	inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
 	gasPrice.Add(gasPrice, inc)
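
With gasPriceDiv fixed at 100, NewAuth bids the node-suggested gas price plus 1% (inc = gasPrice/100, bid = gasPrice + inc). A quick, runnable check of the arithmetic, as an illustration only:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // Same arithmetic as NewAuth: a 50 gwei suggestion becomes a
    // 50.5 gwei bid (increment = gasPrice / gasPriceDiv = 1%).
    gasPrice := big.NewInt(50_000_000_000)
    inc := new(big.Int).Set(gasPrice)
    inc.Div(inc, big.NewInt(100))
    gasPrice.Add(gasPrice, inc)
    fmt.Println(gasPrice) // 50500000000
}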
@@ -159,75 +141,29 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 	return auth, nil
 }

-func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
-	nextBlock := t.stats.Eth.LastBlock.Num + 1
-	if !t.canForgeAt(nextBlock) {
-		return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
-	}
-	if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
-		return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
-	}
-	margin := t.cfg.SendBatchBlocksMarginCheck
-	if margin != 0 {
-		if !t.canForgeAt(nextBlock + margin) {
-			return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
-				margin, nextBlock))
-		}
-		if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
-			return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
-				margin, nextBlock))
-		}
-	}
-	return nil
-}
-
-func addPerc(v *big.Int, p int64) *big.Int {
-	r := new(big.Int).Set(v)
-	r.Mul(r, big.NewInt(p))
-	// nolint reason: to calculate percetnages we divide by 100
-	r.Div(r, big.NewInt(100)) //nolit:gomnd
-	return r.Add(v, r)
-}
-
-func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
+func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
+	// TODO: Check if we can forge in the next blockNum, abort if we can't
+	batchInfo.Debug.Status = StatusSent
+	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
+	batchInfo.Debug.SendTimestamp = time.Now()
+	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
+		batchInfo.Debug.StartTimestamp).Seconds()
 	var ethTx *types.Transaction
 	var err error
 	auth, err := t.NewAuth(ctx)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	auth.Nonce = big.NewInt(int64(t.accNextNonce))
-	if resend {
-		auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
-	}
+	auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
+	t.lastPendingNonce++
 	for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
-		if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
-			return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
-				auth.GasPrice, t.cfg.MaxGasPrice))
-		}
-		// RollupForgeBatch() calls ethclient.SendTransaction()
 		ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
-		if errors.Is(err, core.ErrNonceTooLow) {
-			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
-				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
-			auth.Nonce.Add(auth.Nonce, big.NewInt(1))
-			attempt--
-		} else if errors.Is(err, core.ErrNonceTooHigh) {
-			log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
-				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
-			auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
-			attempt--
-		} else if errors.Is(err, core.ErrUnderpriced) {
-			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
-				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
-			auth.GasPrice = addPerc(auth.GasPrice, 10)
-			attempt--
-		} else if errors.Is(err, core.ErrReplaceUnderpriced) {
-			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
-				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
-			auth.GasPrice = addPerc(auth.GasPrice, 10)
-			attempt--
-		} else if err != nil {
+		if err != nil {
+			// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
+			// 	log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
+			// 		"block", t.stats.Eth.LastBlock.Num+1)
+			// 	return tracerr.Wrap(err)
+			// }
 			log.Errorw("TxManager ethClient.RollupForgeBatch",
 				"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
 				"batchNum", batchInfo.BatchNum)
@@ -243,30 +179,10 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
 	}
-	if !resend {
-		t.accNextNonce = auth.Nonce.Uint64() + 1
-	}
 	batchInfo.EthTx = ethTx
-	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
-	now := time.Now()
-	batchInfo.SendTimestamp = now
-	if resend {
-		batchInfo.Debug.ResendNum++
-	}
-	batchInfo.Debug.Status = StatusSent
-	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
-	batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
-	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
-		batchInfo.Debug.StartTimestamp).Seconds()
+	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
 	t.cfg.debugBatchStore(batchInfo)
-	// t.lastPendingBatch = batchInfo.BatchNum
-	if !resend {
-		if batchInfo.L1Batch {
-			t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
-		}
-	}
+	t.lastPendingBatch = batchInfo.BatchNum
 	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
 		return tracerr.Wrap(err)
 	}
@@ -309,19 +225,13 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
 func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
 	receipt := batchInfo.Receipt
 	if receipt != nil {
-		if batchInfo.EthTx.Nonce()+1 > t.accNonce {
-			t.accNonce = batchInfo.EthTx.Nonce() + 1
-		}
 		if receipt.Status == types.ReceiptStatusFailed {
 			batchInfo.Debug.Status = StatusFailed
 			t.cfg.debugBatchStore(batchInfo)
 			_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
-			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
+			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
 				"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
 				"err", err)
-			if batchInfo.BatchNum <= t.lastSuccessBatch {
-				t.lastSuccessBatch = batchInfo.BatchNum - 1
-			}
 			return nil, tracerr.Wrap(fmt.Errorf(
 				"ethereum transaction receipt status is failed: %w", err))
 		} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -329,17 +239,6 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
 			batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
 			batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
 				batchInfo.Debug.StartBlockNum
-			if batchInfo.Debug.StartToMineDelay == 0 {
-				if block, err := t.ethClient.EthBlockByNumber(ctx,
-					receipt.BlockNumber.Int64()); err != nil {
-					log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
-				} else {
-					batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
-						batchInfo.Debug.SendTimestamp).Seconds()
-					batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
-						batchInfo.Debug.StartTimestamp).Seconds()
-				}
-			}
 			t.cfg.debugBatchStore(batchInfo)
 			if batchInfo.BatchNum > t.lastSuccessBatch {
 				t.lastSuccessBatch = batchInfo.BatchNum
@@ -351,72 +250,9 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
return nil, nil return nil, nil
} }
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)
// Queue of BatchInfos
type Queue struct {
list []*BatchInfo
// nonceByBatchNum map[common.BatchNum]uint64
next int
}
// NewQueue returns a new queue
func NewQueue() Queue {
return Queue{
list: make([]*BatchInfo, 0),
// nonceByBatchNum: make(map[common.BatchNum]uint64),
next: 0,
}
}
// Len is the length of the queue
func (q *Queue) Len() int {
return len(q.list)
}
// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
if position >= len(q.list) {
return nil
}
return q.list[position]
}
// Next returns the next BatchInfo (or nil if queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
if len(q.list) == 0 {
return 0, nil
}
defer func() { q.next = (q.next + 1) % len(q.list) }()
return q.next, q.list[q.next]
}
// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
// batchInfo := q.list[position]
// delete(q.nonceByBatchNum, batchInfo.BatchNum)
q.list = append(q.list[:position], q.list[position+1:]...)
if len(q.list) == 0 {
q.next = 0
} else {
q.next = position % len(q.list)
}
}
// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
q.list = append(q.list, batchInfo)
// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}
// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// nonce, ok := q.nonceByBatchNum[batchNum]
// return nonce, ok
// }
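The removed Queue type above is a simple round-robin scheduler over pending forge batches: Next hands out entries cyclically, and Remove re-clamps the cursor so iteration resumes at the element that slid into the removed slot. A minimal, self-contained sketch of that behaviour (the driver is illustrative; BatchInfo is reduced to a bare struct):

```go
package main

import "fmt"

type BatchInfo struct{ BatchNum int }

// Queue mirrors the removed type above: a slice plus a cursor that
// wraps modulo the current length.
type Queue struct {
	list []*BatchInfo
	next int
}

func (q *Queue) Push(b *BatchInfo) { q.list = append(q.list, b) }

// Next returns the position and entry at the cursor, then advances it.
func (q *Queue) Next() (int, *BatchInfo) {
	if len(q.list) == 0 {
		return 0, nil
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

// Remove deletes the entry at position and re-clamps the cursor.
func (q *Queue) Remove(position int) {
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

func main() {
	q := Queue{}
	for i := 1; i <= 3; i++ {
		q.Push(&BatchInfo{BatchNum: i})
	}
	for i := 0; i < 4; i++ {
		pos, b := q.Next()
		fmt.Println(pos, b.BatchNum) // 0 1, 1 2, 2 3, 0 1
	}
	q.Remove(0)
	_, b := q.Next()
	fmt.Println(b.BatchNum) // 2: cursor re-clamped to the old position
}
```

The other side of this diff inlines exactly this logic into the Run loop with a local next index, as seen below.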
 // Run the TxManager
 func (t *TxManager) Run(ctx context.Context) {
+	next := 0
 	waitDuration := longWaitDuration
 	var statsVars statsVars
@@ -427,7 +263,7 @@ func (t *TxManager) Run(ctx context.Context) {
 	t.stats = statsVars.Stats
 	t.syncSCVars(statsVars.Vars)
 	log.Infow("TxManager: received initial statsVars",
-		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
+		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
 	for {
 		select {
@@ -437,27 +273,8 @@ func (t *TxManager) Run(ctx context.Context) {
 		case statsVars := <-t.statsVarsCh:
 			t.stats = statsVars.Stats
 			t.syncSCVars(statsVars.Vars)
case pipelineNum := <-t.discardPipelineCh:
t.minPipelineNum = pipelineNum + 1
if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
continue
} else if err != nil {
log.Errorw("TxManager: removeBadBatchInfos", "err", err)
continue
}
 		case batchInfo := <-t.batchCh:
-			if batchInfo.PipelineNum < t.minPipelineNum {
-				log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
-					"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
-			}
-			if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
-				log.Warnw("TxManager: shouldSend", "err", err,
-					"batch", batchInfo.BatchNum)
-				t.coord.SendMsg(ctx, MsgStopPipeline{
-					Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
-				continue
-			}
-			if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
+			if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
 				continue
 			} else if err != nil {
 				// If we reach here it's because our ethNode has
@@ -465,20 +282,19 @@ func (t *TxManager) Run(ctx context.Context) {
 				// been unable to send the transaction to
 				// ethereum. This could be due to the ethNode
 				// failure, or an invalid transaction (that
 				// can't be mined)
-				log.Warnw("TxManager: forgeBatch send failed", "err", err,
-					"batch", batchInfo.BatchNum)
-				t.coord.SendMsg(ctx, MsgStopPipeline{
-					Reason: fmt.Sprintf("forgeBatch send: %v", err)})
+				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
 				continue
 			}
-			t.queue.Push(batchInfo)
+			t.queue = append(t.queue, batchInfo)
 			waitDuration = t.cfg.TxManagerCheckInterval
 		case <-time.After(waitDuration):
-			queuePosition, batchInfo := t.queue.Next()
-			if batchInfo == nil {
+			if len(t.queue) == 0 {
 				waitDuration = longWaitDuration
 				continue
 			}
+			current := next
+			next = (current + 1) % len(t.queue)
+			batchInfo := t.queue[current]
 			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
 				continue
 			} else if err != nil { //nolint:staticcheck
@@ -488,8 +304,7 @@ func (t *TxManager) Run(ctx context.Context) {
 			// if it was not mined, mined and successful or
 			// mined and failed. This could be due to the
 			// ethNode failure.
-				t.coord.SendMsg(ctx, MsgStopPipeline{
-					Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
+				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
 			}
 			confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -497,108 +312,32 @@ func (t *TxManager) Run(ctx context.Context) {
 				continue
 			} else if err != nil { //nolint:staticcheck
 				// Transaction was rejected
-				if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
-					continue
-				} else if err != nil {
-					log.Errorw("TxManager: removeBadBatchInfos", "err", err)
-					continue
-				}
-				t.coord.SendMsg(ctx, MsgStopPipeline{
-					Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
-				continue
+				t.queue = append(t.queue[:current], t.queue[current+1:]...)
+				if len(t.queue) == 0 {
+					next = 0
+				} else {
+					next = current % len(t.queue)
+				}
+				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
 			}
-			now := time.Now()
-			if !t.cfg.EthNoReuseNonce && confirm == nil &&
-				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
-				log.Infow("TxManager: forgeBatch tx not been mined timeout, resending",
-					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
-				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
-					continue
-				} else if err != nil {
-					// If we reach here it's because our ethNode has
-					// been unable to send the transaction to
-					// ethereum. This could be due to the ethNode
-					// failure, or an invalid transaction (that
-					// can't be mined)
-					log.Warnw("TxManager: forgeBatch resend failed", "err", err,
-						"batch", batchInfo.BatchNum)
-					t.coord.SendMsg(ctx, MsgStopPipeline{
-						Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
-					continue
-				}
-			}
 			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
-				log.Debugw("TxManager: forgeBatch tx confirmed",
-					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
-				t.queue.Remove(queuePosition)
+				log.Debugw("TxManager tx for RollupForgeBatch confirmed",
+					"batch", batchInfo.BatchNum)
+				t.queue = append(t.queue[:current], t.queue[current+1:]...)
+				if len(t.queue) == 0 {
+					next = 0
+				} else {
+					next = current % len(t.queue)
+				}
 			}
 		}
 	}
 }
-func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
-	next := 0
-	// batchNum := 0
+// nolint reason: this function will be used in the future
+//nolint:unused
+func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
for {
batchInfo := t.queue.At(next)
if batchInfo == nil {
break
}
if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
return nil
} else if err != nil {
// Our ethNode is giving an error different
// than "not found" when getting the receipt
// for the transaction, so we can't figure out
// if it was not mined, mined and successful or
// mined and failed. This could be due to the
// ethNode failure.
next++
continue
}
confirm, err := t.handleReceipt(ctx, batchInfo)
if ctx.Err() != nil {
return nil
} else if err != nil {
// Transaction was rejected
if t.minPipelineNum <= batchInfo.PipelineNum {
t.minPipelineNum = batchInfo.PipelineNum + 1
}
t.queue.Remove(next)
continue
}
// If tx is pending but is from a cancelled pipeline, remove it
// from the queue
if confirm == nil {
if batchInfo.PipelineNum < t.minPipelineNum {
// batchNum++
t.queue.Remove(next)
continue
}
}
next++
}
accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
if err != nil {
return err
}
if !t.cfg.EthNoReuseNonce {
t.accNextNonce = accNonce
}
return nil
}
-func (t *TxManager) canForgeAt(blockNum int64) bool {
 	return canForge(&t.consts.Auction, &t.vars.Auction,
-		&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
+		&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
 		t.cfg.ForgerAddress, blockNum)
 }
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
}
return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
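mustL1L2Batch is the coordinator-side mirror of the rollup's ForgeL1L2BatchTimeout: once the target block is timeout−1 blocks past the last L1 batch (whichever of the synced or the locally-sent one is newer), the next forged batch must include L1 user txs. A small sketch with illustrative numbers (not values from the source):

```go
package main

import "fmt"

// mustL1L2Batch reproduces the removed check: blockNum is the block the
// batch will be forged into, timeout is ForgeL1L2BatchTimeout.
func mustL1L2Batch(blockNum, syncedL1BatchBlock, sentL1BatchBlock, timeout int64) bool {
	last := sentL1BatchBlock
	if syncedL1BatchBlock > last {
		last = syncedL1BatchBlock
	}
	return blockNum-last >= timeout-1
}

func main() {
	// With timeout = 10 and the last L1 batch at block 100:
	fmt.Println(mustL1L2Batch(108, 100, 0, 10)) // false (8 < 9)
	fmt.Println(mustL1L2Batch(109, 100, 0, 10)) // true  (9 >= 9)
}
```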

View File

@@ -1,15 +0,0 @@
package coordinator
import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddPerc(t *testing.T) {
assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}

View File

@@ -164,19 +164,6 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
 	return nil
 }
// GetBatch returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
batchNum,
)
return &batch, err
}
 // GetAllBatches retrieves all batches from the DB
 func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
 	var batches []*common.Batch
@@ -221,18 +208,6 @@ func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
 	return batchNum, tracerr.Wrap(row.Scan(&batchNum))
 }
// GetLastBatch returns the last forged batch
func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
var batch common.Batch
err := meddler.QueryRow(
hdb.db, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
)
return &batch, err
}
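Both removed getters surface sql.ErrNoRows through tracerr when the batch table is empty, so callers default to the zero batch. A sketch of the calling convention (import paths assumed to follow the hermez-node module layout):

```go
package example

import (
	"database/sql"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/tracerr"
)

// lastBatchOrZero shows the ErrNoRows convention: an empty batch table
// maps to the zero common.Batch (BatchNum == 0); any other error is fatal.
func lastBatchOrZero(hdb *historydb.HistoryDB) (*common.Batch, error) {
	batch, err := hdb.GetLastBatch()
	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
		return nil, tracerr.Wrap(err)
	}
	if tracerr.Unwrap(err) == sql.ErrNoRows {
		batch = &common.Batch{}
	}
	return batch, nil
}
```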
 // GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
 func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
 	row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch

View File

@@ -203,10 +203,6 @@ func TestBatches(t *testing.T) {
 	fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
 	assert.NoError(t, err)
 	assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
 	// Test GetLastL1TxsNum
 	fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
 	assert.NoError(t, err)
@@ -215,12 +211,6 @@ func TestBatches(t *testing.T) {
 	fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
 	assert.NoError(t, err)
 	assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
 }

 func TestBids(t *testing.T) {
@@ -621,10 +611,10 @@ func TestTxs(t *testing.T) {
 	assert.Equal(t, common.TxTypeExit, dbL2Txs[3].Type)
 	// Tx ID
-	assert.Equal(t, "0x02d709307533c4e3c03f20751fc4d72bc18b225d14f9616525540a64342c7c350d", dbL2Txs[0].TxID.String())
+	assert.Equal(t, "0x024e555248100b69a8aabf6d31719b9fe8a60dcc6c3407904a93c8d2d9ade18ee5", dbL2Txs[0].TxID.String())
-	assert.Equal(t, "0x02e88bc5503f282cca045847668511290e642410a459bb67b1fafcd1b6097c149c", dbL2Txs[1].TxID.String())
+	assert.Equal(t, "0x021ae87ca34d50ff35d98dfc0d7c95f2bf2e4ffeebb82ea71f43a8b0dfa5d36d89", dbL2Txs[1].TxID.String())
-	assert.Equal(t, "0x027911262b43315c0b24942a02fe228274b6e4d57a476bfcdd7a324b3091362c7d", dbL2Txs[2].TxID.String())
+	assert.Equal(t, "0x024abce7f3f2382dc520ed557593f11dea1ee197e55b60402e664facc27aa19774", dbL2Txs[2].TxID.String())
-	assert.Equal(t, "0x02f572b63f2a5c302e1b9337ea6944bfbac3d199e4ddd262b5a53759c72ec10ee6", dbL2Txs[3].TxID.String())
+	assert.Equal(t, "0x02f921ad9e7a6e59606570fe12a7dde0e36014197de0363b9b45e5097d6f2b1dd0", dbL2Txs[3].TxID.String())
 	// Tx From and To IDx
 	assert.Equal(t, dbL2Txs[0].ToIdx, dbL2Txs[2].FromIdx)

View File

@@ -425,13 +425,12 @@ func (k *KVDB) MakeCheckpoint() error {
 	}
 	// if checkpoint BatchNum already exists in disk, delete it
-	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
-	} else if err != nil {
-		return tracerr.Wrap(err)
-	} else {
+	if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) {
 		if err := os.RemoveAll(checkpointPath); err != nil {
 			return tracerr.Wrap(err)
 		}
+	} else if err != nil && !os.IsNotExist(err) {
+		return tracerr.Wrap(err)
 	}
 	// execute Checkpoint
@@ -452,25 +451,12 @@ func (k *KVDB) MakeCheckpoint() error {
 	return nil
 }
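Both shapes of this check are more convoluted than needed; in the refactored form the `else if err != nil && !os.IsNotExist(err)` arm even appears unreachable, since the first branch already captures every case where the error is not "not exist". The conventional shape for "remove the directory if it exists" is a sketch like this (not the repository's code):

```go
package example

import "os"

// removeIfExists deletes path when present; "does not exist" counts as
// success, and any other Stat error is returned to the caller.
func removeIfExists(path string) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	return os.RemoveAll(path)
}
```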
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
 // DeleteCheckpoint removes, if it exists, the checkpoint of the given batchNum
 func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
 	checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
 	if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
 		return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
-	} else if err != nil {
-		return tracerr.Wrap(err)
 	}
 	return os.RemoveAll(checkpointPath)
@@ -534,8 +520,6 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
 	if _, err := os.Stat(source); os.IsNotExist(err) {
 		// if kvdb does not have checkpoint at batchNum, return err
 		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
-	} else if err != nil {
-		return tracerr.Wrap(err)
 	}
 	// By locking we allow calling MakeCheckpointFromTo from multiple
 	// places at the same time for the same stateDB. This allows the
@@ -549,13 +533,12 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
 func pebbleMakeCheckpoint(source, dest string) error {
 	// Remove dest folder (if it exists) before doing the checkpoint
-	if _, err := os.Stat(dest); os.IsNotExist(err) {
-	} else if err != nil {
-		return tracerr.Wrap(err)
-	} else {
+	if _, err := os.Stat(dest); !os.IsNotExist(err) {
 		if err := os.RemoveAll(dest); err != nil {
 			return tracerr.Wrap(err)
 		}
+	} else if err != nil && !os.IsNotExist(err) {
+		return tracerr.Wrap(err)
 	}
 	sto, err := pebble.NewPebbleStorage(source, false)

View File

@@ -498,17 +498,11 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
 	}, nil
 }
// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
 // Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
 // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
 func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
 	if fromSynchronizer {
-		log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
 		if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
 			return tracerr.Wrap(err)
 		}

View File

@@ -462,11 +462,11 @@ func (c *RollupClient) RollupL1UserTxERC20ETH(fromBJJ babyjub.PublicKeyComp, fro
 	}
 	fromIdxBig := big.NewInt(fromIdx)
 	toIdxBig := big.NewInt(toIdx)
-	depositAmountF, err := common.NewFloat16(depositAmount)
+	depositAmountF, err := common.NewFloat40(depositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	amountF, err := common.NewFloat16(amount)
+	amountF, err := common.NewFloat40(amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -497,11 +497,11 @@ func (c *RollupClient) RollupL1UserTxERC20Permit(fromBJJ babyjub.PublicKeyComp,
 	}
 	fromIdxBig := big.NewInt(fromIdx)
 	toIdxBig := big.NewInt(toIdx)
-	depositAmountF, err := common.NewFloat16(depositAmount)
+	depositAmountF, err := common.NewFloat40(depositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	amountF, err := common.NewFloat16(amount)
+	amountF, err := common.NewFloat40(amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
@@ -939,9 +939,9 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
 		FeeIdxCoordinator: []common.Idx{},
 	}
 	nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
-	lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
+	lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
 	numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
-	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
+	numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
 	numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
 	numBeginL2Tx := numBytesL1TxCoord + numBytesL1TxUser
 	l1UserTxsData := []byte{}
@@ -968,7 +968,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
 		rollupForgeBatchArgs.L2TxsData = append(rollupForgeBatchArgs.L2TxsData, *l2Tx)
 	}
 	for i := 0; i < numTxsL1Coord; i++ {
-		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
+		bytesL1Coordinator := aux.EncodedL1CoordinatorTx[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
 		var signature []byte
 		v := bytesL1Coordinator[0]
 		s := bytesL1Coordinator[1:33]
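The formula change quantifies the cost of the migration: each L1/L2 tx in the forged batch data carries two idx fields of nLevels/8 bytes, the encoded amount, and a trailing byte (the fee selector, stated here as an assumption). With nLevels = 32 that is 11 bytes under the 2-byte Float16 and 14 bytes under the 5-byte Float40:

```go
package main

import "fmt"

const (
	float16BytesLength = 2 // old amount encoding
	float40BytesLength = 5 // common.Float40BytesLength
)

// lenL1L2TxsBytes mirrors the formula above: fromIdx + toIdx
// (nLevels/8 bytes each) + encoded amount + 1 trailing byte.
func lenL1L2TxsBytes(nLevels, amountLen int) int {
	return (nLevels/8)*2 + amountLen + 1
}

func main() {
	fmt.Println(lenL1L2TxsBytes(32, float16BytesLength)) // 11
	fmt.Println(lenL1L2TxsBytes(32, float40BytesLength)) // 14
}
```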

View File

@@ -131,9 +131,9 @@ func TestRollupForgeBatch(t *testing.T) {
 	args.FeeIdxCoordinator = []common.Idx{} // When encoded, 64 times the 0 idx means that no idx to collect fees is specified.
 	l1CoordinatorBytes, err := hex.DecodeString("1c660323607bb113e586183609964a333d07ebe4bef3be82ec13af453bae9590bd7711cdb6abf42f176eadfbe5506fbef5e092e5543733f91b0061d9a7747fa10694a915a6470fa230de387b51e6f4db0b09787867778687b55197ad6d6a86eac000000001")
 	require.NoError(t, err)
-	numTxsL1 := len(l1CoordinatorBytes) / common.L1CoordinatorTxBytesLen
+	numTxsL1 := len(l1CoordinatorBytes) / common.RollupConstL1CoordinatorTotalBytes
 	for i := 0; i < numTxsL1; i++ {
-		bytesL1Coordinator := l1CoordinatorBytes[i*common.L1CoordinatorTxBytesLen : (i+1)*common.L1CoordinatorTxBytesLen]
+		bytesL1Coordinator := l1CoordinatorBytes[i*common.RollupConstL1CoordinatorTotalBytes : (i+1)*common.RollupConstL1CoordinatorTotalBytes]
 		var signature []byte
 		v := bytesL1Coordinator[0]
 		s := bytesL1Coordinator[1:33]
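The slicing in this loop exposes the start of the encoded coordinator-tx layout: 1 byte v and 32 bytes s of the Ethereum signature. The rest of the 101-byte record (RollupConstL1CoordinatorTotalBytes) presumably follows as r, the compressed BJJ key, and a 4-byte tokenID, which would match the ...00000001 tail of the test vector; treat the exact field order in this sketch as an assumption, not the contract spec:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const l1CoordinatorTotalBytes = 101 // assumed layout: 1 + 32 + 32 + 32 + 4

// decodeL1CoordTx splits one encoded coordinator tx; the field order is
// an assumption based on the slicing visible in the test above.
func decodeL1CoordTx(b []byte) (v byte, s, r, bjj []byte, tokenID uint32, err error) {
	if len(b) != l1CoordinatorTotalBytes {
		return 0, nil, nil, nil, 0,
			fmt.Errorf("expected %d bytes, got %d", l1CoordinatorTotalBytes, len(b))
	}
	v = b[0]
	s = b[1:33]
	r = b[33:65]
	bjj = b[65:97]
	tokenID = binary.BigEndian.Uint32(b[97:101])
	return v, s, r, bjj, tokenID, nil
}

func main() {
	buf := make([]byte, l1CoordinatorTotalBytes)
	buf[100] = 1 // tokenID = 1, like the trailing ...00000001 above
	_, _, _, _, tokenID, _ := decodeL1CoordTx(buf)
	fmt.Println(tokenID) // 1
}
```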

go.mod
View File

@@ -6,7 +6,6 @@ require (
 	github.com/BurntSushi/toml v0.3.1
 	github.com/dghubble/sling v1.3.0
 	github.com/ethereum/go-ethereum v1.9.25
-	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
 	github.com/getkin/kin-openapi v0.22.0
 	github.com/gin-contrib/cors v1.3.1
 	github.com/gin-gonic/gin v1.5.0
@@ -25,8 +24,6 @@ require (
 	github.com/mitchellh/mapstructure v1.3.0
 	github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
 	github.com/russross/meddler v1.0.0
-	github.com/sirupsen/logrus v1.5.0 // indirect
-	github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
 	github.com/stretchr/testify v1.6.1
 	github.com/urfave/cli/v2 v2.2.0
 	go.uber.org/zap v1.16.0

go.sum
View File

@@ -24,8 +24,6 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
 github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
-github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -86,8 +84,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
-github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
-github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -173,8 +169,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -605,8 +599,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
-github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -625,8 +617,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
-github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
-github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=

View File

@@ -2,7 +2,6 @@ package node
 import (
 	"context"
-	"errors"
 	"fmt"
 	"net/http"
 	"sync"
@@ -302,9 +301,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		SyncRetryInterval:      cfg.Coordinator.SyncRetryInterval.Duration,
 		EthClientAttempts:      cfg.Coordinator.EthClient.Attempts,
 		EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
-		EthNoReuseNonce:        cfg.Coordinator.EthClient.NoReuseNonce,
-		EthTxResendTimeout:     cfg.Coordinator.EthClient.TxResendTimeout.Duration,
-		MaxGasPrice:            cfg.Coordinator.EthClient.MaxGasPrice,
 		TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
 		DebugBatchPath:         cfg.Coordinator.Debug.BatchPath,
 		Purger: coordinator.PurgerCfg{
@@ -491,15 +487,11 @@ func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, va
 	if stats.Synced() {
 		if err := n.nodeAPI.api.UpdateNetworkInfo(
 			stats.Eth.LastBlock, stats.Sync.LastBlock,
-			common.BatchNum(stats.Eth.LastBatchNum),
+			common.BatchNum(stats.Eth.LastBatch),
 			stats.Sync.Auction.CurrentSlot.SlotNum,
 		); err != nil {
 			log.Errorw("API.UpdateNetworkInfo", "err", err)
 		}
-	} else {
-		n.nodeAPI.api.UpdateNetworkInfoBlock(
-			stats.Eth.LastBlock, stats.Sync.LastBlock,
-		)
-	}
 	}
 }
@@ -581,14 +573,10 @@ func (n *Node) StartSynchronizer() {
 			if n.ctx.Err() != nil {
 				continue
 			}
-			if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
-				log.Warnw("Synchronizer.Sync", "err", err)
-			} else {
 			log.Errorw("Synchronizer.Sync", "err", err)
-			}
 			}
 		}
-	}
 	}()
 	n.wg.Add(1)

View File

@@ -25,12 +25,12 @@ type Stats struct {
 		Updated       time.Time
 		FirstBlockNum int64
 		LastBlock     common.Block
-		LastBatchNum  int64
+		LastBatch     int64
 	}
 	Sync struct {
 		Updated   time.Time
 		LastBlock common.Block
-		LastBatch common.Batch
+		LastBatch int64
 		// LastL1BatchBlock is the last ethereum block in which an
 		// l1Batch was forged
 		LastL1BatchBlock int64
@@ -77,13 +77,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
 }

 // UpdateSync updates the synchronizer stats
-func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
+func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum,
 	lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
 	now := time.Now()
 	s.rw.Lock()
 	s.Sync.LastBlock = *lastBlock
 	if lastBatch != nil {
-		s.Sync.LastBatch = *lastBatch
+		s.Sync.LastBatch = int64(*lastBatch)
 	}
 	if lastL1BatchBlock != nil {
 		s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -105,16 +105,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
 	lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
 	if err != nil {
-		return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
+		return tracerr.Wrap(err)
 	}
-	lastBatchNum, err := ethClient.RollupLastForgedBatch()
+	lastBatch, err := ethClient.RollupLastForgedBatch()
 	if err != nil {
-		return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
+		return tracerr.Wrap(err)
 	}
 	s.rw.Lock()
 	s.Eth.Updated = now
 	s.Eth.LastBlock = *lastBlock
-	s.Eth.LastBatchNum = lastBatchNum
+	s.Eth.LastBatch = lastBatch
 	s.rw.Unlock()
 	return nil
 }
@@ -139,10 +139,6 @@ func (s *StatsHolder) CopyStats() *Stats {
 		sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
 			common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
 	}
-	if s.Sync.LastBatch.StateRoot != nil {
-		sCopy.Sync.LastBatch.StateRoot =
-			common.CopyBigInt(s.Sync.LastBatch.StateRoot)
-	}
 	s.rw.RUnlock()
 	return &sCopy
 }
@@ -156,9 +152,9 @@ func (s *StatsHolder) blocksPerc() float64 {
 		float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
 }

-func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
+func (s *StatsHolder) batchesPerc(batchNum int64) float64 {
 	return float64(batchNum) * 100.0 /
-		float64(s.Eth.LastBatchNum)
+		float64(s.Eth.LastBatch)
 }
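As a worked example: with Eth.LastBatch = 200 and a synced batch number of 150, batchesPerc returns 150 * 100.0 / 200 = 75.0. The common.BatchNum → int64 change does not alter the arithmetic; it only moves the cast to the call sites shown further down.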
 // StartBlockNums sets the first block used to start tracking the smart
@@ -333,25 +329,23 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
 	return nil
 }

-// updateCurrentSlot updates the slot with information of the current slot.
-// The information about which coordinator is allowed to forge is only updated
-// when we are Synced.
-// hasBatch is true when the last synced block contained at least one batch.
-func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
+// firstBatchBlockNum is the blockNum of first batch in that block, if any
+func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) {
+	slot := common.Slot{
+		SlotNum:          s.stats.Sync.Auction.CurrentSlot.SlotNum,
+		ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment,
+	}
 	// We want the next block because the current one is already mined
 	blockNum := s.stats.Sync.LastBlock.Num + 1
 	slotNum := s.consts.Auction.SlotNum(blockNum)
-	firstBatchBlockNum := s.stats.Sync.LastBlock.Num
 	if reset {
-		// Using this query only to know if there
 		dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
 		if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-			return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
+			return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
 		} else if tracerr.Unwrap(err) == sql.ErrNoRows {
-			hasBatch = false
+			firstBatchBlockNum = nil
 		} else {
-			hasBatch = true
-			firstBatchBlockNum = dbFirstBatchBlockNum
+			firstBatchBlockNum = &dbFirstBatchBlockNum
 		}
 		slot.ForgerCommitment = false
 	} else if slotNum > slot.SlotNum {
@@ -362,11 +356,11 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
 	slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 	// If Synced, update the current coordinator
 	if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-		if err := s.setSlotCoordinator(slot); err != nil {
-			return tracerr.Wrap(err)
+		if err := s.setSlotCoordinator(&slot); err != nil {
+			return nil, tracerr.Wrap(err)
 		}
-		if hasBatch &&
-			s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
+		if firstBatchBlockNum != nil &&
+			s.consts.Auction.RelativeBlock(*firstBatchBlockNum) <
 			int64(s.vars.Auction.SlotDeadline) {
 			slot.ForgerCommitment = true
 		}
@@ -375,61 +369,57 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
 		// BEGIN SANITY CHECK
 		canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
 		if err != nil {
-			return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
+			return nil, tracerr.Wrap(err)
 		}
 		if !canForge {
-			return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+			return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 				"differs from smart contract: %+v", slot))
 		}
 		// END SANITY CHECK
 	}
-	return nil
+	return &slot, nil
 }
-// updateNextSlot updates the slot with information of the next slot.
-// The information about which coordinator is allowed to forge is only updated
-// when we are Synced.
-func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
+func (s *Synchronizer) getNextSlot() (*common.Slot, error) {
 	// We want the next block because the current one is already mined
 	blockNum := s.stats.Sync.LastBlock.Num + 1
 	slotNum := s.consts.Auction.SlotNum(blockNum) + 1
-	slot.SlotNum = slotNum
-	slot.ForgerCommitment = false
+	slot := common.Slot{
+		SlotNum:          slotNum,
+		ForgerCommitment: false,
+	}
 	slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
 	// If Synced, update the current coordinator
 	if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
-		if err := s.setSlotCoordinator(slot); err != nil {
-			return tracerr.Wrap(err)
+		if err := s.setSlotCoordinator(&slot); err != nil {
+			return nil, tracerr.Wrap(err)
 		}
 		// TODO: Remove this SANITY CHECK once this code is tested enough
 		// BEGIN SANITY CHECK
 		canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
 		if err != nil {
-			return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
+			return nil, tracerr.Wrap(err)
 		}
 		if !canForge {
-			return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
+			return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
 				"differs from smart contract: %+v", slot))
 		}
 		// END SANITY CHECK
 	}
-	return nil
+	return &slot, nil
 }
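Both helpers turn the next block number into a slot via the auction constants. Assuming the usual definitions (slot number is blocks since genesis divided by the slot size; these are written to match their observed use, not quoted from the source), the arithmetic looks like:

```go
package main

import "fmt"

// auctionConsts stands in for common.AuctionConstants; the two methods
// below are assumed definitions consistent with how they are called.
type auctionConsts struct {
	GenesisBlockNum int64
	BlocksPerSlot   uint8
}

func (c auctionConsts) SlotNum(blockNum int64) int64 {
	if blockNum < c.GenesisBlockNum {
		return -1
	}
	return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}

func (c auctionConsts) SlotBlocks(slotNum int64) (start, end int64) {
	start = c.GenesisBlockNum + slotNum*int64(c.BlocksPerSlot)
	return start, start + int64(c.BlocksPerSlot) - 1
}

func main() {
	c := auctionConsts{GenesisBlockNum: 1000, BlocksPerSlot: 40}
	fmt.Println(c.SlotNum(1085)) // 2
	fmt.Println(c.SlotBlocks(2)) // 1080 1119
}
```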
-// updateCurrentNextSlotIfSync updates the current and next slot. Information
-// about forger address that is allowed to forge is only updated if we are
-// Synced.
-func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
-	current := s.stats.Sync.Auction.CurrentSlot
-	next := s.stats.Sync.Auction.NextSlot
-	if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
+func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error {
+	current, err := s.getCurrentSlot(reset, firstBatchBlockNum)
+	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	if err := s.updateNextSlot(&next); err != nil {
+	next, err := s.getNextSlot()
+	if err != nil {
 		return tracerr.Wrap(err)
 	}
-	s.stats.UpdateCurrentNextSlot(&current, &next)
+	s.stats.UpdateCurrentNextSlot(current, next)
 	return nil
 }
@@ -468,9 +458,9 @@ func (s *Synchronizer) init() error {
"ethLastBlock", s.stats.Eth.LastBlock, "ethLastBlock", s.stats.Eth.LastBlock,
) )
log.Infow("Sync init batch", log.Infow("Sync init batch",
"syncLastBatch", s.stats.Sync.LastBatch.BatchNum, "syncLastBatch", s.stats.Sync.LastBatch,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum), "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch),
"ethLastBatch", s.stats.Eth.LastBatchNum, "ethLastBatch", s.stats.Eth.LastBatch,
) )
return nil return nil
} }
@@ -531,7 +521,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 	if tracerr.Unwrap(err) == ethereum.NotFound {
 		return nil, nil, nil
 	} else if err != nil {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
+		return nil, nil, tracerr.Wrap(err)
 	}
 	log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
 		ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -637,14 +627,14 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 		}
 	}
 		s.stats.UpdateSync(ethBlock,
-			&rollupData.Batches[batchesLen-1].Batch,
+			&rollupData.Batches[batchesLen-1].Batch.BatchNum,
 			lastL1BatchBlock, lastForgeL1TxsNum)
 	}
-	hasBatch := false
+	var firstBatchBlockNum *int64
 	if len(rollupData.Batches) > 0 {
-		hasBatch = true
+		firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum
 	}
-	if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil {
 		return nil, nil, tracerr.Wrap(err)
 	}
@@ -656,8 +646,8 @@ func (s *Synchronizer) Sync2(ctx context.Context,
 	for _, batchData := range rollupData.Batches {
 		log.Debugw("Synced batch",
 			"syncLastBatch", batchData.Batch.BatchNum,
-			"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
-			"ethLastBatch", s.stats.Eth.LastBatchNum,
+			"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
+			"ethLastBatch", s.stats.Eth.LastBatch,
 		)
 	}
@@ -710,15 +700,15 @@ func getInitialVariables(ethClient eth.ClientInterface,
 	consts *SCConsts) (*SCVariables, *StartBlockNums, error) {
 	rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
 	if err != nil {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
+		return nil, nil, tracerr.Wrap(err)
 	}
 	auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
 	if err != nil {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
+		return nil, nil, tracerr.Wrap(err)
 	}
 	wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
 	if err != nil {
-		return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
+		return nil, nil, tracerr.Wrap(err)
 	}
 	rollupVars := rollupInit.RollupVariables()
 	auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
@@ -763,15 +753,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		s.vars.WDelayer = *wDelayer
 	}

-	batch, err := s.historyDB.GetLastBatch()
+	batchNum, err := s.historyDB.GetLastBatchNum()
 	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
 		return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
 	}
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		batch = &common.Batch{}
+		batchNum = 0
 	}
-	err = s.stateDB.Reset(batch.BatchNum)
+	err = s.stateDB.Reset(batchNum)
 	if err != nil {
 		return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
 	}
@@ -793,9 +783,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 		lastForgeL1TxsNum = &n
 	}

-	s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
+	s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum)
-	if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
+	if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil {
 		return tracerr.Wrap(err)
 	}
 	return nil
@@ -812,7 +802,7 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 	// the expected one.
 	rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
 	if err != nil {
-		return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
+		return nil, tracerr.Wrap(err)
 	}
 	// No events in this block
 	if rollupEvents == nil {
@@ -929,15 +919,9 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
 		return nil, tracerr.Wrap(err)
 	}
 	if s.stateDB.CurrentBatch() != batchNum {
-		return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
-			"evtForgeBatch.BatchNum = (%v)",
+		return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)",
 			s.stateDB.CurrentBatch(), batchNum))
 	}
-	if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
-		return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
-			"forgeBatchArgs.NewStRoot (%v)",
-			s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
-	}

 	// Transform processed PoolL2 txs to L2 and store in BatchData
 	l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -1122,7 +1106,7 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
 	// Get auction events in the block
 	auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
 	if err != nil {
-		return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
+		return nil, tracerr.Wrap(err)
 	}
 	// No events in this block
 	if auctionEvents == nil {
@@ -1219,7 +1203,7 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
 	// Get wDelayer events in the block
 	wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
 	if err != nil {
-		return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
+		return nil, tracerr.Wrap(err)
 	}
 	// No events in this block
 	if wDelayerEvents == nil {

View File

@@ -797,11 +797,11 @@ func (c *Client) RollupL1UserTxERC20ETH(
 	cpy := c.nextBlock().copy()
 	defer func() { c.revertIfErr(err, cpy) }()
-	_, err = common.NewFloat16(amount)
+	_, err = common.NewFloat40(amount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
-	_, err = common.NewFloat16(depositAmount)
+	_, err = common.NewFloat40(depositAmount)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}

View File

@@ -142,10 +142,12 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
 	// same values as in the js test
 	users = GenerateJsUsers(t)
+	depositAmount, err := common.Float40(10400).BigInt()
+	require.Nil(t, err)
 	l1UserTxs = []common.L1Tx{
 		{
 			FromIdx:       0,
-			DepositAmount: big.NewInt(16000000),
+			DepositAmount: depositAmount,
 			Amount:        big.NewInt(0),
 			TokenID:       1,
 			FromBJJ:       users[0].BJJ.Public().Compress(),

View File

@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData() h, err := zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "9971598169768987067017223790214537222850903267980994716992761290793474746117", h.String()) assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)
// batch3 // batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData() h, err = zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "7992262236065691439683036344554725221924027193771770363772735722054938818364", h.String()) assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0]) assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String()) assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)

View File

@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
 	os.Exit(exitVal)
 }

-const MaxTx = 376
+const MaxTx = 352
 const NLevels = 32
 const MaxL1Tx = 256
 const MaxFeeTx = 64
@@ -61,6 +61,7 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
 		return
 	}
+	log.Infof("sending proof to %s", proofServerURL)
 	// Store zkinputs json for debugging purposes
 	zkInputsJSON, err := json.Marshal(zki)
 	require.NoError(t, err)

View File

@@ -501,11 +501,11 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
 		tp.zki.OnChain[tp.i] = big.NewInt(1)
 		// L1Txs
-		depositAmountF16, err := common.NewFloat16(tx.DepositAmount)
+		depositAmountF40, err := common.NewFloat40(tx.DepositAmount)
 		if err != nil {
 			return nil, nil, false, nil, tracerr.Wrap(err)
 		}
-		tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF16))
+		tp.zki.DepositAmountF[tp.i] = big.NewInt(int64(depositAmountF40))
 		tp.zki.FromEthAddr[tp.i] = common.EthAddrToBigInt(tx.FromEthAddr)
 		if tx.FromBJJ != common.EmptyBJJComp {
 			tp.zki.FromBJJCompressed[tp.i] = BJJCompressedTo256BigInts(tx.FromBJJ)
@@ -515,6 +515,20 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1) tp.zki.ISOnChain[tp.i] = big.NewInt(1)
} }
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// in the cases where at L1Tx there is usage of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
} }
switch tx.Type { switch tx.Type {
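Note: the four types gated above (ForceTransfer, DepositTransfer, CreateAccountDepositTransfer, ForceExit) are exactly the L1 transaction types that move an Amount inside the rollup, so only they populate ZKInputs.AmountF; plain deposits and account creations leave the slot at zero. A sketch of that predicate (l1TxUsesAmount is illustrative, not a hermez-node function):

// l1TxUsesAmount reports whether an L1 tx type carries an Amount that must
// be exposed to the circuit through ZKInputs.AmountF (illustrative helper
// mirroring the condition above).
func l1TxUsesAmount(txType common.TxType) bool {
	switch txType {
	case common.TxTypeForceTransfer,
		common.TxTypeDepositTransfer,
		common.TxTypeCreateAccountDepositTransfer,
		common.TxTypeForceExit:
		return true
	default:
		return false
	}
}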
@@ -657,6 +671,11 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
 tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
 tp.zki.OnChain[tp.i] = big.NewInt(0)
+amountF40, err := common.NewFloat40(tx.Amount)
+if err != nil {
+return nil, nil, false, tracerr.Wrap(err)
+}
+tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
 tp.zki.NewAccount[tp.i] = big.NewInt(0)
 // L2Txs
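Note: NewFloat40 can fail here because not every *big.Int amount is expressible as mantissa * 10^exponent with a 35-bit mantissa. A self-contained sketch of that representability rule, under the assumed semantics that common.NewFloat40 rejects values it cannot encode exactly (isFloat40Representable is hypothetical):

import "math/big"

// isFloat40Representable reports whether a non-negative v can be written
// exactly as m * 10^e with m < 2^35 and 0 <= e <= 31 (assumed Float40
// semantics).
func isFloat40Representable(v *big.Int) bool {
	maxM := new(big.Int).Lsh(big.NewInt(1), 35) // 2^35
	ten := big.NewInt(10)
	m := new(big.Int).Set(v)
	for e := 0; e <= 31; e++ {
		if m.Cmp(maxM) < 0 {
			return true // v == m * 10^e fits
		}
		q, r := new(big.Int).QuoRem(m, ten, new(big.Int))
		if r.Sign() != 0 {
			return false // a non-zero digit would be lost
		}
		m = q
	}
	return false
}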


@@ -1,8 +1,6 @@
 package txprocessor
 import (
-"encoding/binary"
-"encoding/hex"
 "io/ioutil"
 "math/big"
 "os"
@@ -642,17 +640,16 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
 users := txsets.GenerateJsUsers(t)
-daMaxHex, err := hex.DecodeString("FFFF")
+daMaxF40 := common.Float40(0xFFFFFFFFFF)
+daMaxBI, err := daMaxF40.BigInt()
 require.NoError(t, err)
-daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
-daMaxBI := daMaxF16.BigInt()
-assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())
-daMax1Hex, err := hex.DecodeString("FFFE")
+assert.Equal(t, "343597383670000000000000000000000000000000", daMaxBI.String())
+daMax1F40 := common.Float40(0xFFFFFFFFFE)
 require.NoError(t, err)
-daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
-daMax1BI := daMax1F16.BigInt()
-assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())
+daMax1BI, err := daMax1F40.BigInt()
+require.NoError(t, err)
+assert.Equal(t, "343597383660000000000000000000000000000000", daMax1BI.String())
 l1Txs := []common.L1Tx{
 {
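Note on the new expected values: under the assumed [5-bit e | 35-bit m] layout, 0xFFFFFFFFFF decodes to m = 2^35 - 1 = 34359738367 with e = 31, i.e. 34359738367 * 10^31, exactly the asserted string, and 0xFFFFFFFFFE only lowers the mantissa to 34359738366. The removed asserts are consistent with Float16's [5-bit e | half bit | 10-bit m] layout, whose maximum 0xFFFF is (1023 + 0.5) * 10^31 = 1.0235 * 10^34. The migration therefore raises the maximum representable amount by a factor of roughly 3.4 * 10^7.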

File diff suppressed because one or more lines are too long


@@ -3,7 +3,6 @@ package txselector
 // current: very simple version of TxSelector
 import (
-"bytes"
 "fmt"
 "math/big"
 "sort"
@@ -89,8 +88,12 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
 // Reset tells the TxSelector to get it's internal AccountsDB
 // from the required `batchNum`
-func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
-return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
+func (txsel *TxSelector) Reset(batchNum common.BatchNum) error {
+err := txsel.localAccountsDB.Reset(batchNum, true)
+if err != nil {
+return tracerr.Wrap(err)
+}
+return nil
 }
 func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
@@ -232,7 +235,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
 // discard L2Tx, and update Info parameter of
 // the tx, and add it to the discardedTxs array
-l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator"
+l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
+"L1CoordinatorTx and there is not enough space for L1Coordinator"
 discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
 continue
 }
@@ -257,7 +261,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 // not valid Amount with current Balance. Discard L2Tx,
 // and update Info parameter of the tx, and add it to
 // the discardedTxs array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String())
+l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
+"Current sender account Balance: %s, Amount+Fee: %s",
+balance.String(), feeAndAmount.String())
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
 }
@@ -269,7 +275,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 // not valid Nonce at tx. Discard L2Tx, and update Info
 // parameter of the tx, and add it to the discardedTxs
 // array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
+l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
+"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
 }
@@ -287,18 +294,31 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
 len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
 if err != nil {
-log.Debug(err)
+log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
 // Discard L2Tx, and update Info parameter of
 // the tx, and add it to the discardedTxs array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error())
+l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
+err.Error())
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
 }
-if accAuth != nil && l1CoordinatorTx != nil {
+if l1CoordinatorTx != nil {
+// If ToEthAddr == 0xff.. this means that we
+// are handling a TransferToBJJ, which doesn't
+// require an authorization because it doesn't
+// contain a valid ethereum address.
+// Otherwise only create the account if we have
+// the corresponding authorization
+if validL2Tx.ToEthAddr == common.FFAddr {
+accAuths = append(accAuths, common.EmptyEthSignature)
+l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
+positionL1++
+} else if accAuth != nil {
 accAuths = append(accAuths, accAuth.Signature)
 l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
 positionL1++
 }
+}
 if validL2Tx != nil {
 validTxs = append(validTxs, *validL2Tx)
 }
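Note: the new branch keys off the all-0xff sentinel address that marks BJJ-only recipients, and it appends common.EmptyEthSignature so that accAuths stays index-aligned with l1CoordinatorTxs. A minimal sketch of the sentinel check, assuming common.FFAddr is the 20-byte all-0xff address as the comment above indicates (isTransferToBJJ is illustrative):

import ethCommon "github.com/ethereum/go-ethereum/common"

// ffAddr is the sentinel recipient address for BJJ-only transfers;
// assumed to match common.FFAddr.
var ffAddr = ethCommon.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")

// isTransferToBJJ reports whether the recipient is identified only by a
// BabyJubJub key, in which case no Ethereum account-creation authorization
// can exist and the coordinator may create the account without one.
func isTransferToBJJ(toEthAddr ethCommon.Address) bool {
	return toEthAddr == ffAddr
}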
@@ -310,8 +330,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 "ToIdx", l2Txs[i].ToIdx)
 // Discard L2Tx, and update Info parameter of
 // the tx, and add it to the discardedTxs array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d",
-l2Txs[i].ToIdx)
+l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+
+"ToIdx: %d", l2Txs[i].ToIdx)
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
 }
@@ -323,7 +343,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 // Discard L2Tx, and update Info
 // parameter of the tx, and add it to
 // the discardedTxs array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
+l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
+"does not correspond to the Account.EthAddr. "+
+"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
 l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
@@ -337,7 +359,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 // Discard L2Tx, and update Info
 // parameter of the tx, and add it to
 // the discardedTxs array
-l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
+l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
+"does not correspond to the Account.BJJ. "+
+"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
 l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
 discardedL2Txs = append(discardedL2Txs, l2Txs[i])
 continue
@@ -411,7 +435,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
 log.Error(err)
 // Discard L2Tx, and update Info parameter of the tx,
 // and add it to the discardedTxs array
-selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error())
+selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
 discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
 continue
 }
@@ -467,8 +491,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 var l1CoordinatorTx *common.L1Tx
 var accAuth *common.AccountCreationAuth
-if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) &&
-!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
+if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
 // case: ToEthAddr != 0x00 neither 0xff
 if l2Tx.ToBJJ != common.EmptyBJJComp {
 // case: ToBJJ!=0:
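Note: this rewrite (and the analogous ones below) is possible because go-ethereum's common.Address is a [20]byte value type, so == compares the bytes directly; the bytes.Equal calls added nothing, and dropping them also removes the "bytes" import above. For example:

a := ethCommon.HexToAddress("0x00000000000000000000000000000000000000ff")
b := ethCommon.HexToAddress("0x00000000000000000000000000000000000000FF")
fmt.Println(a == b) // true: array equality compares all 20 bytes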
@@ -524,8 +547,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 DepositAmount: big.NewInt(0),
 Type: common.TxTypeCreateAccountDeposit,
 }
-} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) &&
-l2Tx.ToBJJ != common.EmptyBJJComp {
+} else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
 // if idx exist for EthAddr&BJJ use it
 _, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
 l2Tx.TokenID)
@@ -551,7 +573,8 @@
 }
 if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
 // L2Tx discarded
-return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx"))
+return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
+"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
 }
 return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -560,7 +583,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
 addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
 for i := 0; i < len(l1CoordinatorTxs); i++ {
-if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) &&
+if l1CoordinatorTxs[i].FromEthAddr == addr &&
 l1CoordinatorTxs[i].TokenID == tokenID &&
 l1CoordinatorTxs[i].FromBJJ == bjj {
 return true

@@ -48,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 BJJ: coordUser.BJJ.Public().Compress(),
 AccountCreationAuth: nil,
 }
-fmt.Printf("%v", coordAccount)
+// fmt.Printf("%v\n", coordAccount)
 auth := common.AccountCreationAuth{
 EthAddr: coordUser.Addr,
 BJJ: coordUser.BJJ.Public().Compress(),
@@ -424,9 +424,9 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
 _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
 require.NoError(t, err)
-expectedTxID0 := "0x0248bae02b5c8c3847d312bfac3a33ae790616e888f2f711f22aeaff007cde92c2" // 1st TransferToEthAddr
-expectedTxID1 := "0x0249af018311a393c337ab9174ca2466cba489e49942b4ca4e5c530903671c4aef" // 1st Exit
-expectedTxID2 := "0x0228b93a261a0cdc62f35588c03bd179d31a0807c28afffdb6a7aaf0c4f017e4cf" // 2nd TransferToEthAddr
+expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965" // 1st TransferToEthAddr
+expectedTxID1 := "0x0200b18773dcf56f770d65870fb02041cb59a088fd35b7c3f3df69f8a250b99a42" // 1st Exit
+expectedTxID2 := "0x029720ff506153f970f120ac638cd7ee759eeff2c2012e7634a78e4fdc05c04a90" // 2nd TransferToEthAddr
 // batch2
 // prepare the PoolL2Txs
@@ -497,3 +497,134 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
 err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
 require.NoError(t, err)
 }
+func TestTransferToBjj(t *testing.T) {
+set := `
+Type: Blockchain
+AddToken(1)
+CreateAccountDeposit(0) Coord: 0
+CreateAccountDeposit(0) A: 1000
+CreateAccountDeposit(0) B: 1000
+CreateAccountDeposit(1) B: 1000
+> batchL1 // freeze L1User{1}
+> batchL1 // forge L1User{1}
+> block
+`
+chainID := uint16(0)
+tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+blocks, err := tc.GenerateBlocks(set)
+assert.NoError(t, err)
+hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+// restart nonces of TilContext, as will be set by generating directly
+// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
+tc.RestartNonces()
+addTokens(t, tc, txsel.l2db.DB())
+tpc := txprocessor.Config{
+NLevels: 16,
+MaxFeeTx: 10,
+MaxTx: 20,
+MaxL1Tx: 10,
+ChainID: chainID,
+}
+selectionConfig := &SelectionConfig{
+MaxL1UserTxs: 5,
+TxProcessorConfig: tpc,
+}
+// batch1 to create some accounts with positive balance
+l1UserTxs := []common.L1Tx{}
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+require.NoError(t, err)
+// Transfer is ToBJJ to a BJJ-only account that doesn't exist
+// and the coordinator will create it via L1CoordTx.
+batchPoolL2 := `
+Type: PoolL2
+PoolTransferToBJJ(0) A-B: 50 (126)
+`
+poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
+require.NoError(t, err)
+// add the PoolL2Txs to the l2DB
+addL2Txs(t, txsel, poolL2Txs)
+l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
+_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+require.NoError(t, err)
+assert.Equal(t, 4, len(oL1UserTxs))
+// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
+require.Equal(t, 1, len(oL1CoordTxs))
+assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
+assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
+// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
+assert.Equal(t, 1, len(oL2Txs))
+assert.Equal(t, 0, len(discardedL2Txs))
+err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+require.NoError(t, err)
+// Now the BJJ-only account for B is already created, so the transfer
+// happens without an L1CoordTx that creates the user account.
+batchPoolL2 = `
+Type: PoolL2
+PoolTransferToBJJ(0) A-B: 50 (126)
+`
+poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+require.NoError(t, err)
+addL2Txs(t, txsel, poolL2Txs)
+l1UserTxs = []common.L1Tx{}
+_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+require.NoError(t, err)
+assert.Equal(t, 0, len(oL1UserTxs))
+// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
+assert.Equal(t, 0, len(oL1CoordTxs))
+assert.Equal(t, 1, len(oL2Txs))
+assert.Equal(t, 0, len(discardedL2Txs))
+err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+require.NoError(t, err)
+// The transfer now is ToBJJ to a BJJ-only account that doesn't exist
+// and the coordinator will create it via L1CoordTx. Since it's a
+// transfer of a token for which the coordinator doesn't have a fee
+// account, another L1CoordTx will be created for the coordinator to
+// receive the fees.
+batchPoolL2 = `
+Type: PoolL2
+PoolTransferToBJJ(1) B-A: 50 (126)
+`
+poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+require.NoError(t, err)
+addL2Txs(t, txsel, poolL2Txs)
+l1UserTxs = []common.L1Tx{}
+_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
+require.NoError(t, err)
+assert.Equal(t, 0, len(oL1UserTxs))
+// We expect the coordinator to add an L1CoordTx to create an account
+// to receive the fees by the coordinator and another one for the
+// recipient of the l2tx
+assert.Equal(t, 2, len(oL1CoordTxs))
+// [0] Coordinator account creation for token 1
+assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
+// [1] User A BJJ-only account creation for token 1
+assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
+assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
+assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
+assert.Equal(t, 1, len(oL2Txs))
+assert.Equal(t, 0, len(discardedL2Txs))
+err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
+require.NoError(t, err)
+}