
Add HashGlobalInputs for ZKInputs

Add HashGlobalInputs for ZKInputs, compatible with the js implementation and the circom circuits version.

Compatible with hermeznetwork/commonjs at version: c6a1448db5
feature/sql-semaphore1
arnaucube committed 3 years ago
commit ce772b1d19
15 changed files with 501 additions and 52 deletions
  1. batchbuilder/batchbuilder.go (+8 -1)
  2. common/account.go (+2 -0)
  3. common/l1tx.go (+5 -3)
  4. common/l2tx.go (+1 -1)
  5. common/utils.go (+17 -0)
  6. common/utils_test.go (+21 -0)
  7. common/zk.go (+122 -5)
  8. common/zk_test.go (+1 -1)
  9. db/statedb/txprocessors.go (+38 -10)
  10. db/statedb/txprocessors_test.go (+259 -24)
  11. eth/rollup.go (+1 -1)
  12. synchronizer/synchronizer.go (+8 -1)
  13. test/til/README.md (+2 -2)
  14. txselector/txselector.go (+9 -2)
  15. txselector/txselector_test.go (+7 -1)

batchbuilder/batchbuilder.go (+8 -1)

@ -52,7 +52,14 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
ptOut, err := bb.localStateDB.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
//nolint:gomnd
ptc := statedb.ProcessTxsConfig{ // TODO TMP
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
}
ptOut, err := bb.localStateDB.ProcessTxs(ptc, coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
if err != nil {
return nil, err
}

common/account.go (+2 -0)

@ -125,11 +125,13 @@ type Account struct {
func (a *Account) String() string {
buf := bytes.NewBufferString("")
fmt.Fprintf(buf, "Idx: %v, ", a.Idx)
fmt.Fprintf(buf, "PublicKey: %s..., ", a.PublicKey.String()[:10])
fmt.Fprintf(buf, "EthAddr: %s..., ", a.EthAddr.String()[:10])
fmt.Fprintf(buf, "TokenID: %v, ", a.TokenID)
fmt.Fprintf(buf, "Nonce: %d, ", a.Nonce)
fmt.Fprintf(buf, "Balance: %s, ", a.Balance.String())
fmt.Fprintf(buf, "BatchNum: %v, ", a.BatchNum)
return buf.String()
}

common/l1tx.go (+5 -3)

@ -153,9 +153,11 @@ func (tx L1Tx) Tx() Tx {
func (tx *L1Tx) BytesGeneric() ([]byte, error) {
var b [L1UserTxBytesLen]byte
copy(b[0:20], tx.FromEthAddr.Bytes())
pkCompL := tx.FromBJJ.Compress()
pkCompB := SwapEndianness(pkCompL[:])
copy(b[20:52], pkCompB[:])
if tx.FromBJJ != nil {
pkCompL := tx.FromBJJ.Compress()
pkCompB := SwapEndianness(pkCompL[:])
copy(b[20:52], pkCompB[:])
}
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, err

common/l2tx.go (+1 -1)

@ -104,7 +104,7 @@ func L2TxsToPoolL2Txs(txs []L2Tx) []PoolL2Tx {
}
// Bytes encodes a L2Tx into []byte
func (tx *L2Tx) Bytes(nLevels int) ([]byte, error) {
func (tx L2Tx) Bytes(nLevels uint32) ([]byte, error) {
idxLen := nLevels / 8 //nolint:gomnd
b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd

common/utils.go (+17 -0)

@ -1,9 +1,11 @@
package common
import (
"encoding/hex"
"math/big"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/iden3/go-iden3-crypto/babyjub"
)
// SwapEndianness swaps the order of the bytes in the slice.
@ -19,3 +21,18 @@ func SwapEndianness(b []byte) []byte {
func EthAddrToBigInt(a ethCommon.Address) *big.Int {
return new(big.Int).SetBytes(a.Bytes())
}
// BJJFromStringWithChecksum parses a hex string in Hermez format (which has
// the Hermez checksum at the last byte, and is encoded in BigEndian) and
// returns the corresponding *babyjub.PublicKey. This method is not part of the
// spec, is used for importing javascript test vectors data.
func BJJFromStringWithChecksum(s string) (*babyjub.PublicKey, error) {
b, err := hex.DecodeString(s)
if err != nil {
return nil, err
}
pkBytes := SwapEndianness(b)
var pkComp babyjub.PublicKeyComp
copy(pkComp[:], pkBytes[:])
return pkComp.Decompress()
}

common/utils_test.go (+21 -0)

@ -0,0 +1,21 @@
package common
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
)
func TestBJJFromStringWithChecksum(t *testing.T) {
s := "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7"
pk, err := BJJFromStringWithChecksum(s)
assert.Nil(t, err)
sBytes, err := hex.DecodeString(s)
assert.Nil(t, err)
assert.Equal(t, hex.EncodeToString(SwapEndianness(sBytes)), pk.Compress().String())
// expected values computed with js implementation
assert.Equal(t, "2492816973395423007340226948038371729989170225696553239457870892535792679622", pk.X.String())
assert.Equal(t, "15238403086306505038849621710779816852318505119327426213168494964113886299863", pk.Y.String())
}

common/zk.go (+122 -5)

@ -3,10 +3,15 @@
package common
import (
"crypto/sha256"
"encoding/binary"
"encoding/json"
"fmt"
"math/big"
"github.com/hermeznetwork/hermez-node/log"
cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
"github.com/iden3/go-merkletree"
"github.com/mitchellh/mapstructure"
)
@ -21,6 +26,8 @@ type ZKMetadata struct {
MaxLevels uint32
// absolute maximum of L1 transaction allowed
MaxL1Tx uint32
// total txs allowed
MaxTx uint32
// Maximum number of Idxs where Fees can be send in a batch (currently
// is constant for all circuits: 64)
MaxFeeIdxs uint32
@ -28,6 +35,10 @@ type ZKMetadata struct {
L1TxsData [][]byte
L2TxsData [][]byte
ChainID uint16
NewLastIdxRaw Idx
NewStateRootRaw *merkletree.Hash
NewExitRootRaw *merkletree.Hash
}
// ZKInputs represents the inputs that will be used to generate the zkSNARK proof
@ -250,11 +261,14 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) {
}
// NewZKInputs returns a pointer to an initialized struct of ZKInputs
func NewZKInputs(nTx, maxFeeIdxs, nLevels int) *ZKInputs {
func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32) *ZKInputs {
zki := &ZKInputs{}
zki.Metadata.NTx = uint32(nTx)
zki.Metadata.MaxFeeIdxs = uint32(maxFeeIdxs)
zki.Metadata.NLevels = uint32(nLevels)
zki.Metadata.NTx = nTx
zki.Metadata.MaxFeeIdxs = maxFeeIdxs
zki.Metadata.NLevels = nLevels
zki.Metadata.MaxLevels = uint32(48) //nolint:gomnd
zki.Metadata.MaxL1Tx = maxL1Tx
zki.Metadata.MaxTx = maxTx
// General
zki.OldLastIdx = big.NewInt(0)
@ -359,10 +373,113 @@ func NewZKInputs(nTx, maxFeeIdxs, nLevels int) *ZKInputs {
// set all the elements, and if a transaction does not use a parameter, can be
// leaved as it is in the ZKInputs, as will be 0, so later when using the
// ZKInputs to generate the zkSnark proof there is no 'nil'/'null' values.
func newSlice(n int) []*big.Int {
func newSlice(n uint32) []*big.Int {
s := make([]*big.Int, n)
for i := 0; i < len(s); i++ {
s[i] = big.NewInt(0)
}
return s
}
// HashGlobalData returns the HashGlobalData
func (z ZKInputs) HashGlobalData() (*big.Int, error) {
b, err := z.ToHashGlobalData()
if err != nil {
return nil, err
}
h := sha256.New()
_, err = h.Write(b)
if err != nil {
return nil, err
}
r := new(big.Int).SetBytes(h.Sum(nil))
v := r.Mod(r, cryptoConstants.Q)
return v, nil
}
// ToHashGlobalData returns the data to be hashed in the method HashGlobalData
func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
var b []byte
bytesMaxLevels := int(z.Metadata.MaxLevels / 8) //nolint:gomnd
// [MAX_NLEVELS bits] oldLastIdx
oldLastIdx := make([]byte, bytesMaxLevels)
copy(oldLastIdx, z.OldLastIdx.Bytes())
b = append(b, SwapEndianness(oldLastIdx)...)
// [MAX_NLEVELS bits] newLastIdx
newLastIdx := make([]byte, bytesMaxLevels)
newLastIdxBytes, err := z.Metadata.NewLastIdxRaw.Bytes()
if err != nil {
return nil, err
}
copy(newLastIdx, newLastIdxBytes[len(newLastIdxBytes)-bytesMaxLevels:])
b = append(b, newLastIdx...)
// [256 bits] oldStRoot
oldStateRoot := make([]byte, 32)
copy(oldStateRoot, z.OldStateRoot.Bytes())
b = append(b, oldStateRoot...)
// [256 bits] newStateRoot
newStateRoot := make([]byte, 32)
copy(newStateRoot, z.Metadata.NewStateRootRaw.Bytes())
b = append(b, newStateRoot...)
// [256 bits] newExitRoot
newExitRoot := make([]byte, 32)
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 480)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ {
dataLen := int(l1TxDataLen) / 8 //nolint:gomnd
pos0 := i * dataLen
pos1 := i*dataLen + dataLen
copy(l1TxsData[pos0:pos1], z.Metadata.L1TxsData[i])
}
b = append(b, l1TxsData...)
// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ {
l2TxsData = append(l2TxsData, z.Metadata.L2TxsData[i]...)
}
if len(l2TxsData) > int(expectedL2TxsDataLen) {
return nil, fmt.Errorf("len(l2TxsData): %d, expected: %d", len(l2TxsData), expectedL2TxsDataLen)
}
l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
b = append(b, l2TxsPadding...)
b = append(b, l2TxsData...)
// [NLevels * MAX_TOKENS_FEE bits] feeTxsData
for i := 0; i < len(z.FeeIdxs); i++ {
var r []byte
padding := make([]byte, bytesMaxLevels/4) //nolint:gomnd
r = append(r, padding...)
feeIdx := make([]byte, bytesMaxLevels/2) //nolint:gomnd
feeIdxBytes := z.FeeIdxs[i].Bytes()
copy(feeIdx[len(feeIdx)-len(feeIdxBytes):], feeIdxBytes[:])
r = append(r, feeIdx...)
b = append(b, r...)
}
// [16 bits] chainID
var chainID [2]byte
binary.BigEndian.PutUint16(chainID[:], z.Metadata.ChainID)
b = append(b, chainID[:]...)
return b, nil
}
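(Illustrative note, not part of the commit.) The two new methods are meant to be used together: ToHashGlobalData packs the batch's public inputs into a byte string, and HashGlobalData reduces its sha256 digest modulo Q, which is what the commit title refers to as HashGlobalInputs. Below is a minimal, hypothetical sketch of a standalone caller, assuming the hermez-node packages at this revision; the Idx value and the zeroed roots are placeholders only.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
	"github.com/iden3/go-merkletree"
)

func main() {
	// nTx=32, maxL1Tx=16, maxTx=32, maxFeeIdxs=8, nLevels=32: the same circuit
	// parameters used by the new tests in this commit.
	zki := common.NewZKInputs(32, 16, 32, 8, 32)

	// The Raw metadata fields added in this commit must be set before hashing;
	// in the real flow ProcessTxs fills them, these are placeholders.
	zki.Metadata.NewLastIdxRaw = common.Idx(256)
	zki.Metadata.NewStateRootRaw = &merkletree.Hash{}
	zki.Metadata.NewExitRootRaw = &merkletree.Hash{}

	// HashGlobalData is sha256 over the packed ToHashGlobalData bytes, reduced
	// modulo the field order Q.
	b, err := zki.ToHashGlobalData()
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256(b)
	expected := new(big.Int).Mod(new(big.Int).SetBytes(digest[:]), cryptoConstants.Q)

	h, err := zki.HashGlobalData()
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Cmp(expected) == 0) // true
}
```

In the node itself, ProcessTxs sets NewLastIdxRaw, NewStateRootRaw, and NewExitRootRaw while processing the batch, as the db/statedb/txprocessors.go changes below show.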

common/zk_test.go (+1 -1)

@ -8,7 +8,7 @@ import (
)
func TestZKInputs(t *testing.T) {
zki := NewZKInputs(100, 24, 32)
zki := NewZKInputs(100, 16, 512, 24, 32)
_, err := json.Marshal(zki)
require.Nil(t, err)
// fmt.Println(string(s))

db/statedb/txprocessors.go (+38 -10)

@ -40,6 +40,14 @@ type ProcessTxOutput struct {
CollectedFees map[common.TokenID]*big.Int
}
// ProcessTxsConfig contains the config for ProcessTxs
type ProcessTxsConfig struct {
NLevels uint32
MaxFeeTx uint32
MaxTx uint32
MaxL1Tx uint32
}
// ProcessTxs process the given L1Txs & L2Txs applying the needed updates to
// the StateDB depending on the transaction Type. If StateDB
// type==TypeBatchBuilder, returns the common.ZKInputs to generate the
@ -49,7 +57,7 @@ type ProcessTxOutput struct {
// the HistoryDB, and adds Nonce & TokenID to the L2Txs.
// And if TypeSynchronizer returns an array of common.Account with all the
// created accounts.
func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (ptOut *ProcessTxOutput, err error) {
func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) (ptOut *ProcessTxOutput, err error) {
defer func() {
if err == nil {
err = s.MakeCheckpoint()
@ -80,9 +88,8 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
exits := make([]processedExit, nTx)
if s.typ == TypeBatchBuilder {
maxFeeTx := 64 // TODO this value will be a parameter
s.zki = common.NewZKInputs(nTx, maxFeeTx, s.mt.MaxLevels())
s.zki.OldLastIdx = (s.idx - 1).BigInt()
s.zki = common.NewZKInputs(uint32(nTx), ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels)
s.zki.OldLastIdx = s.idx.BigInt()
s.zki.OldStateRoot = s.mt.Root().BigInt()
}
@ -197,6 +204,13 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
}
s.i++
}
if s.zki != nil {
l2TxData, err := l2txs[i].L2Tx().Bytes(s.zki.Metadata.NLevels)
if err != nil {
return nil, err
}
s.zki.Metadata.L2TxsData = append(s.zki.Metadata.L2TxsData, l2TxData)
}
}
// distribute the AccumulatedFees from the processed L2Txs into the
@ -302,6 +316,8 @@ func (s *StateDB) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinatortxs
return nil, err
}
s.zki.FeePlanTokens = tokenIDs
s.zki.Metadata.NewStateRootRaw = s.mt.Root()
s.zki.Metadata.NewExitRootRaw = exitTree.Root()
// s.zki.ISInitStateRootFee = s.mt.Root().BigInt()
@ -569,6 +585,8 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error {
}
s.zki.OldKey1[s.i] = p.OldKey.BigInt()
s.zki.OldValue1[s.i] = p.OldValue.BigInt()
s.zki.Metadata.NewLastIdxRaw = s.idx + 1
}
s.idx = s.idx + 1
@ -657,12 +675,6 @@ func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, coll
log.Error(err)
return err
}
accReceiver, err := s.GetAccount(auxToIdx)
if err != nil {
log.Error(err)
return err
}
if !tx.IsL1 {
// increment nonce
accSender.Nonce++
@ -692,6 +704,20 @@ func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, coll
accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.Amount)
}
var accReceiver *common.Account
if tx.FromIdx == auxToIdx {
// if Sender is the Receiver, reuse 'accSender' pointer,
// because in the DB the account for 'auxToIdx' won't be
// updated yet
accReceiver = accSender
} else {
accReceiver, err = s.GetAccount(auxToIdx)
if err != nil {
log.Error(err)
return err
}
}
// add amount-feeAmount to the receiver
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
@ -772,6 +798,8 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
}
s.zki.OldKey1[s.i] = p.OldKey.BigInt()
s.zki.OldValue1[s.i] = p.OldValue.BigInt()
s.zki.Metadata.NewLastIdxRaw = s.idx + 1
}
// update receiver account in localStateDB
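
A note on the applyTransfer change above: when FromIdx equals auxToIdx the sender and the receiver are the same account, and fetching the receiver again from the StateDB would return a copy that does not yet contain the sender-side balance update. The toy sketch below (names are hypothetical, not hermez-node code) shows why reusing the sender pointer is needed.

```go
package main

import (
	"fmt"
	"math/big"
)

// toyAccount and toyDB are hypothetical stand-ins for common.Account and the
// StateDB; they only illustrate the stale-copy issue handled by applyTransfer.
type toyAccount struct {
	Balance *big.Int
}

type toyDB map[int]*toyAccount

// get returns an independent copy of the stored account, like reading it from
// the database.
func (db toyDB) get(idx int) *toyAccount {
	return &toyAccount{Balance: new(big.Int).Set(db[idx].Balance)}
}

func main() {
	db := toyDB{256: {Balance: big.NewInt(100)}}
	amount := big.NewInt(10)

	// Buggy flow: sender and receiver share the same Idx but are fetched as
	// two independent copies.
	sender := db.get(256)
	sender.Balance.Sub(sender.Balance, amount) // sender copy: 90
	receiver := db.get(256)                    // stale copy: still 100
	receiver.Balance.Add(receiver.Balance, amount)
	fmt.Println(receiver.Balance) // 110: the subtraction was lost

	// Fixed flow (what the new branch in applyTransfer does): reuse the
	// sender pointer when FromIdx == auxToIdx.
	sender = db.get(256)
	sender.Balance.Sub(sender.Balance, amount)
	receiver = sender // same *toyAccount, both updates land on one struct
	receiver.Balance.Add(receiver.Balance, amount)
	fmt.Println(receiver.Balance) // 100: balance preserved
}
```

With two independent copies, the receiver copy overwrites the sender-side subtraction when it is written back; sharing the pointer keeps both updates on one struct, which is what the new branch in applyTransfer does.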

db/statedb/txprocessors_test.go (+259 -24)

@ -1,12 +1,15 @@
package statedb
import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"os"
"testing"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test/til"
@ -36,28 +39,34 @@ func TestProcessTxsBalances(t *testing.T) {
// Coordinator Idx where to send the fees
coordIdxs := []common.Idx{256, 257}
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
}
log.Debug("block:0 batch:0, only L1CoordinatorTxs")
_, err = sdb.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
_, err = sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.Nil(t, err)
log.Debug("block:0 batch:1")
l1UserTxs := []common.L1Tx{}
l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
log.Debug("block:0 batch:2")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
log.Debug("block:0 batch:3")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[3].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
checkBalance(t, tc, sdb, "A", 1, "500")
@ -65,7 +74,7 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:0 batch:4")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[4].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
checkBalance(t, tc, sdb, "A", 1, "500")
@ -73,7 +82,7 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:0 batch:5")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[5].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "A", 0, "600")
checkBalance(t, tc, sdb, "A", 1, "500")
@ -82,7 +91,7 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:0 batch:6")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[6].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[6].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "10")
checkBalance(t, tc, sdb, "Coord", 1, "20")
@ -96,7 +105,7 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:0 batch:7")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[7].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[7].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[7].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "35")
checkBalance(t, tc, sdb, "Coord", 1, "30")
@ -111,7 +120,7 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:1 batch:0")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "75")
checkBalance(t, tc, sdb, "Coord", 1, "30")
@ -126,14 +135,14 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:1 batch:1")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
// use Set of PoolL2 txs
poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow0)
assert.Nil(t, err)
_, err = sdb.ProcessTxs(coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "Coord", 0, "105")
checkBalance(t, tc, sdb, "Coord", 1, "40")
@ -175,17 +184,24 @@ func TestProcessTxsSynchronizer(t *testing.T) {
// Idx of user 'A'
idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
}
// Process the 1st batch, which contains the L1CoordinatorTxs necessary
// to create the Coordinator accounts to receive the fees
log.Debug("block:0 batch:0, only L1CoordinatorTxs")
ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
ptOut, err := sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.Nil(t, err)
assert.Equal(t, 4, len(ptOut.CreatedAccounts))
assert.Equal(t, 0, len(ptOut.CollectedFees))
log.Debug("block:0 batch:1")
l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].Rollup.L1UserTxs,
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[0].Rollup.L1UserTxs,
blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
assert.Equal(t, 0, len(ptOut.ExitInfos))
@ -201,7 +217,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
log.Debug("block:0 batch:2")
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
assert.Equal(t, 0, len(ptOut.ExitInfos))
assert.Equal(t, 0, len(ptOut.CreatedAccounts))
@ -221,7 +237,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
assert.Equal(t, common.Nonce(0), l2Txs[1].Nonce)
assert.Equal(t, common.Nonce(0), l2Txs[2].Nonce)
ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
// after processing expect l2Txs[0:2].Nonce!=0 and has expected value
@ -242,7 +258,7 @@ func TestProcessTxsSynchronizer(t *testing.T) {
log.Debug("block:1 batch:1")
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[1].Rollup.L1UserTxs,
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[1].Rollup.L1UserTxs,
blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
@ -287,17 +303,24 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
// Idx of user 'A'
idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 32,
}
// Process the 1st batch, which contains the L1CoordinatorTxs necessary
// to create the Coordinator accounts to receive the fees
log.Debug("block:0 batch:0, only L1CoordinatorTxs")
ptOut, err := sdb.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
ptOut, err := sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.Nil(t, err)
// expect 0 at CreatedAccount, as is only computed when StateDB.Type==TypeSynchronizer
assert.Equal(t, 0, len(ptOut.CreatedAccounts))
log.Debug("block:0 batch:1")
l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
ptOut, err = sdb.ProcessTxs(coordIdxs, blocks[0].Rollup.L1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[0].Rollup.L1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
assert.Equal(t, 0, len(ptOut.ExitInfos))
assert.Equal(t, 0, len(ptOut.CreatedAccounts))
@ -307,7 +330,7 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
log.Debug("block:0 batch:2")
l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
ptOut, err = sdb.ProcessTxs(coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
assert.Equal(t, 0, len(ptOut.ExitInfos))
assert.Equal(t, 0, len(ptOut.CreatedAccounts))
@ -317,7 +340,7 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
log.Debug("block:1 batch:0")
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
acc, err = sdb.GetAccount(idxA1)
require.Nil(t, err)
@ -325,7 +348,7 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
log.Debug("block:1 batch:1")
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
_, err = sdb.ProcessTxs(coordIdxs, blocks[1].Rollup.L1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
_, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[1].Rollup.L1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.Nil(t, err)
acc, err = sdb.GetAccount(idxA1)
assert.Nil(t, err)
@ -378,24 +401,236 @@ func TestZKInputsGeneration(t *testing.T) {
// Coordinator Idx where to send the fees
coordIdxs := []common.Idx{256}
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 8,
MaxTx: 32,
MaxL1Tx: 16,
}
log.Debug("block:0 batch:0, only L1UserTx")
_, err = sdb.ProcessTxs(nil, blocks[0].Rollup.L1UserTxs, nil, nil)
_, err = sdb.ProcessTxs(ptc, nil, blocks[0].Rollup.L1UserTxs, nil, nil)
require.Nil(t, err)
log.Debug("block:0 batch:1, only L1CoordinatorTxs")
_, err = sdb.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, nil)
_, err = sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, nil)
require.Nil(t, err)
log.Debug("block:0 batch:2, only L2Txs")
l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
ptOut, err := sdb.ProcessTxs(coordIdxs, nil, nil, l2Txs)
ptOut, err := sdb.ProcessTxs(ptc, coordIdxs, nil, nil, l2Txs)
require.Nil(t, err)
checkBalance(t, tc, sdb, "A", 1, "2")
s, err := json.Marshal(ptOut.ZKInputs)
require.Nil(t, err)
debug := false
debug = true
if debug {
fmt.Println(string(s))
}
}
func TestProcessTxsRootTestVectors(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
assert.Nil(t, err)
// same values than in the js test
bjj0, err := common.BJJFromStringWithChecksum("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7")
assert.Nil(t, err)
l1Txs := []common.L1Tx{
{
FromIdx: 0,
LoadAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: bjj0,
FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
ToIdx: 0,
Type: common.TxTypeCreateAccountDeposit,
},
}
l2Txs := []common.PoolL2Tx{
{
FromIdx: 256,
ToIdx: 256,
TokenID: 1,
Amount: big.NewInt(1000),
Nonce: 0,
Fee: 126,
Type: common.TxTypeTransfer,
},
}
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 8,
MaxTx: 32,
MaxL1Tx: 16,
}
_, err = sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
require.Nil(t, err)
assert.Equal(t, "9827704113668630072730115158977131501210702363656902211840117643154933433410", sdb.mt.Root().BigInt().String())
}
func TestZKInputsHashTestVector0(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
assert.Nil(t, err)
// same values than in the js test
bjj0, err := common.BJJFromStringWithChecksum("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7")
assert.Nil(t, err)
l1Txs := []common.L1Tx{
{
FromIdx: 0,
// LoadAmount: big.NewInt(10400),
LoadAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: bjj0,
FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
ToIdx: 0,
Type: common.TxTypeCreateAccountDeposit,
},
}
l2Txs := []common.PoolL2Tx{
{
FromIdx: 256,
ToIdx: 256,
TokenID: 1,
Amount: big.NewInt(1000),
Nonce: 0,
Fee: 126,
Type: common.TxTypeTransfer,
},
}
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 8,
MaxTx: 32,
MaxL1Tx: 16,
}
ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
require.Nil(t, err)
// check expected account keys values from tx inputs
acc, err := sdb.GetAccount(common.Idx(256))
require.Nil(t, err)
assert.Equal(t, "d746824f7d0ac5044a573f51b278acb56d823bec39551d1d7bf7378b68a1b021", acc.PublicKey.Compress().String())
assert.Equal(t, "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf", acc.EthAddr.Hex())
// check that there no exist more accounts
_, err = sdb.GetAccount(common.Idx(257))
require.NotNil(t, err)
ptOut.ZKInputs.FeeIdxs[0] = common.Idx(256).BigInt()
toHash, err := ptOut.ZKInputs.ToHashGlobalData()
assert.Nil(t, err)
// value from js test vector
expectedToHash := "0000000000ff000000000100000000000000000000000000000000000000000000000000000000000000000015ba488d749f6b891d29d0bf3a72481ec812e4d4ecef2bf7a3fc64f3c010444200000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010003e87e00000100000000000000000000000000000000000000000000000000000000000000"
// checks are splitted to find the difference easier
assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000])
assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000])
assert.Equal(t, expectedToHash[2000:], hex.EncodeToString(toHash)[2000:])
h, err := ptOut.ZKInputs.HashGlobalData()
require.Nil(t, err)
// value from js test vector
assert.Equal(t, "80757288244566854497474223360206077562032050734432637237701187686677568506", h.String())
}
func TestZKInputsHashTestVector1(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
assert.Nil(t, err)
// same values than in the js test
bjj0, err := common.BJJFromStringWithChecksum("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7")
assert.Nil(t, err)
bjj1, err := common.BJJFromStringWithChecksum("093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d")
assert.Nil(t, err)
l1Txs := []common.L1Tx{
{
FromIdx: 0,
// LoadAmount: big.NewInt(10400),
LoadAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: bjj0,
FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
ToIdx: 0,
Type: common.TxTypeCreateAccountDeposit,
},
{
FromIdx: 0,
LoadAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: bjj1,
FromEthAddr: ethCommon.HexToAddress("0x2b5ad5c4795c026514f8317c7a215e218dccd6cf"),
ToIdx: 0,
Type: common.TxTypeCreateAccountDeposit,
},
}
l2Txs := []common.PoolL2Tx{
{
FromIdx: 257,
ToIdx: 256,
TokenID: 1,
Amount: big.NewInt(1000),
Nonce: 0,
Fee: 137,
Type: common.TxTypeTransfer,
},
}
ptc := ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 8,
MaxTx: 32,
MaxL1Tx: 16,
}
ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
require.Nil(t, err)
// check expected account keys values from tx inputs
acc, err := sdb.GetAccount(common.Idx(256))
require.Nil(t, err)
assert.Equal(t, "d746824f7d0ac5044a573f51b278acb56d823bec39551d1d7bf7378b68a1b021", acc.PublicKey.Compress().String())
assert.Equal(t, "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf", acc.EthAddr.Hex())
acc, err = sdb.GetAccount(common.Idx(257))
require.Nil(t, err)
assert.Equal(t, "4d05c307400c65795f02db96b1b81c60386fd53e947d9d3f749f3d99b1853909", acc.PublicKey.Compress().String())
assert.Equal(t, "0x2B5AD5c4795c026514f8317c7a215E218DcCD6cF", acc.EthAddr.Hex())
// check that there no exist more accounts
_, err = sdb.GetAccount(common.Idx(258))
require.NotNil(t, err)
ptOut.ZKInputs.FeeIdxs[0] = common.Idx(257).BigInt()
toHash, err := ptOut.ZKInputs.ToHashGlobalData()
assert.Nil(t, err)
// value from js test vector
expectedToHash := "0000000000ff0000000001010000000000000000000000000000000000000000000000000000000000000000304a3f3aef4f416cca887aab7265227449077627138345c2eb25bf8ff946b09500000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000010003e88900000101000000000000000000000000000000000000000000000000000000000000"
// checks are splitted to find the difference easier
assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000])
assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000])
assert.Equal(t, expectedToHash[2000:], hex.EncodeToString(toHash)[2000:])
h, err := ptOut.ZKInputs.HashGlobalData()
require.Nil(t, err)
// value from js test vector
assert.Equal(t, "10900521462378877053056992084240080637954406133884857263674494661625916419481", h.String())
}

eth/rollup.go (+1 -1)

@ -245,7 +245,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs) (tx *types.T
var l2DataBytes []byte
for i := 0; i < len(args.L2TxsData); i++ {
l2 := args.L2TxsData[i]
bytesl2, err := l2.Bytes(int(nLevels))
bytesl2, err := l2.Bytes(uint32(nLevels))
if err != nil {
return nil, err
}

synchronizer/synchronizer.go (+8 -1)

@ -616,7 +616,14 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
poolL2Txs := common.L2TxsToPoolL2Txs(forgeBatchArgs.L2TxsData) // NOTE: This is a big ugly, find a better way
// ProcessTxs updates poolL2Txs adding: Nonce (and also TokenID, but we don't use it).
processTxsOut, err := s.stateDB.ProcessTxs(forgeBatchArgs.FeeIdxCoordinator, l1UserTxs,
//nolint:gomnd
ptc := statedb.ProcessTxsConfig{ // TODO TMP
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
}
processTxsOut, err := s.stateDB.ProcessTxs(ptc, forgeBatchArgs.FeeIdxCoordinator, l1UserTxs,
batchData.L1CoordinatorTxs, poolL2Txs)
if err != nil {
return nil, err

test/til/README.md (+2 -2)

@ -90,11 +90,11 @@ PoolExit(1) A: 3 (1)
tc := til.NewContext(eth.RollupConstMaxL1UserTx)
// generate Blockchain blocks data from the common.SetBlockcahin0 instructions set
blocks, err = tc.GenerateBlocks(common.SetBlockchain0)
blocks, err = tc.GenerateBlocks(common.SetBlockchainMinimumFlow0)
assert.Nil(t, err)
// generate PoolL2 transactions data from the common.SetPool0 instructions set
poolL2Txs, err = tc.GenerateBlocks(common.SetPool0)
poolL2Txs, err = tc.GeneratePoolL2Txs(common.SetPoolL2MinimumFlow0)
assert.Nil(t, err)
```

txselector/txselector.go (+9 -2)

@ -211,11 +211,18 @@ func (txsel *TxSelector) GetL1L2TxSelection(coordIdxs []common.Idx, batchNum com
}
// get most profitable L2-tx
maxL2Txs := txsel.MaxTxs - uint64(len(l1CoordinatorTxs)) // - len(l1UserTxs)
maxL2Txs := txsel.MaxTxs - uint64(len(l1CoordinatorTxs)) // - len(l1UserTxs) // TODO if there are L1UserTxs take them in to account
l2Txs := txsel.getL2Profitable(validTxs, maxL2Txs)
//nolint:gomnd
ptc := statedb.ProcessTxsConfig{ // TODO TMP
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
}
// process the txs in the local AccountsDB
_, err = txsel.localAccountsDB.ProcessTxs(coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
_, err = txsel.localAccountsDB.ProcessTxs(ptc, coordIdxs, l1Txs, l1CoordinatorTxs, l2Txs)
if err != nil {
return nil, nil, nil, err
}

txselector/txselector_test.go (+7 -1)

@ -83,9 +83,15 @@ func TestGetL2TxSelection(t *testing.T) {
}
addTokens(t, tokens, txsel.l2db.DB())
ptc := statedb.ProcessTxsConfig{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
}
// Process the 1st batch, which contains the L1CoordinatorTxs necessary
// to create the Coordinator accounts to receive the fees
_, err = txsel.localAccountsDB.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
_, err = txsel.localAccountsDB.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.Nil(t, err)
// add the 1st batch of transactions to the TxSelector
