From dde92504293d977b08861d086bcb1651ca0a21fb Mon Sep 17 00:00:00 2001
From: arnaucube
Date: Wed, 25 Nov 2020 20:07:37 +0100
Subject: [PATCH] Update Common & StateDB & ZKInputs to latest protocol version

- Add InvalidData flag to L1Tx
- Add BytesDataAvailability to L1Tx
- Update ZKInputs & HashGlobalInputs to latest spec of the protocol (massive
  migrations)
- TxProcessor checks correctness of L1Txs

Compatible with hermeznetwork/commonjs v0.0.4
(https://github.com/hermeznetwork/commonjs/commit/c345239bba7171eb5089fea5ef76d8e741c0ef1f)
---
 common/batch.go                   |   5 ++
 common/l1tx.go                    |  46 ++++++++--
 common/l1tx_test.go               |  22 +++++
 common/l2tx.go                    |   4 +-
 common/l2tx_test.go               |   2 +-
 common/tx.go                      |   1 -
 common/zk.go                      |  34 ++++++--
 common/zk_test.go                 |   3 +-
 db/historydb/historydb.go         |  53 +++++++-----
 db/historydb/views.go             |  36 ++++----
 db/migrations/0001.sql            |   2 +
 db/statedb/txprocessors.go        | 139 ++++++++++++++++++++++++++----
 db/statedb/txprocessors_test.go   | 136 +++++++++++++++++++++++++++--
 eth/rollup.go                     |   2 +-
 synchronizer/synchronizer_test.go |  11 ++-
 test/til/sets.go                  |  20 +++++
 test/til/txs.go                   |  16 ++--
 17 files changed, 442 insertions(+), 90 deletions(-)

diff --git a/common/batch.go b/common/batch.go
index 323cb9e..a75ae88 100644
--- a/common/batch.go
+++ b/common/batch.go
@@ -36,6 +36,11 @@ func (bn BatchNum) Bytes() []byte {
 	return batchNumBytes[:]
 }
 
+// BigInt returns a *big.Int representing the BatchNum
+func (bn BatchNum) BigInt() *big.Int {
+	return big.NewInt(int64(bn))
+}
+
 // BatchNumFromBytes returns BatchNum from a []byte
 func BatchNumFromBytes(b []byte) (BatchNum, error) {
 	if len(b) != batchNumBytesLen {
diff --git a/common/l1tx.go b/common/l1tx.go
index 13ff925..3f9e003 100644
--- a/common/l1tx.go
+++ b/common/l1tx.go
@@ -37,10 +37,14 @@ type L1Tx struct {
 	ToIdx       Idx       `meddler:"to_idx"` // ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer
 	TokenID     TokenID   `meddler:"token_id"`
 	Amount      *big.Int  `meddler:"amount,bigint"`
-	LoadAmount  *big.Int  `meddler:"load_amount,bigint"`
-	EthBlockNum int64     `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
-	Type        TxType    `meddler:"type"`
-	BatchNum    *BatchNum `meddler:"batch_num"`
+	// EffectiveAmount only applies to L1UserTx.
+	EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"`
+	LoadAmount      *big.Int `meddler:"load_amount,bigint"`
+	// EffectiveLoadAmount only applies to L1UserTx.
+	EffectiveLoadAmount *big.Int  `meddler:"effective_load_amount,bigintnull"`
+	EthBlockNum         int64     `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
+	Type                TxType    `meddler:"type"`
+	BatchNum            *BatchNum `meddler:"batch_num"`
 }
 
 // NewL1Tx returns the given L1Tx with the TxId & Type parameters calculated
@@ -117,7 +121,7 @@ func (tx *L1Tx) CalcTxID() (*TxID, error) {
 
 // Tx returns a *Tx from the L1Tx
 func (tx L1Tx) Tx() Tx {
-	f := new(big.Float).SetInt(tx.Amount)
+	f := new(big.Float).SetInt(tx.EffectiveAmount)
 	amountFloat, _ := f.Float64()
 	userOrigin := new(bool)
 	*userOrigin = tx.UserOrigin
@@ -128,14 +132,14 @@ func (tx L1Tx) Tx() Tx {
 		Position:        tx.Position,
 		FromIdx:         tx.FromIdx,
 		ToIdx:           tx.ToIdx,
-		Amount:          tx.Amount,
+		Amount:          tx.EffectiveAmount,
 		AmountFloat:     amountFloat,
 		TokenID:         tx.TokenID,
 		ToForgeL1TxsNum: tx.ToForgeL1TxsNum,
 		UserOrigin:      userOrigin,
 		FromEthAddr:     tx.FromEthAddr,
 		FromBJJ:         tx.FromBJJ,
-		LoadAmount:      tx.LoadAmount,
+		LoadAmount:      tx.EffectiveLoadAmount,
 		EthBlockNum:     tx.EthBlockNum,
 	}
 	if tx.LoadAmount != nil {
@@ -183,6 +187,34 @@ func (tx L1Tx) TxCompressedData() (*big.Int, error) {
 	return bi, nil
 }
 
+// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
+func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
+	idxLen := nLevels / 8 //nolint:gomnd
+
+	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+
+	fromIdxBytes, err := tx.FromIdx.Bytes()
+	if err != nil {
+		return nil, err
+	}
+	copy(b[0:idxLen], fromIdxBytes[6-idxLen:])
+	toIdxBytes, err := tx.ToIdx.Bytes()
+	if err != nil {
+		return nil, err
+	}
+	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
+
+	if tx.EffectiveAmount != nil {
+		amountFloat16, err := NewFloat16(tx.EffectiveAmount)
+		if err != nil {
+			return nil, err
+		}
+		copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
+	}
+	// fee = 0 (as this is an L1Tx) b[10:11]
+	return b[:], nil
+}
+
 // BytesGeneric returns the generic representation of a L1Tx.
This method is // used to compute the []byte representation of a L1UserTx, and also to compute // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method diff --git a/common/l1tx_test.go b/common/l1tx_test.go index 9dbee1f..ead8c7e 100644 --- a/common/l1tx_test.go +++ b/common/l1tx_test.go @@ -65,6 +65,28 @@ func TestL1TxCompressedData(t *testing.T) { assert.Equal(t, "050004000000000003000000000002000100000000", hex.EncodeToString(txCompressedData.Bytes())) } +func TestBytesDataAvailability(t *testing.T) { + tx := L1Tx{ + FromIdx: 2, + ToIdx: 3, + Amount: big.NewInt(4), + TokenID: 5, + } + txCompressedData, err := tx.BytesDataAvailability(32) + assert.Nil(t, err) + assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData)) + + tx = L1Tx{ + FromIdx: 2, + ToIdx: 3, + EffectiveAmount: big.NewInt(4), + TokenID: 5, + } + txCompressedData, err = tx.BytesDataAvailability(32) + assert.Nil(t, err) + assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData)) +} + func TestL1userTxByteParsers(t *testing.T) { var pkComp babyjub.PublicKeyComp pkCompL := []byte("0x56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c") diff --git a/common/l2tx.go b/common/l2tx.go index db5515d..a34c36e 100644 --- a/common/l2tx.go +++ b/common/l2tx.go @@ -103,8 +103,8 @@ func L2TxsToPoolL2Txs(txs []L2Tx) []PoolL2Tx { return r } -// Bytes encodes a L2Tx into []byte -func (tx L2Tx) Bytes(nLevels uint32) ([]byte, error) { +// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability +func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) { idxLen := nLevels / 8 //nolint:gomnd b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd diff --git a/common/l2tx_test.go b/common/l2tx_test.go index af406ef..4fdd04c 100644 --- a/common/l2tx_test.go +++ b/common/l2tx_test.go @@ -32,7 +32,7 @@ func TestL2TxByteParsers(t *testing.T) { } // Data from the compatibility test expected := "00000101000001002b16c9" - encodedData, err := l2Tx.Bytes(32) + encodedData, err := l2Tx.BytesDataAvailability(32) require.Nil(t, err) assert.Equal(t, expected, hex.EncodeToString(encodedData)) diff --git a/common/tx.go b/common/tx.go index cd0311e..e6e9d26 100644 --- a/common/tx.go +++ b/common/tx.go @@ -117,7 +117,6 @@ const ( ) // Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx & PoolL2Tx -// TODO: this should be changed for "mini Tx" type Tx struct { // Generic IsL1 bool `meddler:"is_l1"` diff --git a/common/zk.go b/common/zk.go index fe7f3c4..a183310 100644 --- a/common/zk.go +++ b/common/zk.go @@ -32,9 +32,10 @@ type ZKMetadata struct { // is constant for all circuits: 64) MaxFeeIdxs uint32 - L1TxsData [][]byte - L2TxsData [][]byte - ChainID uint16 + L1TxsData [][]byte + L1TxsDataAvailability [][]byte + L2TxsData [][]byte + ChainID uint16 NewLastIdxRaw Idx NewStateRootRaw *merkletree.Hash @@ -49,6 +50,8 @@ type ZKInputs struct { // General // + // CurrentNumBatch is the current batch number processed + CurrentNumBatch *big.Int `json:"currentNumBatch"` // uint32 // inputs for final `hashGlobalInputs` // OldLastIdx is the last index assigned to an account OldLastIdx *big.Int `json:"oldLastIdx"` // uint64 (max nLevels bits) @@ -72,6 +75,9 @@ type ZKInputs struct { TxCompressedData []*big.Int `json:"txCompressedData"` // big.Int (max 251 bits), len: [nTx] // TxCompressedDataV2, only used in L2Txs, in L1Txs is set to 0 TxCompressedDataV2 []*big.Int `json:"txCompressedDataV2"` // big.Int (max 193 bits), len: [nTx] 
+ // MaxNumBatch is the maximum allowed batch number when the transaction + // can be processed + MaxNumBatch []*big.Int `json:"maxNumBatch"` // uint32 // FromIdx FromIdx []*big.Int `json:"fromIdx"` // uint64 (max nLevels bits), len: [nTx] @@ -266,7 +272,7 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) { } // NewZKInputs returns a pointer to an initialized struct of ZKInputs -func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32) *ZKInputs { +func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs { zki := &ZKInputs{} zki.Metadata.NTx = nTx zki.Metadata.MaxFeeIdxs = maxFeeIdxs @@ -276,15 +282,17 @@ func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32) *ZKInputs { zki.Metadata.MaxTx = maxTx // General + zki.CurrentNumBatch = currentNumBatch zki.OldLastIdx = big.NewInt(0) zki.OldStateRoot = big.NewInt(0) - zki.GlobalChainID = big.NewInt(0) + zki.GlobalChainID = big.NewInt(0) // TODO pass by parameter zki.FeeIdxs = newSlice(maxFeeIdxs) zki.FeePlanTokens = newSlice(maxFeeIdxs) // Txs zki.TxCompressedData = newSlice(nTx) zki.TxCompressedDataV2 = newSlice(nTx) + zki.MaxNumBatch = newSlice(nTx) zki.FromIdx = newSlice(nTx) zki.AuxFromIdx = newSlice(nTx) zki.ToIdx = newSlice(nTx) @@ -451,6 +459,12 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) { } b = append(b, l1TxsData...) + var l1TxsDataAvailability []byte + for i := 0; i < len(z.Metadata.L1TxsDataAvailability); i++ { + l1TxsDataAvailability = append(l1TxsDataAvailability, z.Metadata.L1TxsDataAvailability[i]...) + } + b = append(b, l1TxsDataAvailability...) + // [MAX_TX*(2*NLevels + 24) bits] L2TxsData var l2TxsData []byte l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd @@ -463,9 +477,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) { return nil, fmt.Errorf("len(l2TxsData): %d, expected: %d", len(l2TxsData), expectedL2TxsDataLen) } - l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd - b = append(b, l2TxsPadding...) b = append(b, l2TxsData...) + l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd + b = append(b, l2TxsPadding...) // [NLevels * MAX_TOKENS_FEE bits] feeTxsData for i := 0; i < len(z.FeeIdxs); i++ { @@ -486,5 +500,11 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) { binary.BigEndian.PutUint16(chainID[:], z.Metadata.ChainID) b = append(b, chainID[:]...) + // [32 bits] currentNumBatch + currNumBatchBytes := z.CurrentNumBatch.Bytes() + var currNumBatch [4]byte + copy(currNumBatch[4-len(currNumBatchBytes):], currNumBatchBytes) + b = append(b, currNumBatch[:]...) 
+ return b, nil } diff --git a/common/zk_test.go b/common/zk_test.go index 283dfbb..7950347 100644 --- a/common/zk_test.go +++ b/common/zk_test.go @@ -2,13 +2,14 @@ package common import ( "encoding/json" + "math/big" "testing" "github.com/stretchr/testify/require" ) func TestZKInputs(t *testing.T) { - zki := NewZKInputs(100, 16, 512, 24, 32) + zki := NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1)) _, err := json.Marshal(zki) require.Nil(t, err) // fmt.Println(string(s)) diff --git a/db/historydb/historydb.go b/db/historydb/historydb.go index 7afa09b..15f379e 100644 --- a/db/historydb/historydb.go +++ b/db/historydb/historydb.go @@ -783,24 +783,26 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error { loadAmountFloat, _ := laf.Float64() txs = append(txs, txWrite{ // Generic - IsL1: true, - TxID: l1txs[i].TxID, - Type: l1txs[i].Type, - Position: l1txs[i].Position, - FromIdx: &l1txs[i].FromIdx, - ToIdx: l1txs[i].ToIdx, - Amount: l1txs[i].Amount, - AmountFloat: amountFloat, - TokenID: l1txs[i].TokenID, - BatchNum: l1txs[i].BatchNum, - EthBlockNum: l1txs[i].EthBlockNum, + IsL1: true, + TxID: l1txs[i].TxID, + Type: l1txs[i].Type, + Position: l1txs[i].Position, + FromIdx: &l1txs[i].FromIdx, + ToIdx: l1txs[i].ToIdx, + Amount: l1txs[i].Amount, + EffectiveAmount: l1txs[i].EffectiveAmount, + AmountFloat: amountFloat, + TokenID: l1txs[i].TokenID, + BatchNum: l1txs[i].BatchNum, + EthBlockNum: l1txs[i].EthBlockNum, // L1 - ToForgeL1TxsNum: l1txs[i].ToForgeL1TxsNum, - UserOrigin: &l1txs[i].UserOrigin, - FromEthAddr: &l1txs[i].FromEthAddr, - FromBJJ: l1txs[i].FromBJJ, - LoadAmount: l1txs[i].LoadAmount, - LoadAmountFloat: &loadAmountFloat, + ToForgeL1TxsNum: l1txs[i].ToForgeL1TxsNum, + UserOrigin: &l1txs[i].UserOrigin, + FromEthAddr: &l1txs[i].FromEthAddr, + FromBJJ: l1txs[i].FromBJJ, + LoadAmount: l1txs[i].LoadAmount, + EffectiveLoadAmount: l1txs[i].EffectiveLoadAmount, + LoadAmountFloat: &loadAmountFloat, }) } return hdb.addTxs(d, txs) @@ -846,6 +848,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error { from_idx, to_idx, amount, + effective_amount, amount_f, token_id, batch_num, @@ -855,6 +858,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error { from_eth_addr, from_bjj, load_amount, + effective_load_amount, load_amount_f, fee, nonce @@ -1162,8 +1166,9 @@ func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) { err := meddler.QueryAll( hdb.db, &txs, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, - tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount, - tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num + tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, + tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount, + tx.eth_block_num, tx.type, tx.batch_num FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE;`, ) return db.SlicePtrsToSlice(txs).([]common.L1Tx), err @@ -1175,8 +1180,9 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) { err := meddler.QueryAll( hdb.db, &txs, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, - tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount, - tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num + tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, + tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount, + tx.eth_block_num, tx.type, tx.batch_num FROM tx WHERE is_l1 = TRUE AND user_origin = FALSE;`, ) return 
db.SlicePtrsToSlice(txs).([]common.L1Tx), err @@ -1201,8 +1207,9 @@ func (hdb *HistoryDB) GetL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) err := meddler.QueryAll( hdb.db, &txs, `SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin, - tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount, - tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num + tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, + tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount, + tx.eth_block_num, tx.type, tx.batch_num FROM tx WHERE to_forge_l1_txs_num = $1 AND is_l1 = TRUE AND user_origin = TRUE;`, toForgeL1TxsNum, ) diff --git a/db/historydb/views.go b/db/historydb/views.go index c9fc28e..2e18ad1 100644 --- a/db/historydb/views.go +++ b/db/historydb/views.go @@ -112,24 +112,26 @@ func (tx TxAPI) MarshalJSON() ([]byte, error) { // in order to perform inserts into tx table type txWrite struct { // Generic - IsL1 bool `meddler:"is_l1"` - TxID common.TxID `meddler:"id"` - Type common.TxType `meddler:"type"` - Position int `meddler:"position"` - FromIdx *common.Idx `meddler:"from_idx"` - ToIdx common.Idx `meddler:"to_idx"` - Amount *big.Int `meddler:"amount,bigint"` - AmountFloat float64 `meddler:"amount_f"` - TokenID common.TokenID `meddler:"token_id"` - BatchNum *common.BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0 - EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue + IsL1 bool `meddler:"is_l1"` + TxID common.TxID `meddler:"id"` + Type common.TxType `meddler:"type"` + Position int `meddler:"position"` + FromIdx *common.Idx `meddler:"from_idx"` + ToIdx common.Idx `meddler:"to_idx"` + Amount *big.Int `meddler:"amount,bigint"` + EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"` + AmountFloat float64 `meddler:"amount_f"` + TokenID common.TokenID `meddler:"token_id"` + BatchNum *common.BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0 + EthBlockNum int64 `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue // L1 - ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged - UserOrigin *bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was aoriginated by a coordinator. Note that this differ from the spec for implementation simplification purpposes - FromEthAddr *ethCommon.Address `meddler:"from_eth_addr"` - FromBJJ *babyjub.PublicKey `meddler:"from_bjj"` - LoadAmount *big.Int `meddler:"load_amount,bigintnull"` - LoadAmountFloat *float64 `meddler:"load_amount_f"` + ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` // toForgeL1TxsNum in which the tx was forged / will be forged + UserOrigin *bool `meddler:"user_origin"` // true if the tx was originated by a user, false if it was aoriginated by a coordinator. 
Note that this differ from the spec for implementation simplification purpposes + FromEthAddr *ethCommon.Address `meddler:"from_eth_addr"` + FromBJJ *babyjub.PublicKey `meddler:"from_bjj"` + LoadAmount *big.Int `meddler:"load_amount,bigintnull"` + EffectiveLoadAmount *big.Int `meddler:"effective_load_amount,bigintnull"` + LoadAmountFloat *float64 `meddler:"load_amount_f"` // L2 Fee *common.FeeSelector `meddler:"fee"` Nonce *common.Nonce `meddler:"nonce"` diff --git a/db/migrations/0001.sql b/db/migrations/0001.sql index e35fdb5..0ad25b6 100644 --- a/db/migrations/0001.sql +++ b/db/migrations/0001.sql @@ -149,6 +149,7 @@ CREATE TABLE tx ( to_eth_addr BYTEA, to_bjj BYTEA, amount BYTEA NOT NULL, + effective_amount BYTEA, amount_f NUMERIC NOT NULL, token_id INT NOT NULL REFERENCES token (token_id), amount_usd NUMERIC, -- Value of the amount in USD at the moment the tx was inserted in the DB @@ -158,6 +159,7 @@ CREATE TABLE tx ( to_forge_l1_txs_num BIGINT, user_origin BOOLEAN, load_amount BYTEA, + effective_load_amount BYTEA, load_amount_f NUMERIC, load_amount_usd NUMERIC, -- L2 diff --git a/db/statedb/txprocessors.go b/db/statedb/txprocessors.go index ca60a00..e75f698 100644 --- a/db/statedb/txprocessors.go +++ b/db/statedb/txprocessors.go @@ -1,6 +1,7 @@ package statedb import ( + "bytes" "errors" "fmt" "io/ioutil" @@ -93,7 +94,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use exits := make([]processedExit, nTx) if s.typ == TypeBatchBuilder { - s.zki = common.NewZKInputs(uint32(nTx), ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels) + s.zki = common.NewZKInputs(uint32(nTx), ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels, s.currentBatch.BigInt()) s.zki.OldLastIdx = s.idx.BigInt() s.zki.OldStateRoot = s.mt.Root().BigInt() } @@ -138,6 +139,12 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use } s.zki.Metadata.L1TxsData = append(s.zki.Metadata.L1TxsData, l1TxData) + l1TxDataAvailability, err := l1usertxs[i].BytesDataAvailability(s.zki.Metadata.NLevels) + if err != nil { + return nil, err + } + s.zki.Metadata.L1TxsDataAvailability = append(s.zki.Metadata.L1TxsDataAvailability, l1TxDataAvailability) + if s.i < nTx-1 { s.zki.ISOutIdx[s.i] = s.idx.BigInt() s.zki.ISStateRoot[s.i] = s.mt.Root().BigInt() @@ -222,7 +229,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use return nil, err } if s.zki != nil { - l2TxData, err := l2txs[i].L2Tx().Bytes(s.zki.Metadata.NLevels) + l2TxData, err := l2txs[i].L2Tx().BytesDataAvailability(s.zki.Metadata.NLevels) if err != nil { return nil, err } @@ -443,6 +450,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) switch tx.Type { case common.TxTypeForceTransfer: + s.computeEffectiveAmounts(tx) + // go to the MT account of sender and receiver, and update balance // & nonce @@ -454,6 +463,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) return nil, nil, false, nil, err } case common.TxTypeCreateAccountDeposit: + s.computeEffectiveAmounts(tx) + // add new account to the MT, update balance of the MT account err := s.applyCreateAccount(tx) if err != nil { @@ -464,6 +475,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) // which in the case type==TypeSynchronizer will be added to an // array of created accounts that will be returned case common.TxTypeDeposit: + s.computeEffectiveAmounts(tx) + // update balance of the MT account err := s.applyDeposit(tx, false) if err 
!= nil { @@ -471,6 +484,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) return nil, nil, false, nil, err } case common.TxTypeDepositTransfer: + s.computeEffectiveAmounts(tx) + // update balance in MT account, update balance & nonce of sender // & receiver err := s.applyDeposit(tx, true) @@ -479,6 +494,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) return nil, nil, false, nil, err } case common.TxTypeCreateAccountDepositTransfer: + s.computeEffectiveAmounts(tx) + // add new account to the merkletree, update balance in MT account, // update balance & nonce of sender & receiver err := s.applyCreateAccountDepositTransfer(tx) @@ -487,6 +504,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx) return nil, nil, false, nil, err } case common.TxTypeForceExit: + s.computeEffectiveAmounts(tx) + // execute exit flow // coordIdxsMap is 'nil', as at L1Txs there is no L2 fees exitAccount, newExit, err := s.applyExit(nil, nil, exitTree, tx.Tx()) @@ -520,6 +539,11 @@ func (s *StateDB) processL2Tx(coordIdxsMap map[common.TokenID]common.Idx, collec var err error // if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) { + if s.typ == TypeSynchronizer { + // this should never be reached + log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0") + return nil, nil, false, fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0") + } // case when tx.Type== common.TxTypeTransferToEthAddr or common.TxTypeTransferToBJJ tx.AuxToIdx, err = s.GetIdxByEthAddrBJJ(tx.ToEthAddr, tx.ToBJJ, tx.TokenID) if err != nil { @@ -612,7 +636,7 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error { account := &common.Account{ TokenID: tx.TokenID, Nonce: 0, - Balance: tx.LoadAmount, + Balance: tx.EffectiveLoadAmount, PublicKey: tx.FromBJJ, EthAddr: tx.FromEthAddr, } @@ -628,7 +652,7 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error { s.zki.Sign1[s.i] = big.NewInt(1) } s.zki.Ay1[s.i] = tx.FromBJJ.Y - s.zki.Balance1[s.i] = tx.LoadAmount + s.zki.Balance1[s.i] = tx.EffectiveLoadAmount s.zki.EthAddr1[s.i] = common.EthAddrToBigInt(tx.FromEthAddr) s.zki.Siblings1[s.i] = siblingsToZKInputFormat(p.Siblings) if p.IsOld0 { @@ -656,12 +680,12 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error { // andTransfer parameter is set to true, the method will also apply the // Transfer of the L1Tx/DepositTransfer func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error { - // deposit the tx.LoadAmount into the sender account + // deposit the tx.EffectiveLoadAmount into the sender account accSender, err := s.GetAccount(tx.FromIdx) if err != nil { return err } - accSender.Balance = new(big.Int).Add(accSender.Balance, tx.LoadAmount) + accSender.Balance = new(big.Int).Add(accSender.Balance, tx.EffectiveLoadAmount) // in case that the tx is a L1Tx>DepositTransfer var accReceiver *common.Account @@ -671,9 +695,9 @@ func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error { return err } // subtract amount to the sender - accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.Amount) + accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.EffectiveAmount) // add amount to the receiver - accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount) + accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) } // update sender account in localStateDB p, err := 
s.UpdateAccount(tx.FromIdx, accSender) @@ -723,7 +747,8 @@ func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error { // tx.ToIdx==0, then toIdx!=0, and will be used the toIdx parameter as Idx of // the receiver. This parameter is used when the tx.ToIdx is not specified and // the real ToIdx is found trhrough the ToEthAddr or ToBJJ. -func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, collectedFees map[common.TokenID]*big.Int, +func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, + collectedFees map[common.TokenID]*big.Int, tx common.Tx, auxToIdx common.Idx) error { if auxToIdx == common.Idx(0) { auxToIdx = tx.ToIdx @@ -824,7 +849,7 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error { accSender := &common.Account{ TokenID: tx.TokenID, Nonce: 0, - Balance: tx.LoadAmount, + Balance: tx.EffectiveLoadAmount, PublicKey: tx.FromBJJ, EthAddr: tx.FromEthAddr, } @@ -833,9 +858,9 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error { return err } // subtract amount to the sender - accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.Amount) + accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.EffectiveAmount) // add amount to the receiver - accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount) + accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) // create Account of the Sender p, err := s.CreateAccount(common.Idx(s.idx+1), accSender) @@ -849,7 +874,7 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error { s.zki.Sign1[s.i] = big.NewInt(1) } s.zki.Ay1[s.i] = tx.FromBJJ.Y - s.zki.Balance1[s.i] = tx.LoadAmount + s.zki.Balance1[s.i] = tx.EffectiveLoadAmount s.zki.EthAddr1[s.i] = common.EthAddrToBigInt(tx.FromEthAddr) s.zki.Siblings1[s.i] = siblingsToZKInputFormat(p.Siblings) if p.IsOld0 { @@ -890,8 +915,9 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error { // It returns the ExitAccount and a boolean determining if the Exit created a // new Leaf in the ExitTree. -func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collectedFees map[common.TokenID]*big.Int, - exitTree *merkletree.MerkleTree, tx common.Tx) (*common.Account, bool, error) { +func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, + collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree, + tx common.Tx) (*common.Account, bool, error) { // 0. 
subtract tx.Amount from current Account in StateMT // add the tx.Amount into the Account (tx.FromIdx) in the ExitMT acc, err := s.GetAccount(tx.FromIdx) @@ -971,6 +997,89 @@ func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collecte return exitAccount, false, err } +// computeEffectiveAmounts checks that the L1Tx data is correct +func (s *StateDB) computeEffectiveAmounts(tx *common.L1Tx) { + if !tx.UserOrigin { + // case where the L1Tx is generated by the Coordinator + tx.EffectiveAmount = big.NewInt(0) + tx.EffectiveLoadAmount = big.NewInt(0) + return + } + + tx.EffectiveAmount = tx.Amount + tx.EffectiveLoadAmount = tx.LoadAmount + if tx.Type == common.TxTypeCreateAccountDeposit { + return + } + + if tx.ToIdx >= common.UserThreshold && tx.FromIdx == common.Idx(0) { + // CreateAccountDepositTransfer case + cmp := tx.LoadAmount.Cmp(tx.Amount) + if cmp == -1 { // LoadAmount batchL1 + > batchL1 + > block + ` + tc := til.NewContext(common.RollupConstMaxL1UserTx) + blocks, err := tc.GenerateBlocks(set) + require.Nil(t, err) + + ptc := ProcessTxsConfig{ + NLevels: 32, + MaxFeeTx: 64, + MaxTx: 512, + MaxL1Tx: 16, + } + _, err = sdb.ProcessTxs(ptc, nil, blocks[0].Rollup.L1UserTxs, nil, nil) + require.Nil(t, err) + + tx := common.L1Tx{ + FromIdx: 256, + ToIdx: 257, + Amount: big.NewInt(10), + LoadAmount: big.NewInt(0), + FromEthAddr: tc.Users["A"].Addr, + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(10), tx.EffectiveAmount) + + // expect error due not enough funds + tx = common.L1Tx{ + FromIdx: 256, + ToIdx: 257, + Amount: big.NewInt(11), + LoadAmount: big.NewInt(0), + FromEthAddr: tc.Users["A"].Addr, + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(0), tx.EffectiveAmount) + + // expect no-error due not enough funds in a + // CreateAccountDepositTransfer transction + tx = common.L1Tx{ + FromIdx: 0, + ToIdx: 257, + Amount: big.NewInt(10), + LoadAmount: big.NewInt(10), + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(10), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(10), tx.EffectiveAmount) + + // expect error due not enough funds in a CreateAccountDepositTransfer + // transction + tx = common.L1Tx{ + FromIdx: 0, + ToIdx: 257, + Amount: big.NewInt(11), + LoadAmount: big.NewInt(10), + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(10), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(0), tx.EffectiveAmount) + + // expect error due not same TokenID + tx = common.L1Tx{ + FromIdx: 256, + ToIdx: 258, + Amount: big.NewInt(5), + LoadAmount: big.NewInt(0), + FromEthAddr: tc.Users["A"].Addr, + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(0), tx.EffectiveAmount) + + // expect error due not same EthAddr + tx = common.L1Tx{ + FromIdx: 256, + ToIdx: 257, + Amount: big.NewInt(8), + LoadAmount: big.NewInt(0), + FromEthAddr: tc.Users["B"].Addr, + UserOrigin: true, + } + sdb.computeEffectiveAmounts(&tx) + assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount) + assert.Equal(t, big.NewInt(0), tx.EffectiveAmount) +} + func TestProcessTxsBalances(t *testing.T) { dir, err := ioutil.TempDir("", "tmpdb") require.Nil(t, err) @@ -139,7 +249,7 @@ func TestProcessTxsBalances(t *testing.T) { require.Nil(t, err) // use Set of PoolL2 
txs - poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow0) + poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow1) assert.Nil(t, err) _, err = sdb.ProcessTxs(ptc, coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs) @@ -452,6 +562,7 @@ func TestProcessTxsRootTestVectors(t *testing.T) { FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"), ToIdx: 0, Type: common.TxTypeCreateAccountDeposit, + UserOrigin: true, }, } l2Txs := []common.PoolL2Tx{ @@ -499,6 +610,7 @@ func TestCircomTest(t *testing.T) { FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"), ToIdx: 0, Type: common.TxTypeCreateAccountDeposit, + UserOrigin: true, }, } l2Txs := []common.PoolL2Tx{ @@ -564,8 +676,7 @@ func TestZKInputsHashTestVector0(t *testing.T) { assert.Nil(t, err) l1Txs := []common.L1Tx{ { - FromIdx: 0, - // LoadAmount: big.NewInt(10400), + FromIdx: 0, LoadAmount: big.NewInt(16000000), Amount: big.NewInt(0), TokenID: 1, @@ -573,6 +684,7 @@ func TestZKInputsHashTestVector0(t *testing.T) { FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"), ToIdx: 0, Type: common.TxTypeCreateAccountDeposit, + UserOrigin: true, }, } l2Txs := []common.PoolL2Tx{ @@ -593,6 +705,10 @@ func TestZKInputsHashTestVector0(t *testing.T) { MaxTx: 32, MaxL1Tx: 16, } + // skip first batch to do the test with BatchNum=1 + _, err = sdb.ProcessTxs(ptc, nil, nil, nil, nil) + require.Nil(t, err) + ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs) require.Nil(t, err) @@ -610,7 +726,7 @@ func TestZKInputsHashTestVector0(t *testing.T) { toHash, err := ptOut.ZKInputs.ToHashGlobalData() assert.Nil(t, err) // value from js test vector - expectedToHash := "0000000000ff000000000100000000000000000000000000000000000000000000000000000000000000000015ba488d749f6b891d29d0bf3a72481ec812e4d4ecef2bf7a3fc64f3c010444200000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a0000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010003e87e00000100000000000000000000000000000000000000000000000000000000000000" + expectedToHash := "0000000000ff000000000100000000000000000000000000000000000000000000000000000000000000000015ba488d749f6b891d29d0bf3a72481ec812e4d4ecef2bf7a3fc64f3c010444200000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a0000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010003e87e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000001" // checks are splitted to find the difference easier assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000]) assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000]) @@ -619,7 +735,7 @@ func TestZKInputsHashTestVector0(t *testing.T) { h, err := ptOut.ZKInputs.HashGlobalData() require.Nil(t, err) // value from js test vector - assert.Equal(t, "80757288244566854497474223360206077562032050734432637237701187686677568506", h.String()) + assert.Equal(t, "4356692423721763303547321618014315464040324829724049399065961225345730555597", h.String()) } func TestZKInputsHashTestVector1(t *testing.T) { @@ -646,6 +762,7 @@ func TestZKInputsHashTestVector1(t *testing.T) { FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"), ToIdx: 0, Type: common.TxTypeCreateAccountDeposit, + UserOrigin: true, }, { FromIdx: 0, @@ -656,6 +773,7 @@ func TestZKInputsHashTestVector1(t *testing.T) { FromEthAddr: ethCommon.HexToAddress("0x2b5ad5c4795c026514f8317c7a215e218dccd6cf"), ToIdx: 0, Type: common.TxTypeCreateAccountDeposit, + UserOrigin: true, }, } l2Txs := []common.PoolL2Tx{ @@ -676,6 +794,10 @@ func TestZKInputsHashTestVector1(t *testing.T) { MaxTx: 32, MaxL1Tx: 16, } + // skip first batch to do the test with BatchNum=1 + _, err = sdb.ProcessTxs(ptc, nil, nil, nil, nil) + require.Nil(t, err) + ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs) require.Nil(t, err) @@ -697,7 +819,7 @@ func TestZKInputsHashTestVector1(t *testing.T) { toHash, err := ptOut.ZKInputs.ToHashGlobalData() assert.Nil(t, err) // value from js test vector - expectedToHash := 
"0000000000ff0000000001010000000000000000000000000000000000000000000000000000000000000000304a3f3aef4f416cca887aab7265227449077627138345c2eb25bf8ff946b09500000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000010003e88900000101000000000000000000000000000000000000000000000000000000000000" + expectedToHash := 
"0000000000ff0000000001010000000000000000000000000000000000000000000000000000000000000000304a3f3aef4f416cca887aab7265227449077627138345c2eb25bf8ff946b09500000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00000000000028a000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000010003e889000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000000001" // checks are splitted to find the difference easier assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000]) assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000]) @@ -706,5 +828,5 @@ func 
TestZKInputsHashTestVector1(t *testing.T) { h, err := ptOut.ZKInputs.HashGlobalData() require.Nil(t, err) // value from js test vector - assert.Equal(t, "10900521462378877053056992084240080637954406133884857263674494661625916419481", h.String()) + assert.Equal(t, "20293112365009290386650039345314592436395562810005523677125576447132206192598", h.String()) } diff --git a/eth/rollup.go b/eth/rollup.go index 2c44246..99a8110 100644 --- a/eth/rollup.go +++ b/eth/rollup.go @@ -251,7 +251,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs) (tx *types.T var l2DataBytes []byte for i := 0; i < len(args.L2TxsData); i++ { l2 := args.L2TxsData[i] - bytesl2, err := l2.Bytes(uint32(nLevels)) + bytesl2, err := l2.BytesDataAvailability(uint32(nLevels)) if err != nil { return nil, err } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 6ac8b2b..c37cb8e 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -80,9 +80,14 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc assert.Equal(t, len(block.Rollup.L1UserTxs), len(syncBlock.Rollup.L1UserTxs)) dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs() require.Nil(t, err) - // Ignore BatchNum in syncBlock.L1UserTxs because this value is set by the HistoryDB + // Ignore BatchNum in syncBlock.L1UserTxs because this value is set by + // the HistoryDB. Also ignore EffectiveAmount & EffectiveLoadAmount + // because this value is set by StateDB.ProcessTxs. for i := range syncBlock.Rollup.L1UserTxs { syncBlock.Rollup.L1UserTxs[i].BatchNum = block.Rollup.L1UserTxs[i].BatchNum + syncBlock.Rollup.L1UserTxs[i].EffectiveAmount = block.Rollup.L1UserTxs[i].EffectiveAmount + syncBlock.Rollup.L1UserTxs[i].EffectiveLoadAmount = + block.Rollup.L1UserTxs[i].EffectiveLoadAmount } assert.Equal(t, block.Rollup.L1UserTxs, syncBlock.Rollup.L1UserTxs) for _, tx := range block.Rollup.L1UserTxs { @@ -134,6 +139,7 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc assert.Equal(t, batch.L2Txs, syncBatch.L2Txs) // In exit tree, we only check AccountIdx and Balance, because // it's what we have precomputed before. 
+ require.Equal(t, len(batch.ExitTree), len(syncBatch.ExitTree)) for j := range batch.ExitTree { exit := &batch.ExitTree[j] assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx) @@ -384,6 +390,7 @@ func TestSync(t *testing.T) { > block // blockNum=2 CreateAccountDepositTransfer(1) E-A: 1000, 200 // Idx=256+7=263 + ForceTransfer(1) C-B: 80 ForceExit(1) A: 100 ForceExit(1) B: 80 ForceTransfer(1) A-D: 100 @@ -415,7 +422,7 @@ func TestSync(t *testing.T) { // blocks 1 (blockNum=3) i = 1 require.Equal(t, 3, int(blocks[i].Block.Num)) - require.Equal(t, 4, len(blocks[i].Rollup.L1UserTxs)) + require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) diff --git a/test/til/sets.go b/test/til/sets.go index ccb3fd1..e977206 100644 --- a/test/til/sets.go +++ b/test/til/sets.go @@ -333,3 +333,23 @@ PoolTransferToEthAddr(1) A-B: 100 (126) // D(0): 360 // F(0): 100 ` + +// SetPoolL2MinimumFlow1 contains the same transactions than the +// SetPoolL2MinimumFlow0, but simulating coming from the smart contract +// (always with the parameter ToIdx filled) +var SetPoolL2MinimumFlow1 = ` +Type: PoolL2 + +PoolTransfer(0) A-B: 100 (126) +PoolTransfer(0) D-F: 100 (126) +PoolExit(0) A: 100 (126) +PoolTransfer(1) A-B: 100 (126) + +// Expected balances: +// Coord(0): 105, Coord(1): 40 +// A(0): 510, A(1): 170 +// B(0): 480, B(1): 190 +// C(0): 845, C(1): 100 +// D(0): 360 +// F(0): 100 +` diff --git a/test/til/txs.go b/test/til/txs.go index b9cff9f..9369859 100644 --- a/test/til/txs.go +++ b/test/til/txs.go @@ -207,7 +207,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) { toIdxName: inst.to, L1Tx: tx, } - if err := tc.addToL1Queue(testTx); err != nil { + if err := tc.addToL1UserQueue(testTx); err != nil { return nil, err } case common.TxTypeDeposit, common.TxTypeDepositTransfer: // tx source: L1UserTx @@ -234,7 +234,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) { toIdxName: inst.to, L1Tx: tx, } - if err := tc.addToL1Queue(testTx); err != nil { + if err := tc.addToL1UserQueue(testTx); err != nil { return nil, err } case common.TxTypeTransfer: // L2Tx @@ -274,7 +274,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) { toIdxName: inst.to, L1Tx: tx, } - if err := tc.addToL1Queue(testTx); err != nil { + if err := tc.addToL1UserQueue(testTx); err != nil { return nil, err } case common.TxTypeExit: // tx source: L2Tx @@ -316,7 +316,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) { toIdxName: inst.to, L1Tx: tx, } - if err := tc.addToL1Queue(testTx); err != nil { + if err := tc.addToL1UserQueue(testTx); err != nil { return nil, err } case typeNewBatch: @@ -449,8 +449,8 @@ func (tc *Context) setIdxs() error { return nil } -// addToL1Queue adds the L1Tx into the queue that is open and has space -func (tc *Context) addToL1Queue(tx L1Tx) error { +// addToL1UserQueue adds the L1UserTx into the queue that is open and has space +func (tc *Context) addToL1UserQueue(tx L1Tx) error { if len(tc.Queues[tc.openToForge]) >= tc.rollupConstMaxL1UserTx { // if current OpenToForge queue reached its Max, move into a // new queue @@ -698,6 +698,8 @@ func (tc *Context) FillBlocksL1UserTxsBatchNum(blocks []common.BlockData) { // - blocks[].Rollup.Batch.L1CoordinatorTxs[].BatchNum // - blocks[].Rollup.Batch.L1CoordinatorTxs[].EthBlockNum // - blocks[].Rollup.Batch.L1CoordinatorTxs[].Position +// - 
blocks[].Rollup.Batch.L1CoordinatorTxs[].EffectiveAmount
+// - blocks[].Rollup.Batch.L1CoordinatorTxs[].EffectiveLoadAmount
 // - blocks[].Rollup.Batch.L2Txs[].TxID
 // - blocks[].Rollup.Batch.L2Txs[].Position
 // - blocks[].Rollup.Batch.L2Txs[].Nonce
@@ -779,6 +781,8 @@ func (tc *Context) FillBlocksExtra(blocks []common.BlockData, cfg *ConfigExtra)
 			tx := &batch.L1CoordinatorTxs[k]
 			tx.Position = position
 			position++
+			tx.EffectiveAmount = big.NewInt(0)
+			tx.EffectiveLoadAmount = big.NewInt(0)
 			nTx, err := common.NewL1Tx(tx)
 			if err != nil {
 				return err