Update Common & StateDB & ZKInputs to the latest protocol version

- Add InvalidData flag to L1Tx
- Add BytesDataAvailability to L1Tx
- Update ZKInputs & HashGlobalInputs to the latest spec of the protocol
  (massive migration)
- TxProcessor checks the correctness of L1Txs

Compatible with hermeznetwork/commonjs v0.0.4
(c345239bba)
@@ -36,6 +36,11 @@ func (bn BatchNum) Bytes() []byte {
 	return batchNumBytes[:]
 }
 
+// BigInt returns a *big.Int representing the BatchNum
+func (bn BatchNum) BigInt() *big.Int {
+	return big.NewInt(int64(bn))
+}
+
 // BatchNumFromBytes returns BatchNum from a []byte
 func BatchNumFromBytes(b []byte) (BatchNum, error) {
 	if len(b) != batchNumBytesLen {
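A tiny standalone sketch of the new helper, assuming BatchNum is backed by int64 (as the conversion above suggests); this is the value the StateDB later feeds into the ZKInputs as currentNumBatch.

package main

import (
	"fmt"
	"math/big"
)

// BatchNum mirrors the common.BatchNum counter type (assumed int64-backed).
type BatchNum int64

// BigInt returns a *big.Int representing the BatchNum, as in the hunk above.
func (bn BatchNum) BigInt() *big.Int {
	return big.NewInt(int64(bn))
}

func main() {
	var currentBatch BatchNum = 1
	fmt.Println(currentBatch.BigInt()) // 1
}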
@@ -37,7 +37,11 @@ type L1Tx struct {
 	ToIdx       Idx       `meddler:"to_idx"` // ToIdx is ignored in L1Tx/Deposit, but used in the L1Tx/DepositAndTransfer
 	TokenID     TokenID   `meddler:"token_id"`
 	Amount      *big.Int  `meddler:"amount,bigint"`
+	// EffectiveAmount only applies to L1UserTx.
+	EffectiveAmount *big.Int `meddler:"effective_amount,bigintnull"`
 	LoadAmount  *big.Int  `meddler:"load_amount,bigint"`
+	// EffectiveLoadAmount only applies to L1UserTx.
+	EffectiveLoadAmount *big.Int `meddler:"effective_load_amount,bigintnull"`
 	EthBlockNum int64     `meddler:"eth_block_num"` // Ethereum Block Number in which this L1Tx was added to the queue
 	Type        TxType    `meddler:"type"`
 	BatchNum    *BatchNum `meddler:"batch_num"`
@@ -117,7 +121,7 @@ func (tx *L1Tx) CalcTxID() (*TxID, error) {
 
 // Tx returns a *Tx from the L1Tx
 func (tx L1Tx) Tx() Tx {
-	f := new(big.Float).SetInt(tx.Amount)
+	f := new(big.Float).SetInt(tx.EffectiveAmount)
 	amountFloat, _ := f.Float64()
 	userOrigin := new(bool)
 	*userOrigin = tx.UserOrigin
@@ -128,14 +132,14 @@ func (tx L1Tx) Tx() Tx {
 		Position:        tx.Position,
 		FromIdx:         tx.FromIdx,
 		ToIdx:           tx.ToIdx,
-		Amount:          tx.Amount,
+		Amount:          tx.EffectiveAmount,
 		AmountFloat:     amountFloat,
 		TokenID:         tx.TokenID,
 		ToForgeL1TxsNum: tx.ToForgeL1TxsNum,
 		UserOrigin:      userOrigin,
 		FromEthAddr:     tx.FromEthAddr,
 		FromBJJ:         tx.FromBJJ,
-		LoadAmount:      tx.LoadAmount,
+		LoadAmount:      tx.EffectiveLoadAmount,
 		EthBlockNum:     tx.EthBlockNum,
 	}
 	if tx.LoadAmount != nil {
@@ -183,6 +187,34 @@ func (tx L1Tx) TxCompressedData() (*big.Int, error) {
 	return bi, nil
 }
 
+// BytesDataAvailability encodes a L1Tx into []byte for the Data Availability
+func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
+	idxLen := nLevels / 8 //nolint:gomnd
+
+	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
+
+	fromIdxBytes, err := tx.FromIdx.Bytes()
+	if err != nil {
+		return nil, err
+	}
+	copy(b[0:idxLen], fromIdxBytes[6-idxLen:])
+	toIdxBytes, err := tx.ToIdx.Bytes()
+	if err != nil {
+		return nil, err
+	}
+	copy(b[idxLen:idxLen*2], toIdxBytes[6-idxLen:])
+
+	if tx.EffectiveAmount != nil {
+		amountFloat16, err := NewFloat16(tx.EffectiveAmount)
+		if err != nil {
+			return nil, err
+		}
+		copy(b[idxLen*2:idxLen*2+2], amountFloat16.Bytes())
+	}
+	// fee = 0 (as is L1Tx) b[10:11]
+	return b[:], nil
+}
+
 // BytesGeneric returns the generic representation of a L1Tx. This method is
 // used to compute the []byte representation of a L1UserTx, and also to compute
 // the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method
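A minimal standalone sketch of the byte layout produced above for nLevels=32: a 4-byte fromIdx, a 4-byte toIdx, a 2-byte Float16 amount and one zero fee byte. It assumes an amount small enough that its Float16 encoding equals the plain integer (not the general Float16 rule); under that assumption it reproduces the hex vector of the test that follows.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// packL1DataAvailability is a hypothetical helper mirroring the layout of
// L1Tx.BytesDataAvailability for nLevels=32.
func packL1DataAvailability(fromIdx, toIdx uint32, amountFloat16 uint16) []byte {
	b := make([]byte, (2*32+16+8)/8) // 11 bytes for nLevels=32
	binary.BigEndian.PutUint32(b[0:4], fromIdx)
	binary.BigEndian.PutUint32(b[4:8], toIdx)
	binary.BigEndian.PutUint16(b[8:10], amountFloat16) // Float16-encoded effective amount
	// b[10] stays 0: L1 txs carry no fee
	return b
}

func main() {
	fmt.Println(hex.EncodeToString(packL1DataAvailability(2, 3, 4)))
	// Output: 0000000200000003000400
}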
@@ -65,6 +65,28 @@ func TestL1TxCompressedData(t *testing.T) {
 	assert.Equal(t, "050004000000000003000000000002000100000000", hex.EncodeToString(txCompressedData.Bytes()))
 }
 
+func TestBytesDataAvailability(t *testing.T) {
+	tx := L1Tx{
+		FromIdx: 2,
+		ToIdx:   3,
+		Amount:  big.NewInt(4),
+		TokenID: 5,
+	}
+	txCompressedData, err := tx.BytesDataAvailability(32)
+	assert.Nil(t, err)
+	assert.Equal(t, "0000000200000003000000", hex.EncodeToString(txCompressedData))
+
+	tx = L1Tx{
+		FromIdx:         2,
+		ToIdx:           3,
+		EffectiveAmount: big.NewInt(4),
+		TokenID:         5,
+	}
+	txCompressedData, err = tx.BytesDataAvailability(32)
+	assert.Nil(t, err)
+	assert.Equal(t, "0000000200000003000400", hex.EncodeToString(txCompressedData))
+}
+
 func TestL1userTxByteParsers(t *testing.T) {
 	var pkComp babyjub.PublicKeyComp
 	pkCompL := []byte("0x56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
@@ -103,8 +103,8 @@ func L2TxsToPoolL2Txs(txs []L2Tx) []PoolL2Tx {
 	return r
 }
 
-// Bytes encodes a L2Tx into []byte
-func (tx L2Tx) Bytes(nLevels uint32) ([]byte, error) {
+// BytesDataAvailability encodes a L2Tx into []byte for the Data Availability
+func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
 	idxLen := nLevels / 8 //nolint:gomnd
 
 	b := make([]byte, ((nLevels*2)+16+8)/8) //nolint:gomnd
@@ -32,7 +32,7 @@ func TestL2TxByteParsers(t *testing.T) {
 	}
 	// Data from the compatibility test
 	expected := "00000101000001002b16c9"
-	encodedData, err := l2Tx.Bytes(32)
+	encodedData, err := l2Tx.BytesDataAvailability(32)
 	require.Nil(t, err)
 	assert.Equal(t, expected, hex.EncodeToString(encodedData))
 
@@ -117,7 +117,6 @@ const (
 )
 
 // Tx is a struct used by the TxSelector & BatchBuilder as a generic type generated from L1Tx & PoolL2Tx
-// TODO: this should be changed for "mini Tx"
 type Tx struct {
 	// Generic
 	IsL1 bool `meddler:"is_l1"`
common/zk.go
@@ -33,6 +33,7 @@ type ZKMetadata struct {
 	MaxFeeIdxs uint32
 
 	L1TxsData             [][]byte
+	L1TxsDataAvailability [][]byte
 	L2TxsData             [][]byte
 	ChainID               uint16
 
@@ -49,6 +50,8 @@ type ZKInputs struct {
 	// General
 	//
 
+	// CurrentNumBatch is the current batch number processed
+	CurrentNumBatch *big.Int `json:"currentNumBatch"` // uint32
 	// inputs for final `hashGlobalInputs`
 	// OldLastIdx is the last index assigned to an account
 	OldLastIdx *big.Int `json:"oldLastIdx"` // uint64 (max nLevels bits)
@@ -72,6 +75,9 @@ type ZKInputs struct {
 	TxCompressedData []*big.Int `json:"txCompressedData"` // big.Int (max 251 bits), len: [nTx]
 	// TxCompressedDataV2, only used in L2Txs, in L1Txs is set to 0
 	TxCompressedDataV2 []*big.Int `json:"txCompressedDataV2"` // big.Int (max 193 bits), len: [nTx]
+	// MaxNumBatch is the maximum allowed batch number when the transaction
+	// can be processed
+	MaxNumBatch []*big.Int `json:"maxNumBatch"` // uint32
 
 	// FromIdx
 	FromIdx []*big.Int `json:"fromIdx"` // uint64 (max nLevels bits), len: [nTx]
@@ -266,7 +272,7 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) {
 }
 
 // NewZKInputs returns a pointer to an initialized struct of ZKInputs
-func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32) *ZKInputs {
+func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs {
 	zki := &ZKInputs{}
 	zki.Metadata.NTx = nTx
 	zki.Metadata.MaxFeeIdxs = maxFeeIdxs
@@ -276,15 +282,17 @@ func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32) *ZKInputs {
 	zki.Metadata.MaxTx = maxTx
 
 	// General
+	zki.CurrentNumBatch = currentNumBatch
 	zki.OldLastIdx = big.NewInt(0)
 	zki.OldStateRoot = big.NewInt(0)
-	zki.GlobalChainID = big.NewInt(0)
+	zki.GlobalChainID = big.NewInt(0) // TODO pass by parameter
 	zki.FeeIdxs = newSlice(maxFeeIdxs)
 	zki.FeePlanTokens = newSlice(maxFeeIdxs)
 
 	// Txs
 	zki.TxCompressedData = newSlice(nTx)
 	zki.TxCompressedDataV2 = newSlice(nTx)
+	zki.MaxNumBatch = newSlice(nTx)
 	zki.FromIdx = newSlice(nTx)
 	zki.AuxFromIdx = newSlice(nTx)
 	zki.ToIdx = newSlice(nTx)
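Callers now have to hand in the batch number; a minimal usage sketch (the import path github.com/hermeznetwork/hermez-node is an assumption; the arguments mirror the updated TestZKInputs further down, and the StateDB passes s.currentBatch.BigInt() in the same position).

package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common" // assumed module path
)

func main() {
	// nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels, and now currentNumBatch.
	zki := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
	fmt.Println(zki.CurrentNumBatch) // 1
}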
@@ -451,6 +459,12 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 	}
 	b = append(b, l1TxsData...)
 
+	var l1TxsDataAvailability []byte
+	for i := 0; i < len(z.Metadata.L1TxsDataAvailability); i++ {
+		l1TxsDataAvailability = append(l1TxsDataAvailability, z.Metadata.L1TxsDataAvailability[i]...)
+	}
+	b = append(b, l1TxsDataAvailability...)
+
 	// [MAX_TX*(2*NLevels + 24) bits] L2TxsData
 	var l2TxsData []byte
 	l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd
@@ -463,9 +477,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 		return nil, fmt.Errorf("len(l2TxsData): %d, expected: %d", len(l2TxsData), expectedL2TxsDataLen)
 	}
 
-	l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
-	b = append(b, l2TxsPadding...)
 	b = append(b, l2TxsData...)
+	l2TxsPadding := make([]byte, (int(z.Metadata.MaxTx)-len(z.Metadata.L1TxsDataAvailability)-len(z.Metadata.L2TxsData))*int(l2TxDataLen)/8) //nolint:gomnd
+	b = append(b, l2TxsPadding...)
 
 	// [NLevels * MAX_TOKENS_FEE bits] feeTxsData
 	for i := 0; i < len(z.FeeIdxs); i++ {
@@ -486,5 +500,11 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
 	binary.BigEndian.PutUint16(chainID[:], z.Metadata.ChainID)
 	b = append(b, chainID[:]...)
 
+	// [32 bits] currentNumBatch
+	currNumBatchBytes := z.CurrentNumBatch.Bytes()
+	var currNumBatch [4]byte
+	copy(currNumBatch[4-len(currNumBatchBytes):], currNumBatchBytes)
+	b = append(b, currNumBatch[:]...)
+
 	return b, nil
 }
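The copy into a [4]byte above is just a left-padded, big-endian encoding of the batch number; a small standalone sketch of the same idiom.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// pad4 left-pads the big-endian bytes of x into 4 bytes, mirroring how
// ToHashGlobalData appends currentNumBatch to the hash preimage.
func pad4(x *big.Int) [4]byte {
	var out [4]byte
	xb := x.Bytes() // big-endian, no leading zeros
	copy(out[4-len(xb):], xb)
	return out
}

func main() {
	b := pad4(big.NewInt(1))
	fmt.Println(hex.EncodeToString(b[:])) // 00000001
}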
@@ -2,13 +2,14 @@ package common
 
 import (
 	"encoding/json"
+	"math/big"
 	"testing"
 
 	"github.com/stretchr/testify/require"
 )
 
 func TestZKInputs(t *testing.T) {
-	zki := NewZKInputs(100, 16, 512, 24, 32)
+	zki := NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
 	_, err := json.Marshal(zki)
 	require.Nil(t, err)
 	// fmt.Println(string(s))
@@ -790,6 +790,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {
 			FromIdx:         &l1txs[i].FromIdx,
 			ToIdx:           l1txs[i].ToIdx,
 			Amount:          l1txs[i].Amount,
+			EffectiveAmount: l1txs[i].EffectiveAmount,
 			AmountFloat:     amountFloat,
 			TokenID:         l1txs[i].TokenID,
 			BatchNum:        l1txs[i].BatchNum,
@@ -800,6 +801,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {
 			FromEthAddr:         &l1txs[i].FromEthAddr,
 			FromBJJ:             l1txs[i].FromBJJ,
 			LoadAmount:          l1txs[i].LoadAmount,
+			EffectiveLoadAmount: l1txs[i].EffectiveLoadAmount,
 			LoadAmountFloat:     &loadAmountFloat,
 		})
 	}
@@ -846,6 +848,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
 		from_idx,
 		to_idx,
 		amount,
+		effective_amount,
 		amount_f,
 		token_id,
 		batch_num,
@@ -855,6 +858,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
 		from_eth_addr,
 		from_bjj,
 		load_amount,
+		effective_load_amount,
 		load_amount_f,
 		fee,
 		nonce
@@ -1162,8 +1166,9 @@ func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
 	err := meddler.QueryAll(
 		hdb.db, &txs,
 		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
-		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount,
-		tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num
+		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
+		tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount,
+		tx.eth_block_num, tx.type, tx.batch_num
 		FROM tx WHERE is_l1 = TRUE AND user_origin = TRUE;`,
 	)
 	return db.SlicePtrsToSlice(txs).([]common.L1Tx), err
@@ -1175,8 +1180,9 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
 	err := meddler.QueryAll(
 		hdb.db, &txs,
 		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
-		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount,
-		tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num
+		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
+		tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount,
+		tx.eth_block_num, tx.type, tx.batch_num
 		FROM tx WHERE is_l1 = TRUE AND user_origin = FALSE;`,
 	)
 	return db.SlicePtrsToSlice(txs).([]common.L1Tx), err
@@ -1201,8 +1207,9 @@ func (hdb *HistoryDB) GetL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error)
 	err := meddler.QueryAll(
 		hdb.db, &txs,
 		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
-		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id, tx.amount,
-		tx.load_amount, tx.eth_block_num, tx.type, tx.batch_num
+		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
+		tx.amount, tx.effective_amount, tx.load_amount, tx.effective_load_amount,
+		tx.eth_block_num, tx.type, tx.batch_num
 		FROM tx WHERE to_forge_l1_txs_num = $1 AND is_l1 = TRUE AND user_origin = TRUE;`,
 		toForgeL1TxsNum,
 	)
@@ -119,6 +119,7 @@ type txWrite struct {
 	FromIdx         *common.Idx      `meddler:"from_idx"`
 	ToIdx           common.Idx       `meddler:"to_idx"`
 	Amount          *big.Int         `meddler:"amount,bigint"`
+	EffectiveAmount *big.Int         `meddler:"effective_amount,bigintnull"`
 	AmountFloat     float64          `meddler:"amount_f"`
 	TokenID         common.TokenID   `meddler:"token_id"`
 	BatchNum        *common.BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged. If the tx is L2, this must be != 0
@@ -129,6 +130,7 @@ type txWrite struct {
 	FromEthAddr         *ethCommon.Address `meddler:"from_eth_addr"`
 	FromBJJ             *babyjub.PublicKey `meddler:"from_bjj"`
 	LoadAmount          *big.Int           `meddler:"load_amount,bigintnull"`
+	EffectiveLoadAmount *big.Int           `meddler:"effective_load_amount,bigintnull"`
 	LoadAmountFloat     *float64           `meddler:"load_amount_f"`
 	// L2
 	Fee *common.FeeSelector `meddler:"fee"`
@@ -149,6 +149,7 @@ CREATE TABLE tx (
    to_eth_addr BYTEA,
    to_bjj BYTEA,
    amount BYTEA NOT NULL,
+   effective_amount BYTEA,
    amount_f NUMERIC NOT NULL,
    token_id INT NOT NULL REFERENCES token (token_id),
    amount_usd NUMERIC, -- Value of the amount in USD at the moment the tx was inserted in the DB
@@ -158,6 +159,7 @@ CREATE TABLE tx (
    to_forge_l1_txs_num BIGINT,
    user_origin BOOLEAN,
    load_amount BYTEA,
+   effective_load_amount BYTEA,
    load_amount_f NUMERIC,
    load_amount_usd NUMERIC,
    -- L2
@@ -1,6 +1,7 @@
 package statedb
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -93,7 +94,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
 	exits := make([]processedExit, nTx)
 
 	if s.typ == TypeBatchBuilder {
-		s.zki = common.NewZKInputs(uint32(nTx), ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels)
+		s.zki = common.NewZKInputs(uint32(nTx), ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels, s.currentBatch.BigInt())
 		s.zki.OldLastIdx = s.idx.BigInt()
 		s.zki.OldStateRoot = s.mt.Root().BigInt()
 	}
@@ -138,6 +139,12 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
 			}
 			s.zki.Metadata.L1TxsData = append(s.zki.Metadata.L1TxsData, l1TxData)
 
+			l1TxDataAvailability, err := l1usertxs[i].BytesDataAvailability(s.zki.Metadata.NLevels)
+			if err != nil {
+				return nil, err
+			}
+			s.zki.Metadata.L1TxsDataAvailability = append(s.zki.Metadata.L1TxsDataAvailability, l1TxDataAvailability)
+
 			if s.i < nTx-1 {
 				s.zki.ISOutIdx[s.i] = s.idx.BigInt()
 				s.zki.ISStateRoot[s.i] = s.mt.Root().BigInt()
@@ -222,7 +229,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
 			return nil, err
 		}
 		if s.zki != nil {
-			l2TxData, err := l2txs[i].L2Tx().Bytes(s.zki.Metadata.NLevels)
+			l2TxData, err := l2txs[i].L2Tx().BytesDataAvailability(s.zki.Metadata.NLevels)
 			if err != nil {
 				return nil, err
 			}
@@ -443,6 +450,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 
 	switch tx.Type {
 	case common.TxTypeForceTransfer:
+		s.computeEffectiveAmounts(tx)
+
 		// go to the MT account of sender and receiver, and update balance
 		// & nonce
 
@@ -454,6 +463,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 			return nil, nil, false, nil, err
 		}
 	case common.TxTypeCreateAccountDeposit:
+		s.computeEffectiveAmounts(tx)
+
 		// add new account to the MT, update balance of the MT account
 		err := s.applyCreateAccount(tx)
 		if err != nil {
@@ -464,6 +475,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 		// which in the case type==TypeSynchronizer will be added to an
 		// array of created accounts that will be returned
 	case common.TxTypeDeposit:
+		s.computeEffectiveAmounts(tx)
+
 		// update balance of the MT account
 		err := s.applyDeposit(tx, false)
 		if err != nil {
@@ -471,6 +484,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 			return nil, nil, false, nil, err
 		}
 	case common.TxTypeDepositTransfer:
+		s.computeEffectiveAmounts(tx)
+
 		// update balance in MT account, update balance & nonce of sender
 		// & receiver
 		err := s.applyDeposit(tx, true)
@@ -479,6 +494,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 			return nil, nil, false, nil, err
 		}
 	case common.TxTypeCreateAccountDepositTransfer:
+		s.computeEffectiveAmounts(tx)
+
 		// add new account to the merkletree, update balance in MT account,
 		// update balance & nonce of sender & receiver
 		err := s.applyCreateAccountDepositTransfer(tx)
@@ -487,6 +504,8 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
 			return nil, nil, false, nil, err
 		}
 	case common.TxTypeForceExit:
+		s.computeEffectiveAmounts(tx)
+
 		// execute exit flow
 		// coordIdxsMap is 'nil', as at L1Txs there is no L2 fees
 		exitAccount, newExit, err := s.applyExit(nil, nil, exitTree, tx.Tx())
@@ -520,6 +539,11 @@ func (s *StateDB) processL2Tx(coordIdxsMap map[common.TokenID]common.Idx, collec
 	var err error
 	// if tx.ToIdx==0, get toIdx by ToEthAddr or ToBJJ
 	if tx.ToIdx == common.Idx(0) && tx.AuxToIdx == common.Idx(0) {
+		if s.typ == TypeSynchronizer {
+			// this should never be reached
+			log.Error("WARNING: In StateDB with Synchronizer mode L2.ToIdx can't be 0")
+			return nil, nil, false, fmt.Errorf("In StateDB with Synchronizer mode L2.ToIdx can't be 0")
+		}
 		// case when tx.Type== common.TxTypeTransferToEthAddr or common.TxTypeTransferToBJJ
 		tx.AuxToIdx, err = s.GetIdxByEthAddrBJJ(tx.ToEthAddr, tx.ToBJJ, tx.TokenID)
 		if err != nil {
@@ -612,7 +636,7 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error {
 	account := &common.Account{
 		TokenID:   tx.TokenID,
 		Nonce:     0,
-		Balance:   tx.LoadAmount,
+		Balance:   tx.EffectiveLoadAmount,
 		PublicKey: tx.FromBJJ,
 		EthAddr:   tx.FromEthAddr,
 	}
@@ -628,7 +652,7 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error {
 			s.zki.Sign1[s.i] = big.NewInt(1)
 		}
 		s.zki.Ay1[s.i] = tx.FromBJJ.Y
-		s.zki.Balance1[s.i] = tx.LoadAmount
+		s.zki.Balance1[s.i] = tx.EffectiveLoadAmount
 		s.zki.EthAddr1[s.i] = common.EthAddrToBigInt(tx.FromEthAddr)
 		s.zki.Siblings1[s.i] = siblingsToZKInputFormat(p.Siblings)
 		if p.IsOld0 {
@@ -656,12 +680,12 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error {
 // andTransfer parameter is set to true, the method will also apply the
 // Transfer of the L1Tx/DepositTransfer
 func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error {
-	// deposit the tx.LoadAmount into the sender account
+	// deposit the tx.EffectiveLoadAmount into the sender account
 	accSender, err := s.GetAccount(tx.FromIdx)
 	if err != nil {
 		return err
 	}
-	accSender.Balance = new(big.Int).Add(accSender.Balance, tx.LoadAmount)
+	accSender.Balance = new(big.Int).Add(accSender.Balance, tx.EffectiveLoadAmount)
 
 	// in case that the tx is a L1Tx>DepositTransfer
 	var accReceiver *common.Account
@@ -671,9 +695,9 @@ func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error {
 			return err
 		}
 		// subtract amount to the sender
-		accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.Amount)
+		accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.EffectiveAmount)
 		// add amount to the receiver
-		accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
+		accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
 	}
 	// update sender account in localStateDB
 	p, err := s.UpdateAccount(tx.FromIdx, accSender)
@@ -723,7 +747,8 @@ func (s *StateDB) applyDeposit(tx *common.L1Tx, transfer bool) error {
 // tx.ToIdx==0, then toIdx!=0, and will be used the toIdx parameter as Idx of
 // the receiver. This parameter is used when the tx.ToIdx is not specified and
 // the real ToIdx is found trhrough the ToEthAddr or ToBJJ.
-func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, collectedFees map[common.TokenID]*big.Int,
+func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
+	collectedFees map[common.TokenID]*big.Int,
 	tx common.Tx, auxToIdx common.Idx) error {
 	if auxToIdx == common.Idx(0) {
 		auxToIdx = tx.ToIdx
@@ -824,7 +849,7 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
 	accSender := &common.Account{
 		TokenID:   tx.TokenID,
 		Nonce:     0,
-		Balance:   tx.LoadAmount,
+		Balance:   tx.EffectiveLoadAmount,
 		PublicKey: tx.FromBJJ,
 		EthAddr:   tx.FromEthAddr,
 	}
@@ -833,9 +858,9 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
 		return err
 	}
 	// subtract amount to the sender
-	accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.Amount)
+	accSender.Balance = new(big.Int).Sub(accSender.Balance, tx.EffectiveAmount)
 	// add amount to the receiver
-	accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
+	accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
 
 	// create Account of the Sender
 	p, err := s.CreateAccount(common.Idx(s.idx+1), accSender)
@@ -849,7 +874,7 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
 		s.zki.Sign1[s.i] = big.NewInt(1)
 	}
 	s.zki.Ay1[s.i] = tx.FromBJJ.Y
-	s.zki.Balance1[s.i] = tx.LoadAmount
+	s.zki.Balance1[s.i] = tx.EffectiveLoadAmount
 	s.zki.EthAddr1[s.i] = common.EthAddrToBigInt(tx.FromEthAddr)
 	s.zki.Siblings1[s.i] = siblingsToZKInputFormat(p.Siblings)
 	if p.IsOld0 {
@@ -890,8 +915,9 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
 
 // It returns the ExitAccount and a boolean determining if the Exit created a
 // new Leaf in the ExitTree.
-func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collectedFees map[common.TokenID]*big.Int,
-	exitTree *merkletree.MerkleTree, tx common.Tx) (*common.Account, bool, error) {
+func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
+	collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree,
+	tx common.Tx) (*common.Account, bool, error) {
 	// 0. subtract tx.Amount from current Account in StateMT
 	// add the tx.Amount into the Account (tx.FromIdx) in the ExitMT
 	acc, err := s.GetAccount(tx.FromIdx)
@@ -971,6 +997,89 @@ func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collecte
 	return exitAccount, false, err
 }
 
+// computeEffectiveAmounts checks that the L1Tx data is correct
+func (s *StateDB) computeEffectiveAmounts(tx *common.L1Tx) {
+	if !tx.UserOrigin {
+		// case where the L1Tx is generated by the Coordinator
+		tx.EffectiveAmount = big.NewInt(0)
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		return
+	}
+
+	tx.EffectiveAmount = tx.Amount
+	tx.EffectiveLoadAmount = tx.LoadAmount
+	if tx.Type == common.TxTypeCreateAccountDeposit {
+		return
+	}
+
+	if tx.ToIdx >= common.UserThreshold && tx.FromIdx == common.Idx(0) {
+		// CreateAccountDepositTransfer case
+		cmp := tx.LoadAmount.Cmp(tx.Amount)
+		if cmp == -1 { // LoadAmount<Amount
+			tx.EffectiveAmount = big.NewInt(0)
+			return
+		}
+		return
+	}
+
+	accSender, err := s.GetAccount(tx.FromIdx)
+	if err != nil {
+		log.Debugf("EffectiveAmount & EffectiveLoadAmount = 0: can not get account for tx.FromIdx: %d", tx.FromIdx)
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+
+	// check that tx.TokenID corresponds to the Sender account TokenID
+	if tx.TokenID != accSender.TokenID {
+		log.Debugf("EffectiveAmount & EffectiveLoadAmount = 0: tx.TokenID (%d) !=sender account TokenID (%d)", tx.TokenID, accSender.TokenID)
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+
+	// check that Sender has enough balance
+	bal := accSender.Balance
+	if tx.LoadAmount != nil {
+		bal = new(big.Int).Add(bal, tx.EffectiveLoadAmount)
+	}
+	cmp := bal.Cmp(tx.Amount)
+	if cmp == -1 {
+		log.Debugf("EffectiveAmount = 0: Not enough funds (%s<%s)", bal.String(), tx.Amount.String())
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+
+	// check that the tx.FromEthAddr is the same than the EthAddress of the
+	// Sender
+	if !bytes.Equal(tx.FromEthAddr.Bytes(), accSender.EthAddr.Bytes()) {
+		log.Debugf("EffectiveAmount & EffectiveLoadAmount = 0: tx.FromEthAddr (%s) must be the same EthAddr of the sender account by the Idx (%s)", tx.FromEthAddr.Hex(), accSender.EthAddr.Hex())
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+
+	if tx.ToIdx == common.Idx(1) || tx.ToIdx == common.Idx(0) {
+		// if transfer is Exit type, there are no more checks
+		return
+	}
+
+	// check that TokenID is the same for Sender & Receiver account
+	accReceiver, err := s.GetAccount(tx.ToIdx)
+	if err != nil {
+		log.Debugf("EffectiveAmount & EffectiveLoadAmount = 0: can not get account for tx.ToIdx: %d", tx.ToIdx)
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+	if accSender.TokenID != accReceiver.TokenID {
+		log.Debugf("EffectiveAmount & EffectiveLoadAmount = 0: sender account TokenID (%d) != receiver account TokenID (%d)", tx.TokenID, accSender.TokenID)
+		tx.EffectiveLoadAmount = big.NewInt(0)
+		tx.EffectiveAmount = big.NewInt(0)
+		return
+	}
+}
+
 // getIdx returns the stored Idx from the localStateDB, which is the last Idx
 // used for an Account in the localStateDB.
 func (s *StateDB) getIdx() (common.Idx, error) {
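A simplified standalone sketch of the funds rule implemented above (it leaves out the TokenID and EthAddr checks, which zero both effective amounts in the same way); the numbers match the expectations of the test that follows.

package main

import (
	"fmt"
	"math/big"
)

// effectiveAmount applies the insufficient-funds rule of computeEffectiveAmounts:
// the deposited load amount counts towards the available balance, and if funds
// are still short the transfer amount is nullified instead of rejecting the tx.
func effectiveAmount(balance, loadAmount, amount *big.Int) *big.Int {
	avail := new(big.Int).Set(balance)
	if loadAmount != nil {
		avail.Add(avail, loadAmount)
	}
	if avail.Cmp(amount) < 0 {
		return big.NewInt(0)
	}
	return new(big.Int).Set(amount)
}

func main() {
	bal := big.NewInt(10) // account funded with 10, as in the test below
	fmt.Println(effectiveAmount(bal, big.NewInt(0), big.NewInt(10))) // 10
	fmt.Println(effectiveAmount(bal, big.NewInt(0), big.NewInt(11))) // 0
}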
@@ -24,6 +24,116 @@ func checkBalance(t *testing.T, tc *til.Context, sdb *StateDB, username string,
 	assert.Equal(t, expected, acc.Balance.String())
 }
 
+func TestCheckL1TxInvalidData(t *testing.T) {
+	dir, err := ioutil.TempDir("", "tmpdb")
+	require.Nil(t, err)
+	defer assert.Nil(t, os.RemoveAll(dir))
+
+	sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+	assert.Nil(t, err)
+
+	set := `
+		Type: Blockchain
+		AddToken(1)
+
+		CreateAccountDeposit(0) A: 10
+		CreateAccountDeposit(0) B: 10
+		CreateAccountDeposit(1) C: 10
+		> batchL1
+		> batchL1
+		> block
+	`
+	tc := til.NewContext(common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	require.Nil(t, err)
+
+	ptc := ProcessTxsConfig{
+		NLevels:  32,
+		MaxFeeTx: 64,
+		MaxTx:    512,
+		MaxL1Tx:  16,
+	}
+	_, err = sdb.ProcessTxs(ptc, nil, blocks[0].Rollup.L1UserTxs, nil, nil)
+	require.Nil(t, err)
+
+	tx := common.L1Tx{
+		FromIdx:     256,
+		ToIdx:       257,
+		Amount:      big.NewInt(10),
+		LoadAmount:  big.NewInt(0),
+		FromEthAddr: tc.Users["A"].Addr,
+		UserOrigin:  true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(10), tx.EffectiveAmount)
+
+	// expect error due not enough funds
+	tx = common.L1Tx{
+		FromIdx:     256,
+		ToIdx:       257,
+		Amount:      big.NewInt(11),
+		LoadAmount:  big.NewInt(0),
+		FromEthAddr: tc.Users["A"].Addr,
+		UserOrigin:  true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)
+
+	// expect no-error due not enough funds in a
+	// CreateAccountDepositTransfer transction
+	tx = common.L1Tx{
+		FromIdx:    0,
+		ToIdx:      257,
+		Amount:     big.NewInt(10),
+		LoadAmount: big.NewInt(10),
+		UserOrigin: true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(10), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(10), tx.EffectiveAmount)
+
+	// expect error due not enough funds in a CreateAccountDepositTransfer
+	// transction
+	tx = common.L1Tx{
+		FromIdx:    0,
+		ToIdx:      257,
+		Amount:     big.NewInt(11),
+		LoadAmount: big.NewInt(10),
+		UserOrigin: true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(10), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)
+
+	// expect error due not same TokenID
+	tx = common.L1Tx{
+		FromIdx:     256,
+		ToIdx:       258,
+		Amount:      big.NewInt(5),
+		LoadAmount:  big.NewInt(0),
+		FromEthAddr: tc.Users["A"].Addr,
+		UserOrigin:  true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)
+
+	// expect error due not same EthAddr
+	tx = common.L1Tx{
+		FromIdx:     256,
+		ToIdx:       257,
+		Amount:      big.NewInt(8),
+		LoadAmount:  big.NewInt(0),
+		FromEthAddr: tc.Users["B"].Addr,
+		UserOrigin:  true,
+	}
+	sdb.computeEffectiveAmounts(&tx)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveLoadAmount)
+	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)
+}
+
 func TestProcessTxsBalances(t *testing.T) {
 	dir, err := ioutil.TempDir("", "tmpdb")
 	require.Nil(t, err)
@@ -139,7 +249,7 @@ func TestProcessTxsBalances(t *testing.T) {
 	require.Nil(t, err)
 
 	// use Set of PoolL2 txs
-	poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow0)
+	poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow1)
 	assert.Nil(t, err)
 
 	_, err = sdb.ProcessTxs(ptc, coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
@@ -452,6 +562,7 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
 			FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
 			ToIdx:       0,
 			Type:        common.TxTypeCreateAccountDeposit,
+			UserOrigin:  true,
 		},
 	}
 	l2Txs := []common.PoolL2Tx{
@@ -499,6 +610,7 @@ func TestCircomTest(t *testing.T) {
 			FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
 			ToIdx:       0,
 			Type:        common.TxTypeCreateAccountDeposit,
+			UserOrigin:  true,
 		},
 	}
 	l2Txs := []common.PoolL2Tx{
@@ -565,7 +677,6 @@ func TestZKInputsHashTestVector0(t *testing.T) {
 	l1Txs := []common.L1Tx{
 		{
 			FromIdx: 0,
-			// LoadAmount: big.NewInt(10400),
 			LoadAmount: big.NewInt(16000000),
 			Amount:     big.NewInt(0),
 			TokenID:    1,
@@ -573,6 +684,7 @@ func TestZKInputsHashTestVector0(t *testing.T) {
 			FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
 			ToIdx:       0,
 			Type:        common.TxTypeCreateAccountDeposit,
+			UserOrigin:  true,
 		},
 	}
 	l2Txs := []common.PoolL2Tx{
@@ -593,6 +705,10 @@ func TestZKInputsHashTestVector0(t *testing.T) {
 		MaxTx:    32,
 		MaxL1Tx:  16,
 	}
+	// skip first batch to do the test with BatchNum=1
+	_, err = sdb.ProcessTxs(ptc, nil, nil, nil, nil)
+	require.Nil(t, err)
+
 	ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
 	require.Nil(t, err)
 
@@ -610,7 +726,7 @@ func TestZKInputsHashTestVector0(t *testing.T) {
 	toHash, err := ptOut.ZKInputs.ToHashGlobalData()
 	assert.Nil(t, err)
 	// value from js test vector
expectedToHash := "0000000000ff000000000100000000000000000000000000000000000000000000000000000000000000000015ba488d749f6b891d29d0bf3a72481ec812e4d4ecef2bf7a3fc64f3c010444200000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010003e87e00000100000000000000000000000000000000000000000000000000000000000000"
expectedToHash := "0000000000ff000000000100000000000000000000000000000000000000000000000000000000000000000015ba488d749f6b891d29d0bf3a72481ec812e4d4ecef2bf7a3fc64f3c010444200000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010003e87e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000001"
 	// checks are splitted to find the difference easier
 	assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000])
 	assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000])
@@ -619,7 +735,7 @@ func TestZKInputsHashTestVector0(t *testing.T) {
 	h, err := ptOut.ZKInputs.HashGlobalData()
 	require.Nil(t, err)
 	// value from js test vector
-	assert.Equal(t, "80757288244566854497474223360206077562032050734432637237701187686677568506", h.String())
+	assert.Equal(t, "4356692423721763303547321618014315464040324829724049399065961225345730555597", h.String())
 }
 
 func TestZKInputsHashTestVector1(t *testing.T) {
@@ -646,6 +762,7 @@ func TestZKInputsHashTestVector1(t *testing.T) {
 			FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
 			ToIdx:       0,
 			Type:        common.TxTypeCreateAccountDeposit,
+			UserOrigin:  true,
 		},
 		{
 			FromIdx: 0,
@@ -656,6 +773,7 @@ func TestZKInputsHashTestVector1(t *testing.T) {
 			FromEthAddr: ethCommon.HexToAddress("0x2b5ad5c4795c026514f8317c7a215e218dccd6cf"),
 			ToIdx:       0,
 			Type:        common.TxTypeCreateAccountDeposit,
+			UserOrigin:  true,
 		},
 	}
 	l2Txs := []common.PoolL2Tx{
@@ -676,6 +794,10 @@ func TestZKInputsHashTestVector1(t *testing.T) {
|
|||||||
MaxTx: 32,
|
MaxTx: 32,
|
||||||
MaxL1Tx: 16,
|
MaxL1Tx: 16,
|
||||||
}
|
}
|
||||||
|
// skip first batch to do the test with BatchNum=1
|
||||||
|
_, err = sdb.ProcessTxs(ptc, nil, nil, nil, nil)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
|
ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
|
|
||||||
@@ -697,7 +819,7 @@ func TestZKInputsHashTestVector1(t *testing.T) {
|
|||||||
toHash, err := ptOut.ZKInputs.ToHashGlobalData()
|
toHash, err := ptOut.ZKInputs.ToHashGlobalData()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
// value from js test vector
|
// value from js test vector
|
||||||
expectedToHash := "0000000000ff0000000001010000000000000000000000000000000000000000000000000000000000000000304a3f3aef4f416cca887aab7265227449077627138345c2eb25bf8ff946b09500000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00000000000028a00000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000010003e88900000101000000000000000000000000000000000000000000000000000000000000"
expectedToHash := "0000000000ff0000000001010000000000000000000000000000000000000000000000000000000000000000304a3f3aef4f416cca887aab7265227449077627138345c2eb25bf8ff946b09500000000000000000000000000000000000000000000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700000000000028a00000000000010000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00000000000028a000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000010003e889000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000000001"
 	// checks are splitted to find the difference easier
 	assert.Equal(t, expectedToHash[:1000], hex.EncodeToString(toHash)[:1000])
 	assert.Equal(t, expectedToHash[1000:2000], hex.EncodeToString(toHash)[1000:2000])
@@ -706,5 +828,5 @@ func TestZKInputsHashTestVector1(t *testing.T) {
 	h, err := ptOut.ZKInputs.HashGlobalData()
 	require.Nil(t, err)
 	// value from js test vector
-	assert.Equal(t, "10900521462378877053056992084240080637954406133884857263674494661625916419481", h.String())
+	assert.Equal(t, "20293112365009290386650039345314592436395562810005523677125576447132206192598", h.String())
 }
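The expected ToHashGlobalData strings in these tests run to thousands of hex characters, which is why the asserts above slice them into 1000-character chunks: a failing chunk narrows down where the encoding diverges. A standalone sketch of the same chunked-comparison idea (firstDiffChunk and the chunk size are illustrative, not part of the repository):

package main

import (
	"encoding/hex"
	"fmt"
)

// firstDiffChunk compares two hex strings in fixed-size chunks and returns
// the index of the first chunk that differs, or -1 if the strings match.
// Comparing chunk by chunk mirrors the test above: a failing assert on a
// 1000-character slice is far easier to read than a diff of the full string.
func firstDiffChunk(expected, got string, chunk int) int {
	max := len(expected)
	if len(got) > max {
		max = len(got)
	}
	for i := 0; i*chunk < max; i++ {
		start := i * chunk
		end := start + chunk
		if slice(expected, start, end) != slice(got, start, end) {
			return i
		}
	}
	return -1
}

// slice returns s[start:end] clamped to the length of s.
func slice(s string, start, end int) string {
	if start > len(s) {
		return ""
	}
	if end > len(s) {
		end = len(s)
	}
	return s[start:end]
}

func main() {
	expected := "0000ff00"
	got, _ := hex.DecodeString("0000ff01")
	fmt.Println(firstDiffChunk(expected, hex.EncodeToString(got), 4)) // prints 1
}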
@@ -251,7 +251,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs) (tx *types.T
 	var l2DataBytes []byte
 	for i := 0; i < len(args.L2TxsData); i++ {
 		l2 := args.L2TxsData[i]
-		bytesl2, err := l2.Bytes(uint32(nLevels))
+		bytesl2, err := l2.BytesDataAvailability(uint32(nLevels))
 		if err != nil {
 			return nil, err
 		}
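The loop above now serializes each L2 transaction with its data-availability encoding (BytesDataAvailability, sized by nLevels); the surrounding code presumably appends each per-tx slice into l2DataBytes for the forge call. A minimal sketch of that concatenation pattern under an assumed fixed-width encoder (encodeTxDA is hypothetical; the real byte layout is defined by the protocol spec):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeTxDA is a stand-in for a per-transaction data-availability encoder:
// it packs fromIdx, toIdx and amount into a fixed-width slice. The real
// encoding depends on nLevels; only the concatenation pattern matters here.
func encodeTxDA(fromIdx, toIdx uint32, amount uint16) []byte {
	b := make([]byte, 10)
	binary.BigEndian.PutUint32(b[0:4], fromIdx)
	binary.BigEndian.PutUint32(b[4:8], toIdx)
	binary.BigEndian.PutUint16(b[8:10], amount)
	return b
}

func main() {
	txs := [][3]uint32{{256, 257, 10}, {257, 256, 20}}
	var l2DataBytes []byte
	for _, tx := range txs {
		// Append each tx's fixed-width encoding; the forge call sends the
		// concatenation as a single calldata blob.
		l2DataBytes = append(l2DataBytes, encodeTxDA(tx[0], tx[1], uint16(tx[2]))...)
	}
	fmt.Printf("%d txs -> %d bytes\n", len(txs), len(l2DataBytes))
}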
@@ -80,9 +80,14 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
 	assert.Equal(t, len(block.Rollup.L1UserTxs), len(syncBlock.Rollup.L1UserTxs))
 	dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs()
 	require.Nil(t, err)
-	// Ignore BatchNum in syncBlock.L1UserTxs because this value is set by the HistoryDB
+	// Ignore BatchNum in syncBlock.L1UserTxs because this value is set by
+	// the HistoryDB. Also ignore EffectiveAmount & EffectiveLoadAmount
+	// because this value is set by StateDB.ProcessTxs.
 	for i := range syncBlock.Rollup.L1UserTxs {
 		syncBlock.Rollup.L1UserTxs[i].BatchNum = block.Rollup.L1UserTxs[i].BatchNum
+		syncBlock.Rollup.L1UserTxs[i].EffectiveAmount = block.Rollup.L1UserTxs[i].EffectiveAmount
+		syncBlock.Rollup.L1UserTxs[i].EffectiveLoadAmount =
+			block.Rollup.L1UserTxs[i].EffectiveLoadAmount
 	}
 	assert.Equal(t, block.Rollup.L1UserTxs, syncBlock.Rollup.L1UserTxs)
 	for _, tx := range block.Rollup.L1UserTxs {
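The copy-then-compare pattern above, overwriting fields that are filled in elsewhere (BatchNum by the HistoryDB, EffectiveAmount and EffectiveLoadAmount by StateDB.ProcessTxs) before the deep-equality assert, keeps the comparison focused on what the Synchronizer itself is responsible for. A generic sketch of the idea with cut-down, illustrative types:

package main

import (
	"fmt"
	"math/big"
	"reflect"
)

// tx is a cut-down stand-in for an L1 user transaction; only the fields
// needed to illustrate the comparison are modeled.
type tx struct {
	FromIdx         uint32
	Amount          *big.Int
	EffectiveAmount *big.Int // derived: filled by the state processor, not by the syncer input
}

func main() {
	expected := []tx{{FromIdx: 256, Amount: big.NewInt(100), EffectiveAmount: big.NewInt(100)}}
	synced := []tx{{FromIdx: 256, Amount: big.NewInt(100), EffectiveAmount: nil}}

	// Overwrite the derived field with the expected value so the deep
	// comparison only judges the fields the synchronizer must reproduce.
	for i := range synced {
		synced[i].EffectiveAmount = expected[i].EffectiveAmount
	}
	fmt.Println(reflect.DeepEqual(expected, synced)) // true
}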
@@ -134,6 +139,7 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
 		assert.Equal(t, batch.L2Txs, syncBatch.L2Txs)
 		// In exit tree, we only check AccountIdx and Balance, because
 		// it's what we have precomputed before.
+		require.Equal(t, len(batch.ExitTree), len(syncBatch.ExitTree))
 		for j := range batch.ExitTree {
 			exit := &batch.ExitTree[j]
 			assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx)
@@ -384,6 +390,7 @@ func TestSync(t *testing.T) {
 		> block // blockNum=2
 
 		CreateAccountDepositTransfer(1) E-A: 1000, 200 // Idx=256+7=263
+		ForceTransfer(1) C-B: 80
 		ForceExit(1) A: 100
 		ForceExit(1) B: 80
 		ForceTransfer(1) A-D: 100
@@ -415,7 +422,7 @@ func TestSync(t *testing.T) {
 	// blocks 1 (blockNum=3)
 	i = 1
 	require.Equal(t, 3, int(blocks[i].Block.Num))
-	require.Equal(t, 4, len(blocks[i].Rollup.L1UserTxs))
+	require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
 	require.Equal(t, 2, len(blocks[i].Rollup.Batches))
 	require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
 
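The added ForceTransfer(1) C-B: 80 line is also why the expected L1UserTxs count for this block moves from 4 to 5: the snippet now lists five L1 user operations (one CreateAccountDepositTransfer, two ForceTransfers, two ForceExits). A rough illustrative counter over the til block text (countL1UserOps and its prefix list are hypothetical, not the real til parser):

package main

import (
	"fmt"
	"strings"
)

// countL1UserOps counts the lines of a til block that start with an op name
// forced through the L1 user queue. It only shows where the expected count
// of 5 in the assertion comes from; the real til parser is more involved.
func countL1UserOps(block string) int {
	prefixes := []string{"CreateAccountDepositTransfer", "ForceTransfer", "ForceExit"}
	n := 0
	for _, line := range strings.Split(block, "\n") {
		line = strings.TrimSpace(line)
		for _, p := range prefixes {
			if strings.HasPrefix(line, p) {
				n++
				break
			}
		}
	}
	return n
}

func main() {
	block := `
	CreateAccountDepositTransfer(1) E-A: 1000, 200
	ForceTransfer(1) C-B: 80
	ForceExit(1) A: 100
	ForceExit(1) B: 80
	ForceTransfer(1) A-D: 100
	`
	fmt.Println(countL1UserOps(block)) // 5
}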
@@ -333,3 +333,23 @@ PoolTransferToEthAddr(1) A-B: 100 (126)
 // D(0): 360
 // F(0): 100
 `
+
+// SetPoolL2MinimumFlow1 contains the same transactions than the
+// SetPoolL2MinimumFlow0, but simulating coming from the smart contract
+// (always with the parameter ToIdx filled)
+var SetPoolL2MinimumFlow1 = `
+Type: PoolL2
+
+PoolTransfer(0) A-B: 100 (126)
+PoolTransfer(0) D-F: 100 (126)
+PoolExit(0) A: 100 (126)
+PoolTransfer(1) A-B: 100 (126)
+
+// Expected balances:
+// Coord(0): 105, Coord(1): 40
+// A(0): 510, A(1): 170
+// B(0): 480, B(1): 190
+// C(0): 845, C(1): 100
+// D(0): 360
+// F(0): 100
+`
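SetPoolL2MinimumFlow1 mirrors SetPoolL2MinimumFlow0 but addresses every transfer by an already-created account index (ToIdx) instead of by Ethereum address (PoolTransferToEthAddr in Flow0), which is how transactions look once the destination account exists. A minimal sketch of the two addressing modes using a cut-down struct (the field names follow the common package, but the struct itself is only illustrative):

package main

import (
	"fmt"
	"math/big"
)

// poolTx is a cut-down illustration of an L2 pool transfer: the receiver is
// given either by account index (ToIdx) or by Ethereum address.
type poolTx struct {
	FromIdx   uint32
	ToIdx     uint32 // 0 means "not set": route by address instead
	ToEthAddr string
	Amount    *big.Int
}

// describe reports which addressing mode a transfer uses.
func describe(tx poolTx) string {
	if tx.ToIdx != 0 {
		return fmt.Sprintf("transfer %v from idx %d to idx %d", tx.Amount, tx.FromIdx, tx.ToIdx)
	}
	return fmt.Sprintf("transfer %v from idx %d to address %s", tx.Amount, tx.FromIdx, tx.ToEthAddr)
}

func main() {
	// Flow0 style: destination given by Ethereum address (TransferToEthAddr).
	byAddr := poolTx{FromIdx: 256, ToEthAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf", Amount: big.NewInt(100)}
	// Flow1 style: destination account already exists, so ToIdx is filled.
	byIdx := poolTx{FromIdx: 256, ToIdx: 257, Amount: big.NewInt(100)}
	fmt.Println(describe(byAddr))
	fmt.Println(describe(byIdx))
}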
@@ -207,7 +207,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) {
 				toIdxName: inst.to,
 				L1Tx:      tx,
 			}
-			if err := tc.addToL1Queue(testTx); err != nil {
+			if err := tc.addToL1UserQueue(testTx); err != nil {
 				return nil, err
 			}
 		case common.TxTypeDeposit, common.TxTypeDepositTransfer: // tx source: L1UserTx
@@ -234,7 +234,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) {
 				toIdxName: inst.to,
 				L1Tx:      tx,
 			}
-			if err := tc.addToL1Queue(testTx); err != nil {
+			if err := tc.addToL1UserQueue(testTx); err != nil {
 				return nil, err
 			}
 		case common.TxTypeTransfer: // L2Tx
@@ -274,7 +274,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) {
 				toIdxName: inst.to,
 				L1Tx:      tx,
 			}
-			if err := tc.addToL1Queue(testTx); err != nil {
+			if err := tc.addToL1UserQueue(testTx); err != nil {
 				return nil, err
 			}
 		case common.TxTypeExit: // tx source: L2Tx
@@ -316,7 +316,7 @@ func (tc *Context) GenerateBlocks(set string) ([]common.BlockData, error) {
 				toIdxName: inst.to,
 				L1Tx:      tx,
 			}
-			if err := tc.addToL1Queue(testTx); err != nil {
+			if err := tc.addToL1UserQueue(testTx); err != nil {
 				return nil, err
 			}
 		case typeNewBatch:
@@ -449,8 +449,8 @@ func (tc *Context) setIdxs() error {
 	return nil
 }
 
-// addToL1Queue adds the L1Tx into the queue that is open and has space
-func (tc *Context) addToL1Queue(tx L1Tx) error {
+// addToL1UserQueue adds the L1UserTx into the queue that is open and has space
+func (tc *Context) addToL1UserQueue(tx L1Tx) error {
 	if len(tc.Queues[tc.openToForge]) >= tc.rollupConstMaxL1UserTx {
 		// if current OpenToForge queue reached its Max, move into a
 		// new queue
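The comment at the end of the hunk describes the rollover rule: when the queue currently open to forge reaches rollupConstMaxL1UserTx entries, new L1 user transactions start a fresh queue. A minimal sketch of that rule over hypothetical types (the real addToL1UserQueue also tracks which queue is forged next and other bookkeeping):

package main

import "fmt"

// l1Queues is a toy model of the per-queue L1 user transactions: queues[open]
// is the queue currently open to forge.
type l1Queues struct {
	queues  [][]string
	open    int
	maxPerQ int
}

// add appends a tx to the open queue, opening a new queue first if the
// current one is already full.
func (q *l1Queues) add(tx string) {
	if len(q.queues[q.open]) >= q.maxPerQ {
		q.queues = append(q.queues, nil)
		q.open++
	}
	q.queues[q.open] = append(q.queues[q.open], tx)
}

func main() {
	q := &l1Queues{queues: make([][]string, 1), maxPerQ: 2}
	for i := 0; i < 5; i++ {
		q.add(fmt.Sprintf("tx%d", i))
	}
	// 5 txs with a max of 2 per queue end up in 3 queues: [tx0 tx1] [tx2 tx3] [tx4]
	fmt.Println(q.queues)
}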
@@ -698,6 +698,8 @@ func (tc *Context) FillBlocksL1UserTxsBatchNum(blocks []common.BlockData) {
 // - blocks[].Rollup.Batch.L1CoordinatorTxs[].BatchNum
 // - blocks[].Rollup.Batch.L1CoordinatorTxs[].EthBlockNum
 // - blocks[].Rollup.Batch.L1CoordinatorTxs[].Position
+// - blocks[].Rollup.Batch.L1CoordinatorTxs[].EffectiveAmount
+// - blocks[].Rollup.Batch.L1CoordinatorTxs[].EffectiveLoadAmount
 // - blocks[].Rollup.Batch.L2Txs[].TxID
 // - blocks[].Rollup.Batch.L2Txs[].Position
 // - blocks[].Rollup.Batch.L2Txs[].Nonce
@@ -779,6 +781,8 @@ func (tc *Context) FillBlocksExtra(blocks []common.BlockData, cfg *ConfigExtra)
 			tx := &batch.L1CoordinatorTxs[k]
 			tx.Position = position
 			position++
+			tx.EffectiveAmount = big.NewInt(0)
+			tx.EffectiveLoadAmount = big.NewInt(0)
 			nTx, err := common.NewL1Tx(tx)
 			if err != nil {
 				return err
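FillBlocksExtra now also fills EffectiveAmount and EffectiveLoadAmount for L1CoordinatorTxs, setting both to zero before common.NewL1Tx is called; coordinator-injected transactions only create accounts, so no funds are moved. A cut-down sketch of that fill pass over illustrative fixture types (the real function fills many more fields and derives Position differently):

package main

import (
	"fmt"
	"math/big"
)

// Cut-down stand-ins for the test fixture types; only the fields touched by
// this fill pass are modeled.
type coordTx struct {
	Position            int
	EffectiveAmount     *big.Int
	EffectiveLoadAmount *big.Int
}

type batchData struct{ L1CoordinatorTxs []coordTx }

// fillCoordinatorTxs assigns positions and zeroes the effective amounts of
// coordinator-injected L1 txs, mirroring the pattern in the hunk above.
func fillCoordinatorTxs(batches []batchData) {
	for i := range batches {
		position := 0
		for k := range batches[i].L1CoordinatorTxs {
			tx := &batches[i].L1CoordinatorTxs[k]
			tx.Position = position
			position++
			tx.EffectiveAmount = big.NewInt(0)
			tx.EffectiveLoadAmount = big.NewInt(0)
		}
	}
}

func main() {
	batches := []batchData{{L1CoordinatorTxs: make([]coordTx, 2)}}
	fillCoordinatorTxs(batches)
	fmt.Println(batches[0].L1CoordinatorTxs[0].Position, batches[0].L1CoordinatorTxs[1].Position) // 0 1
	fmt.Println(batches[0].L1CoordinatorTxs[0].EffectiveAmount)                                   // 0
}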