
Add methods for ZKInputs IntermStates generation

- Add L1Tx TxCompressedData method
- Add PoolL2Tx TxCompressedDataV2 method
- Update ProcessTxs logic
- Add ZKInputs Intermediate States & Fee parameters calculation
Branch: feature/sql-semaphore1
Author: arnaucube (4 years ago)
Parent commit: d3a38a3ee1
10 changed files with 388 additions and 76 deletions
1. common/fee.go (+10, -10)
2. common/l1tx.go (+37, -0)
3. common/l1tx_test.go (+16, -0)
4. common/pooll2tx.go (+52, -1)
5. common/pooll2tx_test.go (+49, -6)
6. common/token.go (+5, -0)
7. common/zk.go (+12, -7)
8. db/statedb/txprocessors.go (+117, -52)
9. db/statedb/txprocessors_test.go (+74, -0)
10. db/statedb/utils.go (+16, -0)

common/fee.go (+10, -10)

@@ -6,6 +6,16 @@ import (
"math/big"
)
// MaxFeePlan is the maximum value of the FeePlan
const MaxFeePlan = 256
// FeePlan represents the fee model, a position in the array indicates the
// percentage of tokens paid in concept of fee for a transaction
var FeePlan = [MaxFeePlan]float64{}
// FeeFactorLsh60 is the feeFactor << 60
var FeeFactorLsh60 [256]*big.Int
// RecommendedFee is the recommended fee to pay in USD per transaction set by
// the coordinator according to the tx type (if the tx requires to create an
// account and register, only register or the account already exists)
@@ -31,13 +41,6 @@ func (f FeeSelector) Percentage() float64 {
}
}
- // MaxFeePlan is the maximum value of the FeePlan
- const MaxFeePlan = 256
- // FeePlan represents the fee model, a position in the array indicates the
- // percentage of tokens paid in concept of fee for a transaction
- var FeePlan = [MaxFeePlan]float64{}
// CalcFeeAmount calculates the fee amount in tokens from an amount and
// feeSelector (fee index).
func CalcFeeAmount(amount *big.Int, feeSel FeeSelector) (*big.Int, error) {
@@ -55,9 +58,6 @@ func init() {
setFeeFactorLsh60(&FeeFactorLsh60)
}
- // FeeFactorLsh60 is the feeFactor << 60
- var FeeFactorLsh60 [256]*big.Int
func setFeeFactorLsh60(feeFactorLsh60 *[256]*big.Int) {
feeFactorLsh60[0], _ = new(big.Int).SetString("0", 10)
feeFactorLsh60[1], _ = new(big.Int).SetString("3", 10)

common/l1tx.go (+37, -0)

@@ -146,6 +146,43 @@ func (tx L1Tx) Tx() Tx {
return genericTx
}
// TxCompressedData spec:
// [ 1 bits ] empty (toBJJSign) // 1 byte
// [ 8 bits ] empty (userFee) // 1 byte
// [ 40 bits ] empty (nonce) // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData() (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, err
}
var b [31]byte
// b[0:7] empty: no fee neither nonce
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, err
}
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, err
}
copy(b[19:25], fromIdxBytes[:])
copy(b[25:27], []byte{0, 1}) // TODO this will be generated by the ChainID config parameter
// b[27:] empty: no signature
bi := new(big.Int).SetBytes(b[:])
return bi, nil
}
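For reference, a standalone sketch (not part of the commit) that unpacks the test vector used in TestL1TxCompressedData below according to the byte offsets in the function above; the decoded values are the ones set in that test (TokenID 5, amount 4, ToIdx 3, FromIdx 2, chainID 1):

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

func main() {
	// value produced by L1Tx.TxCompressedData in the test below
	bi, _ := new(big.Int).SetString("7307597389635308713748674793997299267460566876160", 10)
	// left-pad back into the 31-byte layout described in the spec comment
	var b [31]byte
	copy(b[31-len(bi.Bytes()):], bi.Bytes())
	tokenID := binary.BigEndian.Uint32(b[7:11])    // 5
	amountF16 := binary.BigEndian.Uint16(b[11:13]) // 4 (the Float16 encoding of amount 4)
	toIdx := new(big.Int).SetBytes(b[13:19])       // 3
	fromIdx := new(big.Int).SetBytes(b[19:25])     // 2
	chainID := binary.BigEndian.Uint16(b[25:27])   // 1 (the hardcoded {0, 1} above)
	fmt.Println(tokenID, amountF16, toIdx, fromIdx, chainID)
}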
// BytesGeneric returns the generic representation of a L1Tx. This method is
// used to compute the []byte representation of a L1UserTx, and also to compute
// the L1TxData for the ZKInputs (at the HashGlobalInputs), using this method

common/l1tx_test.go (+16, -0)

@@ -49,6 +49,22 @@ func TestNewL1CoordinatorTx(t *testing.T) {
assert.Equal(t, "0x01000000000000cafe005800", l1Tx.TxID.String())
}
func TestL1TxCompressedData(t *testing.T) {
tx := L1Tx{
FromIdx: 2,
ToIdx: 3,
Amount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err := tx.TxCompressedData()
assert.Nil(t, err)
// test vector value generated from javascript implementation
expectedStr := "7307597389635308713748674793997299267460566876160"
assert.Equal(t, expectedStr, txCompressedData.String())
assert.Equal(t, "050004000000000003000000000002000100000000", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestL1userTxByteParsers(t *testing.T) {
var pkComp babyjub.PublicKeyComp
pkCompL := []byte("0x56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")

common/pooll2tx.go (+52, -1)

@@ -140,13 +140,64 @@ func (tx *PoolL2Tx) TxCompressedData() (*big.Int, error) {
return nil, err
}
copy(b[19:25], fromIdxBytes[:])
- copy(b[25:27], []byte{0, 1, 0, 0}) // TODO check js implementation (unexpected behaviour from test vector generated from js)
copy(b[25:27], []byte{0, 1}) // TODO this will be generated by the ChainID config parameter
copy(b[27:31], sc.Bytes())
bi := new(big.Int).SetBytes(b[:])
return bi, nil
}
// TxCompressedDataV2 spec:
// [ 1 bits ] toBJJSign // 1 byte
// [ 8 bits ] userFee // 1 byte
// [ 40 bits ] nonce // 5 bytes
// [ 32 bits ] tokenID // 4 bytes
// [ 16 bits ] amountFloat16 // 2 bytes
// [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil {
tx.Amount = big.NewInt(0)
}
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, err
}
var b [25]byte
toBJJSign := byte(0)
if tx.ToBJJ != nil && babyjub.PointCoordSign(tx.ToBJJ.X) {
toBJJSign = byte(1)
}
b[0] = toBJJSign
b[1] = byte(tx.Fee)
nonceBytes, err := tx.Nonce.Bytes()
if err != nil {
return nil, err
}
copy(b[2:7], nonceBytes[:])
copy(b[7:11], tx.TokenID.Bytes())
copy(b[11:13], amountFloat16.Bytes())
toIdxBytes, err := tx.ToIdx.Bytes()
if err != nil {
return nil, err
}
copy(b[13:19], toIdxBytes[:])
fromIdxBytes, err := tx.FromIdx.Bytes()
if err != nil {
return nil, err
}
copy(b[19:25], fromIdxBytes[:])
bi := new(big.Int).SetBytes(b[:])
return bi, nil
}
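As a worked check of this layout: the expected value in TestTxCompressedDataV2 below, hex 010c000000000b0000000a0009000000000008000000000007, decomposes into 01 (toBJJSign set), 0c (userFee 12), 000000000b (nonce 11), 0000000a (tokenID 10), 0009 (amountFloat16 for amount 9), 000000000008 (toIdx 8) and 000000000007 (fromIdx 7).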
// RqTxCompressedDataV2 is like the TxCompressedDataV2 but using the 'Rq'
// parameters. In a future iteration of the hermez-node, the 'Rq' parameters
// can be inside a struct, which contains the 'Rq' transaction grouped inside,
// so then computing the 'RqTxCompressedDataV2' would be just calling
// 'tx.Rq.TxCompressedDataV2()'.
// RqTxCompressedDataV2 spec:
// [ 1 bits ] rqToBJJSign // 1 byte
// [ 8 bits ] rqUserFee // 1 byte

common/pooll2tx_test.go (+49, -6)

@@ -40,10 +40,7 @@ func TestTxCompressedData(t *testing.T) {
// test vector value generated from javascript implementation
expectedStr := "1766847064778421992193717128424891165872736891548909569553540449389241871"
assert.Equal(t, expectedStr, txCompressedData.String())
- expected, ok := new(big.Int).SetString(expectedStr, 10)
- assert.True(t, ok)
- assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
- assert.Equal(t, "10000000000060000000500040000000000030000000000020001c60be60f", hex.EncodeToString(txCompressedData.Bytes())[1:])
assert.Equal(t, "010000000000060000000500040000000000030000000000020001c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
tx = PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
@@ -58,10 +55,56 @@ func TestTxCompressedData(t *testing.T) {
// test vector value generated from javascript implementation
expectedStr = "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
- expected, ok = new(big.Int).SetString(expectedStr, 10)
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.Nil(t, err)
tx := PoolL2Tx{
FromIdx: 7,
ToIdx: 8,
Amount: big.NewInt(9),
TokenID: 10,
Nonce: 11,
Fee: 12,
ToBJJ: sk.Public(),
}
txCompressedData, err := tx.TxCompressedDataV2()
assert.Nil(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestRqTxCompressedDataV2(t *testing.T) {
var sk babyjub.PrivateKey
_, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
assert.Nil(t, err)
tx := PoolL2Tx{
RqFromIdx: 7,
RqToIdx: 8,
RqAmount: big.NewInt(9),
RqTokenID: 10,
RqNonce: 11,
RqFee: 12,
RqToBJJ: sk.Public(),
}
txCompressedData, err := tx.RqTxCompressedDataV2()
assert.Nil(t, err)
// test vector value generated from javascript implementation
expectedStr := "6571340879233176732837827812956721483162819083004853354503"
assert.Equal(t, expectedStr, txCompressedData.String())
expected, ok := new(big.Int).SetString(expectedStr, 10)
assert.True(t, ok)
assert.Equal(t, expected.Bytes(), txCompressedData.Bytes())
- assert.Equal(t, "10c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes())[1:])
assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
}
func TestHashToSign(t *testing.T) {

common/token.go (+5, -0)

@@ -52,3 +52,8 @@ func TokenIDFromBytes(b []byte) (TokenID, error) {
tid := binary.BigEndian.Uint32(b[:4])
return TokenID(tid), nil
}
// TokenIDFromBigInt returns a TokenID with the value of the given *big.Int
func TokenIDFromBigInt(b *big.Int) TokenID {
return TokenID(b.Int64())
}

common/zk.go (+12, -7)

@@ -43,7 +43,7 @@ type ZKMetadata struct {
// ZKInputs represents the inputs that will be used to generate the zkSNARK proof
type ZKInputs struct {
- Metadata ZKMetadata
Metadata ZKMetadata `json:"-"`
//
// General
@@ -183,23 +183,28 @@ type ZKInputs struct {
// will not be used.
// decode-tx
- // ISOnChain indicates if tx is L1 (true) or L2 (false)
// ISOnChain indicates if tx is L1 (true (1)) or L2 (false (0))
ISOnChain []*big.Int `json:"imOnChain"` // bool, len: [nTx - 1]
// ISOutIdx current index account for each Tx
// Contains the index of the created account in case that the tx is of
// account creation type.
ISOutIdx []*big.Int `json:"imOutIdx"` // uint64 (max nLevels bits), len: [nTx - 1]
// rollup-tx
- // ISStateRoot root at the moment of the Tx, the state root value once the Tx is processed into the state tree
// ISStateRoot root at the moment of the Tx (once processed), the state root value once the Tx is processed into the state tree
ISStateRoot []*big.Int `json:"imStateRoot"` // Hash, len: [nTx - 1]
- // ISExitTree root at the moment of the Tx the value once the Tx is processed into the exit tree
// ISExitTree root at the moment (once processed) of the Tx the value
// once the Tx is processed into the exit tree
ISExitRoot []*big.Int `json:"imExitRoot"` // Hash, len: [nTx - 1]
- // ISAccFeeOut accumulated fees once the Tx is processed
// ISAccFeeOut accumulated fees once the Tx is processed. Contains the
// array of FeeAccount Balances at each moment of each Tx processed.
ISAccFeeOut [][]*big.Int `json:"imAccFeeOut"` // big.Int, len: [nTx - 1][maxFeeIdxs]
// fee-tx
- // ISStateRootFee root at the moment of the Tx, the state root value once the Tx is processed into the state tree
// ISStateRootFee root at the moment of the Tx (once processed), the state root value once the Tx is processed into the state tree
ISStateRootFee []*big.Int `json:"imStateRootFee"` // Hash, len: [maxFeeIdxs - 1]
// ISInitStateRootFee state root once all L1-L2 tx are processed (before computing the fees-tx)
ISInitStateRootFee *big.Int `json:"imInitStateRootFee"` // Hash
- // ISFinalAccFee final accumulated fees (before computing the fees-tx)
// ISFinalAccFee final accumulated fees (before computing the fees-tx).
// Contains the final values of the ISAccFeeOut parameter
ISFinalAccFee []*big.Int `json:"imFinalAccFee"` // big.Int, len: [maxFeeIdxs - 1]
}
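Side note on the Metadata change above: tagging the field with json:"-" keeps it out of the marshaled circuit input. A minimal, standalone illustration of that tag's effect (the struct here is a stand-in, not the real ZKInputs):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// stand-in struct with the same tag pattern as ZKInputs
type zkInputsLike struct {
	Metadata  string     `json:"-"`
	ISOnChain []*big.Int `json:"imOnChain"`
}

func main() {
	z := zkInputsLike{
		Metadata:  "internal-only data",
		ISOnChain: []*big.Int{big.NewInt(1), big.NewInt(0)},
	}
	b, _ := json.Marshal(z)
	fmt.Println(string(b)) // {"imOnChain":[1,0]} -- Metadata is omitted
}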

db/statedb/txprocessors.go (+117, -52)

@@ -2,6 +2,7 @@ package statedb
import (
"errors"
"fmt"
"io/ioutil"
"math/big"
"os"
@@ -72,6 +73,10 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
}
defer s.resetZKInputs()
if len(coordIdxs) >= int(ptc.MaxFeeTx) {
return nil, fmt.Errorf("CoordIdxs (%d) length must be smaller than MaxFeeTx (%d)", len(coordIdxs), ptc.MaxFeeTx)
}
s.accumulatedFees = make(map[common.Idx]*big.Int)
nTx := len(l1usertxs) + len(l1coordinatortxs) + len(l2txs)
@@ -94,7 +99,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
}
// TBD if ExitTree is only in memory or stored in disk, for the moment
- // only needed in memory
// is only needed in memory
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
tmpDir, err := ioutil.TempDir("", "hermez-statedb-exittree")
if err != nil {
@@ -122,17 +127,6 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
if err != nil {
return nil, err
}
- if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
- if exitIdx != nil && exitTree != nil {
- exits[s.i] = processedExit{
- exit: true,
- newExit: newExit,
- idx: *exitIdx,
- acc: *exitAccount,
- }
- }
- s.i++
- }
if s.typ == TypeSynchronizer && createdAccount != nil {
createdAccounts = append(createdAccounts, *createdAccount)
}
@@ -143,6 +137,22 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
return nil, err
}
s.zki.Metadata.L1TxsData = append(s.zki.Metadata.L1TxsData, l1TxData)
if s.i < nTx-1 {
s.zki.ISOutIdx[s.i] = s.idx.BigInt()
s.zki.ISStateRoot[s.i] = s.mt.Root().BigInt()
}
}
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[s.i] = processedExit{
exit: true,
newExit: newExit,
idx: *exitIdx,
acc: *exitAccount,
}
}
s.i++
} }
}
@@ -164,6 +174,12 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
return nil, err
}
s.zki.Metadata.L1TxsData = append(s.zki.Metadata.L1TxsData, l1TxData)
if s.i < nTx-1 {
s.zki.ISOutIdx[s.i] = s.idx.BigInt()
s.zki.ISStateRoot[s.i] = s.mt.Root().BigInt()
}
s.i++
}
}
@@ -179,20 +195,49 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
if err != nil {
return nil, err
}
// collectedFees will contain the amount of fee collected for each
// TokenID
var collectedFees map[common.TokenID]*big.Int
- if s.typ == TypeSynchronizer {
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
collectedFees = make(map[common.TokenID]*big.Int)
for tokenID := range coordIdxsMap {
collectedFees[tokenID] = big.NewInt(0)
}
}
if s.zki != nil {
// get the feePlanTokens
feePlanTokens, err := s.getFeePlanTokens(coordIdxs, l2txs)
if err != nil {
log.Error(err)
return nil, err
}
copy(s.zki.FeePlanTokens, feePlanTokens)
}
// Process L2Txs
for i := 0; i < len(l2txs); i++ {
exitIdx, exitAccount, newExit, err := s.processL2Tx(coordIdxsMap, collectedFees, exitTree, &l2txs[i])
if err != nil {
return nil, err
}
if s.zki != nil {
l2TxData, err := l2txs[i].L2Tx().Bytes(s.zki.Metadata.NLevels)
if err != nil {
return nil, err
}
s.zki.Metadata.L2TxsData = append(s.zki.Metadata.L2TxsData, l2TxData)
if s.i < nTx-1 {
// Intermediate States
s.zki.ISOutIdx[s.i] = s.idx.BigInt()
s.zki.ISStateRoot[s.i] = s.mt.Root().BigInt()
s.zki.ISAccFeeOut[s.i] = formatAccumulatedFees(collectedFees, s.zki.FeePlanTokens)
}
if s.i == nTx-1 {
s.zki.ISFinalAccFee = formatAccumulatedFees(collectedFees, s.zki.FeePlanTokens)
}
}
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
if exitIdx != nil && exitTree != nil {
exits[s.i] = processedExit{
@@ -204,13 +249,11 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
}
s.i++
}
- if s.zki != nil {
- l2TxData, err := l2txs[i].L2Tx().Bytes(s.zki.Metadata.NLevels)
- if err != nil {
- return nil, err
- }
- s.zki.Metadata.L2TxsData = append(s.zki.Metadata.L2TxsData, l2TxData)
- }
}
if s.zki != nil {
// before computing the Fees txs, set the ISInitStateRootFee
s.zki.ISInitStateRootFee = s.mt.Root().BigInt()
}
// distribute the AccumulatedFees from the processed L2Txs into the
@@ -242,6 +285,8 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
// add Coord Idx to ZKInputs.FeeTxsData
s.zki.FeeIdxs[iFee] = idx.BigInt()
s.zki.ISStateRootFee[iFee] = s.mt.Root().BigInt()
}
iFee++
}
@@ -293,6 +338,10 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
}
s.zki.OldKey2[i] = p.OldKey.BigInt()
s.zki.OldValue2[i] = p.OldValue.BigInt()
if i < nTx-1 {
s.zki.ISExitRoot[i] = exitTree.Root().BigInt()
}
}
}
if s.typ == TypeSynchronizer {
@@ -310,17 +359,9 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
// compute last ZKInputs parameters
s.zki.GlobalChainID = big.NewInt(0) // TODO, 0: ethereum, this will be get from config file
// zki.FeeIdxs = ? // TODO, this will be get from the config file
- tokenIDs, err := s.getTokenIDsBigInt(l1usertxs, l1coordinatortxs, l2txs)
- if err != nil {
- log.Error(err)
- return nil, err
- }
- s.zki.FeePlanTokens = tokenIDs
s.zki.Metadata.NewStateRootRaw = s.mt.Root()
s.zki.Metadata.NewExitRootRaw = exitTree.Root()
- // s.zki.ISInitStateRootFee = s.mt.Root().BigInt()
// return ZKInputs as the BatchBuilder will return it to forge the Batch
return &ProcessTxOutput{
ZKInputs: s.zki,
@@ -331,15 +372,22 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
}, nil
}
- // getTokenIDsBigInt returns the list of TokenIDs in *big.Int format
- func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []common.L1Tx, l2txs []common.PoolL2Tx) ([]*big.Int, error) {
- tokenIDs := make(map[common.TokenID]bool)
- for i := 0; i < len(l1usertxs); i++ {
- tokenIDs[l1usertxs[i].TokenID] = true
- }
- for i := 0; i < len(l1coordinatortxs); i++ {
- tokenIDs[l1coordinatortxs[i].TokenID] = true
// getFeePlanTokens returns an array of *big.Int containing a list of tokenIDs
// corresponding to the given CoordIdxs and the processed L2Txs
func (s *StateDB) getFeePlanTokens(coordIdxs []common.Idx, l2txs []common.PoolL2Tx) ([]*big.Int, error) {
// get Coordinator TokenIDs corresponding to the Idxs where the Fees
// will be sent
coordTokenIDs := make(map[common.TokenID]bool)
for i := 0; i < len(coordIdxs); i++ {
acc, err := s.GetAccount(coordIdxs[i])
if err != nil {
log.Errorf("could not get account to determine TokenID of CoordIdx %d not found: %s", coordIdxs[i], err.Error())
return nil, err
}
coordTokenIDs[acc.TokenID] = true
}
tokenIDs := make(map[common.TokenID]bool)
for i := 0; i < len(l2txs); i++ {
// as L2Tx does not have parameter TokenID, get it from the
// AccountsDB (in the StateDB)
@@ -348,7 +396,9 @@ func (s *StateDB) getTokenIDsBigInt(l1usertxs, l1coordinatortxs []common.L1Tx, l
log.Errorf("could not get account to determine TokenID of L2Tx: FromIdx %d not found: %s", l2txs[i].FromIdx, err.Error())
return nil, err
}
- tokenIDs[acc.TokenID] = true
if _, ok := coordTokenIDs[acc.TokenID]; ok {
tokenIDs[acc.TokenID] = true
}
}
var tBI []*big.Int
for t := range tokenIDs {
@@ -368,7 +418,12 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
// ZKInputs
if s.zki != nil {
// Txs
- // s.zki.TxCompressedData[s.i] = tx.TxCompressedData() // uncomment once L1Tx.TxCompressedData is ready
var err error
s.zki.TxCompressedData[s.i], err = tx.TxCompressedData()
if err != nil {
log.Error(err)
return nil, nil, false, nil, err
}
s.zki.FromIdx[s.i] = tx.FromIdx.BigInt()
s.zki.ToIdx[s.i] = tx.ToIdx.BigInt()
s.zki.OnChain[s.i] = big.NewInt(1)
@@ -408,11 +463,6 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
// TODO applyCreateAccount will return the created account,
// which in the case type==TypeSynchronizer will be added to an
// array of created accounts that will be returned
- if s.zki != nil {
- s.zki.AuxFromIdx[s.i] = s.idx.BigInt() // last s.idx is the one used for creating the new account
- s.zki.NewAccount[s.i] = big.NewInt(1)
- }
case common.TxTypeDeposit:
// update balance of the MT account
err := s.applyDeposit(tx, false)
@@ -436,11 +486,6 @@ func (s *StateDB) processL1Tx(exitTree *merkletree.MerkleTree, tx *common.L1Tx)
log.Error(err)
return nil, nil, false, nil, err
}
- if s.zki != nil {
- s.zki.AuxFromIdx[s.i] = s.idx.BigInt() // last s.idx is the one used for creating the new account
- s.zki.NewAccount[s.i] = big.NewInt(1)
- }
case common.TxTypeForceExit:
// execute exit flow
// coordIdxsMap is 'nil', as at L1Txs there is no L2 fees
@@ -485,8 +530,14 @@ func (s *StateDB) processL2Tx(coordIdxsMap map[common.TokenID]common.Idx, collec
// ZKInputs
if s.zki != nil {
// Txs
- // s.zki.TxCompressedData[s.i] = tx.TxCompressedData() // uncomment once L1Tx.TxCompressedData is ready
- // s.zki.TxCompressedDataV2[s.i] = tx.TxCompressedDataV2() // uncomment once L2Tx.TxCompressedDataV2 is ready
s.zki.TxCompressedData[s.i], err = tx.TxCompressedData()
if err != nil {
return nil, nil, false, err
}
s.zki.TxCompressedDataV2[s.i], err = tx.TxCompressedDataV2()
if err != nil {
return nil, nil, false, err
}
s.zki.FromIdx[s.i] = tx.FromIdx.BigInt()
s.zki.ToIdx[s.i] = tx.ToIdx.BigInt()
@@ -587,6 +638,14 @@ func (s *StateDB) applyCreateAccount(tx *common.L1Tx) error {
s.zki.OldValue1[s.i] = p.OldValue.BigInt()
s.zki.Metadata.NewLastIdxRaw = s.idx + 1
s.zki.AuxFromIdx[s.i] = common.Idx(s.idx + 1).BigInt()
s.zki.NewAccount[s.i] = big.NewInt(1)
if s.i < len(s.zki.ISOnChain) { // len(s.zki.ISOnChain) == nTx
// intermediate states
s.zki.ISOnChain[s.i] = big.NewInt(1)
}
}
s.idx = s.idx + 1
@@ -695,7 +754,7 @@ func (s *StateDB) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx, coll
accumulated := s.accumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)
- if s.typ == TypeSynchronizer {
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}
@@ -800,6 +859,12 @@ func (s *StateDB) applyCreateAccountDepositTransfer(tx *common.L1Tx) error {
s.zki.OldValue1[s.i] = p.OldValue.BigInt()
s.zki.Metadata.NewLastIdxRaw = s.idx + 1
s.zki.AuxFromIdx[s.i] = common.Idx(s.idx + 1).BigInt()
s.zki.NewAccount[s.i] = big.NewInt(1)
// intermediate states
s.zki.ISOnChain[s.i] = big.NewInt(1)
}
// update receiver account in localStateDB
@@ -854,7 +919,7 @@ func (s *StateDB) applyExit(coordIdxsMap map[common.TokenID]common.Idx, collecte
accumulated := s.accumulatedFees[accCoord.Idx]
accumulated.Add(accumulated, fee)
- if s.typ == TypeSynchronizer {
if s.typ == TypeSynchronizer || s.typ == TypeBatchBuilder {
collected := collectedFees[accCoord.TokenID]
collected.Add(collected, fee)
}
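To make the intermediate-state bookkeeping above easier to follow, here is a simplified, standalone model (not code from the commit; all names are stand-ins) of how the ISAccFeeOut snapshots and ISFinalAccFee relate while L2 txs are processed: fees accumulate per fee-plan token, a snapshot is stored after each tx except the last, and the totals after the last tx become the final accumulated fees.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// stand-ins: two fee-plan token slots, three L2 txs paying fees
	feePlanTokens := []int{1, 2}
	collected := map[int]*big.Int{1: big.NewInt(0), 2: big.NewInt(0)}
	txFees := []struct {
		token int
		fee   int64
	}{{1, 10}, {2, 5}, {1, 7}}

	nTx := len(txFees)
	isAccFeeOut := make([][]*big.Int, nTx-1) // intermediate snapshots
	var finalAccFee []*big.Int
	snapshot := func() []*big.Int {
		out := make([]*big.Int, len(feePlanTokens))
		for i, tok := range feePlanTokens {
			out[i] = new(big.Int).Set(collected[tok])
		}
		return out
	}
	for i, tx := range txFees {
		collected[tx.token].Add(collected[tx.token], big.NewInt(tx.fee))
		if i < nTx-1 {
			isAccFeeOut[i] = snapshot() // intermediate state after tx i
		} else {
			finalAccFee = snapshot() // last tx: final accumulated fees
		}
	}
	fmt.Println(isAccFeeOut, finalAccFee) // [[10 0] [10 5]] [17 5]
}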

db/statedb/txprocessors_test.go (+74, -0)

@@ -477,6 +477,80 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
assert.Equal(t, "9827704113668630072730115158977131501210702363656902211840117643154933433410", sdb.mt.Root().BigInt().String())
}
func TestCircomTest(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := NewStateDB(dir, TypeBatchBuilder, 8)
assert.Nil(t, err)
// same values than in the js test
bjj0, err := common.BJJFromStringWithChecksum("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7")
assert.Nil(t, err)
l1Txs := []common.L1Tx{
{
FromIdx: 0,
// LoadAmount: big.NewInt(10400),
LoadAmount: big.NewInt(16000000),
Amount: big.NewInt(0),
TokenID: 1,
FromBJJ: bjj0,
FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
ToIdx: 0,
Type: common.TxTypeCreateAccountDeposit,
},
}
l2Txs := []common.PoolL2Tx{
{
FromIdx: 256,
ToIdx: 256,
TokenID: 1,
Amount: big.NewInt(1000),
Nonce: 0,
Fee: 126,
Type: common.TxTypeTransfer,
},
}
ptc := ProcessTxsConfig{
NLevels: 8,
MaxFeeTx: 2,
MaxTx: 5,
MaxL1Tx: 2,
}
ptOut, err := sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
require.Nil(t, err)
// check expected account keys values from tx inputs
acc, err := sdb.GetAccount(common.Idx(256))
require.Nil(t, err)
assert.Equal(t, "d746824f7d0ac5044a573f51b278acb56d823bec39551d1d7bf7378b68a1b021", acc.PublicKey.Compress().String())
assert.Equal(t, "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf", acc.EthAddr.Hex())
// check that there no exist more accounts
_, err = sdb.GetAccount(common.Idx(257))
require.NotNil(t, err)
ptOut.ZKInputs.FeeIdxs[0] = common.Idx(256).BigInt()
s, err := json.Marshal(ptOut.ZKInputs)
require.Nil(t, err)
debug := false
if debug {
fmt.Println("\nCopy&Paste into js circom test:\n let zkInput = JSON.parse(`" + string(s) + "`);")
h, err := ptOut.ZKInputs.HashGlobalData()
require.Nil(t, err)
fmt.Printf(`
const output={
hashGlobalInputs: "%s",
};
await circuit.assertOut(w, output);
`, h.String())
fmt.Println("")
}
}
func TestZKInputsHashTestVector0(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)

db/statedb/utils.go (+16, -0)

@@ -160,3 +160,19 @@ func BJJCompressedTo256BigInts(pkComp babyjub.PublicKeyComp) [256]*big.Int {
return r
}
// formatAccumulatedFees returns an array of [nFeeAccounts]*big.Int containing
// the balance of each FeeAccount, taken from the 'collectedFees' map, in the
// order of the 'orderTokenIDs'
func formatAccumulatedFees(collectedFees map[common.TokenID]*big.Int, orderTokenIDs []*big.Int) []*big.Int {
accFeeOut := make([]*big.Int, len(orderTokenIDs))
for i := 0; i < len(orderTokenIDs); i++ {
tokenID := common.TokenIDFromBigInt(orderTokenIDs[i])
if _, ok := collectedFees[tokenID]; ok {
accFeeOut[i] = collectedFees[tokenID]
} else {
accFeeOut[i] = big.NewInt(0)
}
}
return accFeeOut
}
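A hypothetical test (not part of the commit) showing the intended ordering behaviour: fees come back in the order of orderTokenIDs, and tokens with no collected fee map to zero. The import paths are assumed to follow the ones already used in this package.

package statedb

import (
	"math/big"
	"testing"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/stretchr/testify/assert"
)

func TestFormatAccumulatedFeesOrder(t *testing.T) {
	collected := map[common.TokenID]*big.Int{
		common.TokenID(1): big.NewInt(100),
		common.TokenID(3): big.NewInt(7),
	}
	order := []*big.Int{big.NewInt(3), big.NewInt(2), big.NewInt(1)}
	fees := formatAccumulatedFees(collected, order)
	assert.Equal(t, "7", fees[0].String())   // TokenID 3
	assert.Equal(t, "0", fees[1].String())   // TokenID 2 has no fees collected
	assert.Equal(t, "100", fees[2].String()) // TokenID 1
}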
