
Merge pull request #471 from hermeznetwork/feature/zki6-comp

ZKInput with L2Txs compatible with circom circuits
Eduard S, 4 years ago (committed by GitHub)
commit 0a461fe60c
10 changed files with 247 additions and 73 deletions
  1. common/pooll2tx.go  +7 -1
  2. common/zk.go  +34 -16
  3. test/til/sets.go  +0 -4
  4. test/til/txs.go  +4 -2
  5. test/til/txs_test.go  +20 -0
  6. txprocessor/txprocessor.go  +13 -8
  7. txprocessor/txprocessor_test.go  +11 -1
  8. txprocessor/utils.go  +6 -4
  9. txprocessor/zkinputsgen_test.go  +151 -36
  10. txselector/txselector_test.go  +1 -1

common/pooll2tx.go  +7 -1

@@ -311,10 +311,16 @@ func (tx *PoolL2Tx) VerifySignature(chainID uint16, pkComp babyjub.PublicKeyComp
// L2Tx returns a *L2Tx from the PoolL2Tx
func (tx PoolL2Tx) L2Tx() L2Tx {
var toIdx Idx
if tx.ToIdx == Idx(0) {
toIdx = tx.AuxToIdx
} else {
toIdx = tx.ToIdx
}
return L2Tx{
TxID: tx.TxID,
FromIdx: tx.FromIdx,
ToIdx: tx.ToIdx,
ToIdx: toIdx,
Amount: tx.Amount,
Fee: tx.Fee,
Nonce: tx.Nonce,
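
The new lines make PoolL2Tx.L2Tx() fall back to AuxToIdx when ToIdx is 0, i.e. for pool txs whose destination was given as an address/BJJ and later resolved by the coordinator into AuxToIdx. A minimal standalone sketch of that selection logic, with simplified stand-in types rather than the real common package:

```go
package main

import "fmt"

// Idx is a simplified stand-in for common.Idx.
type Idx uint64

// poolTx is a simplified stand-in for common.PoolL2Tx: ToIdx may be 0 when
// the destination was given as an address, and AuxToIdx holds the resolved
// account index.
type poolTx struct {
	ToIdx    Idx
	AuxToIdx Idx
}

// effectiveToIdx mirrors the selection added to PoolL2Tx.L2Tx():
// AuxToIdx is used only when ToIdx is not set.
func effectiveToIdx(tx poolTx) Idx {
	if tx.ToIdx == Idx(0) {
		return tx.AuxToIdx
	}
	return tx.ToIdx
}

func main() {
	fmt.Println(effectiveToIdx(poolTx{ToIdx: 0, AuxToIdx: 263}))   // 263 (resolved destination)
	fmt.Println(effectiveToIdx(poolTx{ToIdx: 262, AuxToIdx: 263})) // 262 (explicit ToIdx wins)
}
```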

common/zk.go  +34 -16

@@ -42,7 +42,8 @@ type ZKMetadata struct {
NewExitRootRaw *merkletree.Hash
}
// ZKInputs represents the inputs that will be used to generate the zkSNARK proof
// ZKInputs represents the inputs that will be used to generate the zkSNARK
// proof
type ZKInputs struct {
Metadata ZKMetadata `json:"-"`
@@ -60,12 +61,16 @@ type ZKInputs struct {
// GlobalChainID is the blockchain ID (0 for Ethereum mainnet). This
// value can be get from the smart contract.
GlobalChainID *big.Int `json:"globalChainID"` // uint16
// FeeIdxs is an array of merkle tree indexes where the coordinator
// will receive the accumulated fees
// FeeIdxs is an array of merkle tree indexes (Idxs) where the
// coordinator will receive the accumulated fees
FeeIdxs []*big.Int `json:"feeIdxs"` // uint64 (max nLevels bits), len: [maxFeeIdxs]
// accumulate fees
// FeePlanTokens contains all the tokenIDs for which the fees are being accumulated
// FeePlanTokens contains all the tokenIDs for which the fees are being
// accumulated, and those accumulated fees will be paid to the FeeIdxs
// array. The order of FeeIdxs & FeePlanTokens & State3 must match.
// Coordinator fees are processed as correlated pairs:
// [FeePlanTokens[i], FeeIdxs[i]]
FeePlanTokens []*big.Int `json:"feePlanTokens"` // uint32 (max nLevels bits), len: [maxFeeIdxs]
//
@@ -79,11 +84,12 @@ type ZKInputs struct {
TxCompressedDataV2 []*big.Int `json:"txCompressedDataV2"` // big.Int (max 193 bits), len: [maxTx]
// MaxNumBatch is the maximum allowed batch number when the transaction
// can be processed
MaxNumBatch []*big.Int `json:"maxNumBatch"` // uint32
MaxNumBatch []*big.Int `json:"maxNumBatch"` // big.Int (max 32 bits), len: [maxTx]
// FromIdx
FromIdx []*big.Int `json:"fromIdx"` // uint64 (max nLevels bits), len: [maxTx]
// AuxFromIdx is the Idx of the new created account which is consequence of a L1CreateAccountTx
// AuxFromIdx is the Idx of the newly created account which is a
// consequence of a L1CreateAccountTx
AuxFromIdx []*big.Int `json:"auxFromIdx"` // uint64 (max nLevels bits), len: [maxTx]
// ToIdx
@@ -103,7 +109,8 @@ type ZKInputs struct {
//
// Txs/L1Txs
//
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new account (fromIdx==0)
// NewAccount boolean (0/1) flag set 'true' when L1 tx creates a new
// account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float16
DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx]
@@ -116,7 +123,8 @@ type ZKInputs struct {
// Txs/L2Txs
//
// RqOffset relative transaction position to be linked. Used to perform atomic transactions.
// RqOffset relative transaction position to be linked. Used to perform
// atomic transactions.
RqOffset []*big.Int `json:"rqOffset"` // uint8 (max 3 bits), len: [maxTx]
// transaction L2 request data
@@ -149,13 +157,17 @@ type ZKInputs struct {
Balance1 []*big.Int `json:"balance1"` // big.Int (max 192 bits), len: [maxTx]
EthAddr1 []*big.Int `json:"ethAddr1"` // ethCommon.Address, len: [maxTx]
Siblings1 [][]*big.Int `json:"siblings1"` // big.Int, len: [maxTx][nLevels + 1]
// Required for inserts and deletes, values of the CircomProcessorProof (smt insert proof)
// Required for inserts and deletes, values of the CircomProcessorProof
// (smt insert proof)
IsOld0_1 []*big.Int `json:"isOld0_1"` // bool, len: [maxTx]
OldKey1 []*big.Int `json:"oldKey1"` // uint64 (max 40 bits), len: [maxTx]
OldValue1 []*big.Int `json:"oldValue1"` // Hash, len: [maxTx]
// state 2, value of the receiver (to) account leaf
// if Tx is an Exit, state 2 is used for the Exit Merkle Proof
// state 2, value of the receiver (to) account leaf. The values are
// taken before the smtprocessor update (before updating the Receiver
// leaf).
// If the Tx is an Exit (tx.ToIdx=1), state 2 is used for the Merkle
// Proof of the Exit MerkleTree.
TokenID2 []*big.Int `json:"tokenID2"` // uint32, len: [maxTx]
Nonce2 []*big.Int `json:"nonce2"` // uint64 (max 40 bits), len: [maxTx]
Sign2 []*big.Int `json:"sign2"` // bool, len: [maxTx]
@@ -163,16 +175,22 @@ type ZKInputs struct {
Balance2 []*big.Int `json:"balance2"` // big.Int (max 192 bits), len: [maxTx]
EthAddr2 []*big.Int `json:"ethAddr2"` // ethCommon.Address, len: [maxTx]
Siblings2 [][]*big.Int `json:"siblings2"` // big.Int, len: [maxTx][nLevels + 1]
// newExit determines if an exit transaction has to create a new leaf in the exit tree
// NewExit determines if an exit transaction has to create a new leaf
// in the exit tree. If an exit leaf for the account already exists in
// the ExitTree, no new leaf is created and 'NewExit' for that tx is 0
// (if it is an 'insert' in the tree, NewExit=1; if it is an 'update'
// of an existing leaf, NewExit=0).
NewExit []*big.Int `json:"newExit"` // bool, len: [maxTx]
// Required for inserts and deletes, values of the CircomProcessorProof (smt insert proof)
// Required for inserts and deletes, values of the CircomProcessorProof
// (smt insert proof)
IsOld0_2 []*big.Int `json:"isOld0_2"` // bool, len: [maxTx]
OldKey2 []*big.Int `json:"oldKey2"` // uint64 (max 40 bits), len: [maxTx]
OldValue2 []*big.Int `json:"oldValue2"` // Hash, len: [maxTx]
// state 3, value of the account leaf receiver of the Fees
// fee tx
// State fees
// state 3, fee leaf states, value of the account leaf that receives
// the fees (fee tx). The values are taken before the smtprocessor
// update (before updating the Receiver leaf).
// The order of FeeIdxs & FeePlanTokens & State3 must match.
TokenID3 []*big.Int `json:"tokenID3"` // uint32, len: [maxFeeIdxs]
Nonce3 []*big.Int `json:"nonce3"` // uint64 (max 40 bits), len: [maxFeeIdxs]
Sign3 []*big.Int `json:"sign3"` // bool, len: [maxFeeIdxs]
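
The reworded comments stress that FeePlanTokens, FeeIdxs and State3 are index-aligned: position i always refers to the same coordinator fee account, i.e. the pair [FeePlanTokens[i], FeeIdxs[i]]. A small illustrative sketch of that pairing (simplified fields, not the actual ZKInputs struct):

```go
package main

import (
	"fmt"
	"math/big"
)

// feePlan keeps only the two correlated arrays from ZKInputs that matter for
// this example; both are assumed to be padded to maxFeeIdxs in the real input.
type feePlan struct {
	FeePlanTokens []*big.Int // tokenID whose fees are accumulated at position i
	FeeIdxs       []*big.Int // coordinator account Idx paid at position i
}

func main() {
	plan := feePlan{
		FeePlanTokens: []*big.Int{big.NewInt(0), big.NewInt(1)},
		FeeIdxs:       []*big.Int{big.NewInt(261), big.NewInt(262)},
	}
	for i := range plan.FeeIdxs {
		// State3[i] would hold the pre-update leaf of FeeIdxs[i].
		fmt.Printf("fees of tokenID %v go to idx %v (State3[%d])\n",
			plan.FeePlanTokens[i], plan.FeeIdxs[i], i)
	}
}
```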

test/til/sets.go  +0 -4

@@ -224,7 +224,6 @@ CreateAccountDeposit(1) C: 0
// close Block:0, Batch:1
> batchL1 // freeze L1User{2}, forge L1Coord{0}
// Expected balances:
// Coord(0): 0, Coord(1): 0
// C(0): 0
CreateAccountDeposit(1) A: 500
@@ -232,14 +231,12 @@ CreateAccountDeposit(1) A: 500
// close Block:0, Batch:2
> batchL1 // freeze L1User{1}, forge L1User{2}
// Expected balances:
// Coord(0): 0, Coord(1): 0
// A(0): 500
// C(0): 0, C(1): 0
// close Block:0, Batch:3
> batchL1 // freeze L1User{nil}, forge L1User{1}
// Expected balances:
// Coord(0): 0, Coord(1): 0
// A(0): 500, A(1): 500
// C(0): 0
@@ -253,7 +250,6 @@ CreateAccountDeposit(0) D: 800
// close Block:0, Batch:5
> batchL1 // freeze L1User{1}, forge L1User{1}
// Expected balances:
// Coord(0): 0, Coord(1): 0
// A(0): 600, A(1): 500
// B(0): 400
// C(0): 0

test/til/txs.go  +4 -2

@@ -2,9 +2,9 @@ package til
import (
"crypto/ecdsa"
"encoding/binary"
"fmt"
"math/big"
"strconv"
"strings"
"time"
@@ -694,7 +694,9 @@ func (tc *Context) generateKeys(userNames []string) {
}
// babyjubjub key
var sk babyjub.PrivateKey
copy(sk[:], []byte(strconv.Itoa(i))) // only for testing
var iBytes [8]byte
binary.LittleEndian.PutUint64(iBytes[:], uint64(i))
copy(sk[:], iBytes[:]) // only for testing
// eth address
var key ecdsa.PrivateKey
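
The seeding change replaces the ASCII decimal string of i with its 8-byte little-endian encoding as the start of the test babyjubjub private key. A standalone sketch comparing the two seedings (plain [32]byte arrays instead of babyjub.PrivateKey):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	i := 3

	// Old seeding: the ASCII decimal string of i ("3" == 0x33) copied into sk[:].
	var oldSk [32]byte
	copy(oldSk[:], []byte(strconv.Itoa(i)))

	// New seeding: the 8-byte little-endian encoding of i copied into sk[:].
	var newSk [32]byte
	var iBytes [8]byte
	binary.LittleEndian.PutUint64(iBytes[:], uint64(i))
	copy(newSk[:], iBytes[:])

	fmt.Printf("old first bytes: % x\n", oldSk[:8]) // 33 00 00 00 00 00 00 00
	fmt.Printf("new first bytes: % x\n", newSk[:8]) // 03 00 00 00 00 00 00 00
}
```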

test/til/txs_test.go  +20 -0

@@ -1,14 +1,34 @@
package til
import (
"encoding/hex"
"fmt"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGenerateKeys(t *testing.T) {
tc := NewContext(0, common.RollupConstMaxL1UserTx)
usernames := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"}
tc.generateKeys(usernames)
debug := false
if debug {
for i, username := range usernames {
fmt.Println(i, username)
sk := crypto.FromECDSA(tc.Users[username].EthSk)
fmt.Println(" eth_sk", hex.EncodeToString(sk))
fmt.Println(" eth_addr", tc.Users[username].Addr)
fmt.Println(" bjj_sk", hex.EncodeToString(tc.Users[username].BJJ[:]))
fmt.Println(" bjj_pub", tc.Users[username].BJJ.Public().Compress())
}
}
}
func TestGenerateBlocksNoBatches(t *testing.T) {
set := `
Type: Blockchain

txprocessor/txprocessor.go  +13 -8

@@ -274,7 +274,8 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if tp.i < nTx-1 {
tp.zki.ISOutIdx[tp.i] = tp.s.CurrentIdx().BigInt()
tp.zki.ISStateRoot[tp.i] = tp.s.MT.Root().BigInt()
tp.zki.ISAccFeeOut[tp.i] = formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens)
// tp.zki.ISAccFeeOut[tp.i] = formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens)
tp.zki.ISAccFeeOut[tp.i] = formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens, coordIdxs)
if exitIdx == nil {
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
}
@@ -302,14 +303,14 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
if i < int(tp.config.MaxTx)-1 {
tp.zki.ISOutIdx[i] = tp.s.CurrentIdx().BigInt()
tp.zki.ISStateRoot[i] = tp.s.MT.Root().BigInt()
tp.zki.ISAccFeeOut[i] = formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens)
tp.zki.ISAccFeeOut[i] = formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens, coordIdxs)
tp.zki.ISExitRoot[i] = exitTree.Root().BigInt()
}
if i >= tp.i {
tp.zki.TxCompressedData[i] = new(big.Int).SetBytes(common.SignatureConstantBytes)
}
}
isFinalAccFee := formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens)
isFinalAccFee := formatAccumulatedFees(collectedFees, tp.zki.FeePlanTokens, coordIdxs)
copy(tp.zki.ISFinalAccFee, isFinalAccFee)
// before computing the Fees txs, set the ISInitStateRootFee
tp.zki.ISInitStateRootFee = tp.s.MT.Root().BigInt()
@@ -318,7 +319,9 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
iFee := 0
for idx, accumulatedFee := range tp.AccumulatedFees {
for _, idx := range coordIdxs {
accumulatedFee := tp.AccumulatedFees[idx]
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
@@ -816,7 +819,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Balance1[tp.i] = accSender.Balance
tp.zki.EthAddr1[tp.i] = common.EthAddrToBigInt(accSender.EthAddr)
}
if !tx.IsL1 {
if !tx.IsL1 { // L2
// increment nonce
accSender.Nonce++
@@ -1009,8 +1012,6 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Ay1[tp.i] = accBJJY
tp.zki.Balance1[tp.i] = acc.Balance
tp.zki.EthAddr1[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
tp.zki.NewExit[tp.i] = big.NewInt(1)
}
if !tx.IsL1 {
@@ -1081,6 +1082,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Ay2[tp.i] = accBJJY
tp.zki.Balance2[tp.i] = tx.Amount
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
// as Leaf didn't exist in the ExitTree, set NewExit[i]=1
tp.zki.NewExit[tp.i] = big.NewInt(1)
}
p, err = statedb.CreateAccountInTreeDB(exitTree.DB(), exitTree, tx.FromIdx, exitAccount)
if err != nil {
@@ -1099,12 +1102,14 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} else if err != nil {
return exitAccount, false, tracerr.Wrap(err)
}
exitAccount.Nonce = exitAccount.Nonce + 1
// 1b. if idx already exist in exitTree:
if tp.zki != nil {
// Set the State2 before updating the Exit leaf
tp.zki.TokenID2[tp.i] = acc.TokenID.BigInt()
tp.zki.Nonce2[tp.i] = big.NewInt(0)
// increment nonce from existing ExitLeaf
tp.zki.Nonce2[tp.i] = exitAccount.Nonce.BigInt()
accBJJSign, accBJJY := babyjub.UnpackSignY(acc.BJJ)
if accBJJSign {
tp.zki.Sign2[tp.i] = big.NewInt(1)
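
The fee-distribution loop now ranges over coordIdxs instead of over the tp.AccumulatedFees map, so the coordinator fee accounts are always processed in the FeeIdxs order. A minimal sketch (simplified types) of why this matters: Go map iteration order is not deterministic, while the order of FeeIdxs & FeePlanTokens & State3 must match.

```go
package main

import (
	"fmt"
	"math/big"
)

// Idx is a simplified stand-in for common.Idx.
type Idx uint64

func main() {
	coordIdxs := []Idx{261, 262}
	accumulatedFees := map[Idx]*big.Int{
		261: big.NewInt(5),
		262: big.NewInt(0),
	}

	// Deterministic: follow the coordIdxs order, as the updated loop does.
	for i, idx := range coordIdxs {
		fee := accumulatedFees[idx]
		if fee.Cmp(big.NewInt(0)) == 1 { // fee > 0
			fmt.Printf("position %d: pay %v to idx %d\n", i, fee, idx)
		}
	}

	// Old behaviour: `for idx, fee := range accumulatedFees { ... }` may visit
	// 261 and 262 in a different order on every run.
}
```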

txprocessor/txprocessor_test.go  +11 -1

@@ -226,12 +226,14 @@ func TestProcessTxsBalances(t *testing.T) {
log.Debug("block:0 batch:0, only L1CoordinatorTxs")
_, err = tp.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.NoError(t, err)
assert.Equal(t, "0", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:1")
l1UserTxs := []common.L1Tx{}
l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
assert.Equal(t, "0", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:2")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
@@ -239,6 +241,7 @@ func TestProcessTxsBalances(t *testing.T) {
_, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
assert.Equal(t, "13644148972047617726265275926674266298636745191961029124811988256139761111521", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:3")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
@@ -247,6 +250,7 @@ func TestProcessTxsBalances(t *testing.T) {
require.NoError(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
checkBalance(t, tc, sdb, "A", 1, "500")
assert.Equal(t, "12433441613247342495680642890662773367605896324555599297255745922589338651261", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:4")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
@@ -255,6 +259,7 @@ func TestProcessTxsBalances(t *testing.T) {
require.NoError(t, err)
checkBalance(t, tc, sdb, "A", 0, "500")
checkBalance(t, tc, sdb, "A", 1, "500")
assert.Equal(t, "12433441613247342495680642890662773367605896324555599297255745922589338651261", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:5")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
@@ -264,6 +269,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "A", 0, "600")
checkBalance(t, tc, sdb, "A", 1, "500")
checkBalance(t, tc, sdb, "B", 0, "400")
assert.Equal(t, "4191361650490017591061467288209836928064232431729236465872209988325272262963", tp.s.MT.Root().BigInt().String())
coordIdxs := []common.Idx{261, 262}
log.Debug("block:0 batch:6")
@@ -279,6 +285,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "B", 1, "200")
checkBalance(t, tc, sdb, "C", 0, "100")
checkBalance(t, tc, sdb, "D", 0, "800")
assert.Equal(t, "7614010373759339299470010949167613050707822522530721724565424494781010548240", tp.s.MT.Root().BigInt().String())
log.Debug("block:0 batch:7")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
@@ -294,6 +301,7 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "C", 0, "45")
checkBalance(t, tc, sdb, "C", 1, "100")
checkBalance(t, tc, sdb, "D", 0, "800")
assert.Equal(t, "21231789250434471575486264439945776732824482207853465397552873521865656677689", tp.s.MT.Root().BigInt().String())
log.Debug("block:1 batch:0")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
@@ -309,12 +317,14 @@ func TestProcessTxsBalances(t *testing.T) {
checkBalance(t, tc, sdb, "C", 0, "845")
checkBalance(t, tc, sdb, "C", 1, "100")
checkBalance(t, tc, sdb, "D", 0, "470")
assert.Equal(t, "11289313644810782435120113035387729451095637380468777086895109386127538554246", tp.s.MT.Root().BigInt().String())
log.Debug("block:1 batch:1")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
_, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
assert.Equal(t, "10342681351319338354912862547249967104198317571995055517008223832276478908482", tp.s.MT.Root().BigInt().String())
// use Set of PoolL2 txs
poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow1)
@@ -553,7 +563,7 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
assert.Equal(t, common.TokenID(1), acc.TokenID)
assert.Equal(t, "2", acc.Balance.String())
assert.Equal(t, "2720257526434001367979405991743527513807903085728407823609738212616896104498", sdb.MT.Root().BigInt().String())
assert.Equal(t, "18894163991492573893706613133132363559300580460789469708968288074813925659539", sdb.MT.Root().BigInt().String())
}
func TestProcessTxsRootTestVectors(t *testing.T) {

txprocessor/utils.go  +6 -4

@@ -36,14 +36,16 @@ func BJJCompressedTo256BigInts(pkComp babyjub.PublicKeyComp) [256]*big.Int {
// formatAccumulatedFees returns an array of [nFeeAccounts]*big.Int containing
// the balance of each FeeAccount, taken from the 'collectedFees' map, in the
// order of the 'orderTokenIDs'
func formatAccumulatedFees(collectedFees map[common.TokenID]*big.Int, orderTokenIDs []*big.Int) []*big.Int {
// func formatAccumulatedFees(collectedFees map[common.TokenID]*big.Int, orderTokenIDs []*big.Int) []*big.Int {
func formatAccumulatedFees(collectedFees map[common.TokenID]*big.Int, orderTokenIDs []*big.Int, coordIdxs []common.Idx) []*big.Int {
accFeeOut := make([]*big.Int, len(orderTokenIDs))
for i := 0; i < len(orderTokenIDs); i++ {
for i := 0; i < len(accFeeOut); i++ {
accFeeOut[i] = big.NewInt(0)
}
for i := 0; i < len(coordIdxs); i++ {
tokenID := common.TokenIDFromBigInt(orderTokenIDs[i])
if _, ok := collectedFees[tokenID]; ok {
accFeeOut[i] = new(big.Int).Set(collectedFees[tokenID])
} else {
accFeeOut[i] = big.NewInt(0)
}
}
return accFeeOut
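
With the extra coordIdxs parameter, the returned array still has len(orderTokenIDs) entries, but only the first len(coordIdxs) positions can be non-zero; the rest stay at 0. A self-contained sketch of that behaviour, with simplified types and a hypothetical helper name (accumulatedFeesSketch), not the real function:

```go
package main

import (
	"fmt"
	"math/big"
)

// TokenID is a simplified stand-in for common.TokenID.
type TokenID uint32

// accumulatedFeesSketch initializes every position to 0 and then fills only
// the first nCoordIdxs positions from the collected-fees map, in the order of
// orderTokenIDs.
func accumulatedFeesSketch(collected map[TokenID]*big.Int, orderTokenIDs []TokenID, nCoordIdxs int) []*big.Int {
	out := make([]*big.Int, len(orderTokenIDs))
	for i := range out {
		out[i] = big.NewInt(0)
	}
	for i := 0; i < nCoordIdxs && i < len(orderTokenIDs); i++ {
		if fee, ok := collected[orderTokenIDs[i]]; ok {
			out[i] = new(big.Int).Set(fee)
		}
	}
	return out
}

func main() {
	collected := map[TokenID]*big.Int{0: big.NewInt(5), 1: big.NewInt(2)}
	orderTokenIDs := []TokenID{0, 1, 0, 0} // padded up to maxFeeIdxs
	fmt.Println(accumulatedFeesSketch(collected, orderTokenIDs, 2)) // [5 2 0 0]
}
```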

txprocessor/zkinputsgen_test.go  +151 -36
File diff suppressed because it is too large


txselector/txselector_test.go  +1 -1

@@ -303,7 +303,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
require.NoError(t, err)
log.Debug("block:0 batch:7")
// simulate the PoolL2Txs of the batch6
// simulate the PoolL2Txs of the batch7
batchPoolL2 = `
Type: PoolL2
PoolTransfer(0) A-B: 100 (126)
