Add tests connecting TxSelector, BatchBuilder, ZKInputs, ProofServer

- Add tests connecting TxSelector, BatchBuilder, ZKInputs, ProofServer
- Added a test to check that the signatures of the PoolL2Txs from the L2DB
pool can be verified, i.e. that the parameters of each PoolL2Tx still match
the original parameters signed before inserting the tx into the L2DB (see
the sketch below)
arnaucube
2021-01-15 19:25:11 +01:00
parent 1633a88a67
commit 7f46c3028e
10 changed files with 347 additions and 95 deletions
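
The signature check described in the commit message works roughly as follows. This is a minimal sketch, not the committed code: the helper name checkPoolL2TxSignature is made up for illustration, and it assumes l2db.L2DB.GetTx and common.PoolL2Tx.VerifySignature exist with the shapes used below.

package zkproof

import (
	"testing"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/l2db"
	"github.com/iden3/go-iden3-crypto/babyjub"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// checkPoolL2TxSignature reads a PoolL2Tx back from the L2DB pool and checks
// that its parameters still match the original ones and that the stored
// signature verifies against the sender's BJJ public key.
// Hypothetical helper for illustration; GetTx and VerifySignature are assumed.
func checkPoolL2TxSignature(t *testing.T, l2DB *l2db.L2DB, orig common.PoolL2Tx,
	chainID uint16, senderBJJ babyjub.PublicKeyComp) {
	stored, err := l2DB.GetTx(orig.TxID)
	require.NoError(t, err)
	// the parameters read from the pool must match what was originally signed
	assert.Equal(t, orig.Amount, stored.Amount)
	assert.Equal(t, orig.Nonce, stored.Nonce)
	assert.Equal(t, orig.Signature, stored.Signature)
	// and the stored parameters must still verify against that signature
	assert.True(t, stored.VerifySignature(chainID, senderBJJ))
}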

test/zkproof/flows_test.go (new file, 247 additions)

@@ -0,0 +1,247 @@
package zkproof
import (
"io/ioutil"
"os"
"strconv"
"testing"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/test/txsets"
"github.com/hermeznetwork/hermez-node/txselector"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
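// addTokens registers a first block and the Til tokens in the HistoryDB so
// that later inserts referencing them do not break foreign-key constraints.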
func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
var tokens []common.Token
for i := 0; i < int(tc.LastRegisteredTokenID); i++ {
tokens = append(tokens, common.Token{
TokenID: common.TokenID(i + 1),
EthBlockNum: 1,
EthAddr: ethCommon.BytesToAddress([]byte{byte(i + 1)}),
Name: strconv.Itoa(i),
Symbol: strconv.Itoa(i),
Decimals: 18,
})
}
hdb := historydb.NewHistoryDB(db)
assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1,
}))
assert.NoError(t, hdb.AddTokens(tokens))
}
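// addL2Txs inserts the given PoolL2Txs into the L2DB used by the TxSelector.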
func addL2Txs(t *testing.T, l2DB *l2db.L2DB, poolL2Txs []common.PoolL2Tx) {
for i := 0; i < len(poolL2Txs); i++ {
err := l2DB.AddTxTest(&poolL2Txs[i])
if err != nil {
log.Error(err)
}
require.NoError(t, err)
}
}
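// addAccCreationAuth signs an AccountCreationAuth for the given Til user and
// stores it in the L2DB, returning the signature.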
func addAccCreationAuth(t *testing.T, tc *til.Context, l2DB *l2db.L2DB, chainID uint16, hermezContractAddr ethCommon.Address, username string) []byte {
user := tc.Users[username]
auth := &common.AccountCreationAuth{
EthAddr: user.Addr,
BJJ: user.BJJ.Public().Compress(),
}
err := auth.Sign(func(hash []byte) ([]byte, error) {
return ethCrypto.Sign(hash, user.EthSk)
}, chainID, hermezContractAddr)
assert.NoError(t, err)
err = l2DB.AddAccountCreationAuth(auth)
assert.NoError(t, err)
return auth.Signature
}
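// initTxSelector creates a TxSelector backed by a fresh L2DB (PostgreSQL) and
// a synchronizer StateDB, using the Til Coord user as the coordinator account.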
func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address, coordUser *til.User) (*txselector.TxSelector, *l2db.L2DB, *statedb.StateDB) {
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour)
dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
syncStateDB, err := statedb.NewStateDB(dir, 128, statedb.TypeSynchronizer, 0)
require.NoError(t, err)
txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(txselDir))
// use Til Coord keys for tests compatibility
coordAccount := &txselector.CoordAccount{
Addr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(),
AccountCreationAuth: nil,
}
auth := common.AccountCreationAuth{
EthAddr: coordUser.Addr,
BJJ: coordUser.BJJ.Public().Compress(),
}
err = auth.Sign(func(hash []byte) ([]byte, error) {
return ethCrypto.Sign(hash, coordUser.EthSk)
}, chainID, hermezContractAddr)
assert.NoError(t, err)
coordAccount.AccountCreationAuth = auth.Signature
txsel, err := txselector.NewTxSelector(coordAccount, txselDir, syncStateDB, l2DB)
require.NoError(t, err)
test.WipeDB(l2DB.DB())
return txsel, l2DB, syncStateDB
}
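// TestTxSelectorBatchBuilderZKInputs runs the full flow batch by batch: Til
// generates the transactions, the TxSelector selects them, the BatchBuilder
// builds the batch and its ZKInputs, and the ZKInputs are sent to the proof
// server (when PROOF_SERVER_URL is set).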
func TestTxSelectorBatchBuilderZKInputs(t *testing.T) {
tc := til.NewContext(ChainID, common.RollupConstMaxL1UserTx)
// generate the test transactions; the L1CoordinatorTxs generated by Til
// will be ignored in this test, as it will be the TxSelector who
// generates them when needed
blocks, err := tc.GenerateBlocks(txsets.SetBlockchainMinimumFlow0)
require.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, l2DBTxSel, syncStateDB := initTxSelector(t, ChainID, hermezContractAddr, tc.Users["Coord"])
bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
require.NoError(t, err)
bb, err := batchbuilder.NewBatchBuilder(bbDir, syncStateDB, 0, NLevels)
require.NoError(t, err)
// restart the nonces of the TilContext, as they will be set directly when
// generating the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
// add tokens to the HistoryDB to avoid breaking FK constraints
addTokens(t, tc, l2DBTxSel.DB())
selectionConfig := &txselector.SelectionConfig{
MaxL1UserTxs: 100, // TODO
TxProcessorConfig: txprocConfig,
}
configBatch := &batchbuilder.ConfigBatch{
// ForgerAddress:
TxProcessorConfig: txprocConfig,
}
// loop over the first 6 batches
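// expected StateDB roots after each of the first 6 batches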
expectedRoots := []string{"0", "0", "13644148972047617726265275926674266298636745191961029124811988256139761111521", "12433441613247342495680642890662773367605896324555599297255745922589338651261", "12433441613247342495680642890662773367605896324555599297255745922589338651261", "4191361650490017591061467288209836928064232431729236465872209988325272262963"}
for i := 0; i < 6; i++ {
log.Debugf("block:0 batch:%d", i+1)
var l1UserTxs []common.L1Tx
if blocks[0].Rollup.Batches[i].Batch.ForgeL1TxsNum != nil {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[i].Batch.ForgeL1TxsNum])
}
// TxSelector selects the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// BatchBuilder builds the Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t, expectedRoots[i], bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
}
log.Debug("block:0 batch:7")
// simulate the PoolL2Txs of the batch7
batchPoolL2 := `
Type: PoolL2
PoolTransferToEthAddr(1) A-B: 200 (126)
PoolTransferToEthAddr(0) B-C: 100 (126)`
l2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the AccountCreationAuths that will be used in the next batch
_ = addAccCreationAuth(t, tc, l2DBTxSel, ChainID, hermezContractAddr, "B")
_ = addAccCreationAuth(t, tc, l2DBTxSel, ChainID, hermezContractAddr, "C")
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
// TxSelector selects the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// BatchBuilder builds the Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t, "7614010373759339299470010949167613050707822522530721724565424494781010548240", bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.LocalAccountsDB().CurrentBatch())
require.NoError(t, err)
log.Debug("block:0 batch:8")
// simulate the PoolL2Txs of the batch8
batchPoolL2 = `
Type: PoolL2
PoolTransfer(0) A-B: 100 (126)
PoolTransfer(0) C-A: 50 (126)
PoolTransfer(1) B-C: 100 (126)
PoolExit(0) A: 100 (126)`
l2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
// TxSelector selects the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// BatchBuilder builds the Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t, "21231789250434471575486264439945776732824482207853465397552873521865656677689", bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs), txsel.LocalAccountsDB().CurrentBatch())
require.NoError(t, err)
log.Debug("(batch9) block:1 batch:1")
// simulate the PoolL2Txs of the batch9
batchPoolL2 = `
Type: PoolL2
PoolTransfer(0) D-A: 300 (126)
PoolTransfer(0) B-D: 100 (126)`
l2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
// TxSelector selects the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// BatchBuilder builds the Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
assert.Equal(t, "11289313644810782435120113035387729451095637380468777086895109386127538554246", bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs), txsel.LocalAccountsDB().CurrentBatch())
require.NoError(t, err)
log.Debug("(batch10) block:1 batch:2")
l2Txs = []common.PoolL2Tx{}
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// TxSelector selects the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// BatchBuilder builds the Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
require.NoError(t, err)
// same root as the previous batch, as the L1CoordinatorTxs created by the
// Til set are not created by the TxSelector in this test
assert.Equal(t, "11289313644810782435120113035387729451095637380468777086895109386127538554246", bb.LocalStateDB().MT.Root().BigInt().String())
sendProofAndCheckResp(t, zki)
err = l2DBTxSel.StartForging(common.TxIDsFromPoolL2Txs(l2Txs), txsel.LocalAccountsDB().CurrentBatch())
require.NoError(t, err)
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/test/txsets"
@@ -26,9 +27,7 @@ const pollInterval = 200 * time.Millisecond
func TestMain(m *testing.M) {
exitVal := 0
proofServerURL = os.Getenv("PROOF_SERVER_URL")
if proofServerURL != "" {
exitVal = m.Run()
}
exitVal = m.Run()
os.Exit(exitVal)
}
@@ -38,7 +37,7 @@ const MaxL1Tx = 256
const MaxFeeTx = 64
const ChainID uint16 = 1
var config = txprocessor.Config{
var txprocConfig = txprocessor.Config{
NLevels: uint32(NLevels),
MaxTx: MaxTx,
MaxL1Tx: MaxL1Tx,
@@ -46,19 +45,24 @@ var config = txprocessor.Config{
ChainID: ChainID,
}
func initStateDB(t *testing.T) *statedb.StateDB {
func initStateDB(t *testing.T, typ statedb.TypeStateDB) *statedb.StateDB {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.Nil(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(dir, 128, statedb.TypeBatchBuilder, NLevels)
sdb, err := statedb.NewStateDB(dir, 128, typ, NLevels)
require.NoError(t, err)
return sdb
}
func sendProofAndCheckResp(t *testing.T, ptOut *txprocessor.ProcessTxOutput) {
func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
if proofServerURL == "" {
log.Debug("No PROOF_SERVER_URL defined, not using ProofServer")
return
}
// Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(ptOut.ZKInputs)
zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err)
err = ioutil.WriteFile("/tmp/dbgZKInputs.json", zkInputsJSON, 0640) //nolint:gosec
require.NoError(t, err)
@@ -66,7 +70,7 @@ func sendProofAndCheckResp(t *testing.T, ptOut *txprocessor.ProcessTxOutput) {
proofServerClient := prover.NewProofServerClient(proofServerURL, pollInterval)
err = proofServerClient.WaitReady(context.Background())
require.NoError(t, err)
err = proofServerClient.CalculateProof(context.Background(), ptOut.ZKInputs)
err = proofServerClient.CalculateProof(context.Background(), zki)
require.NoError(t, err)
proof, pubInputs, err := proofServerClient.GetProof(context.Background())
require.NoError(t, err)
@@ -75,9 +79,9 @@ func sendProofAndCheckResp(t *testing.T, ptOut *txprocessor.ProcessTxOutput) {
}
func TestZKInputsEmpty(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
coordIdxs := []common.Idx{}
l1UserTxs := []common.L1Tx{}
@@ -86,7 +90,7 @@ func TestZKInputsEmpty(t *testing.T) {
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut) // test empty batch ZKInputs
sendProofAndCheckResp(t, ptOut.ZKInputs) // test empty batch ZKInputs
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs = txsets.GenerateTxsZKInputs0(t, ChainID)
@@ -99,79 +103,79 @@ func TestZKInputsEmpty(t *testing.T) {
l2Txs = []common.PoolL2Tx{}
ptOut, err = tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut) // test empty batch ZKInputs after a non-empty batch
sendProofAndCheckResp(t, ptOut.ZKInputs) // test empty batch ZKInputs after a non-empty batch
}
func TestZKInputs0(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs0(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs1(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs1(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs2(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs2(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs3(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs3(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs4(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs4(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs5(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
_, coordIdxs, l1UserTxs, l1CoordTxs, l2Txs := txsets.GenerateTxsZKInputs5(t, ChainID)
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1UserTxs, l1CoordTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}
func TestZKInputs6(t *testing.T) {
sdb := initStateDB(t)
sdb := initStateDB(t, statedb.TypeBatchBuilder)
tc := til.NewContext(ChainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(txsets.SetBlockchainMinimumFlow0)
@@ -181,12 +185,12 @@ func TestZKInputs6(t *testing.T) {
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tp := txprocessor.NewTxProcessor(sdb, config)
tp := txprocessor.NewTxProcessor(sdb, txprocConfig)
// batch1
ptOut, err := tp.ProcessTxs(nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch2
l1UserTxs := []common.L1Tx{}
@@ -194,7 +198,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch3
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
@@ -202,7 +206,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch4
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
@@ -210,7 +214,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch5
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
@@ -218,7 +222,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch6
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
@@ -226,10 +230,10 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(nil, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch7
// simulate the PoolL2Txs of the batch6
// simulate the PoolL2Txs of the batch7
batchPoolL2 := `
Type: PoolL2
PoolTransferToEthAddr(1) A-B: 200 (126)
@@ -244,10 +248,10 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[6].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch8
// simulate the PoolL2Txs of the batch7
// simulate the PoolL2Txs of the batch8
batchPoolL2 = `
Type: PoolL2
PoolTransfer(0) A-B: 100 (126)
@@ -262,7 +266,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[7].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch9
// simulate the PoolL2Txs of the batch9
@@ -279,7 +283,7 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
// batch10
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
@@ -288,5 +292,5 @@ func TestZKInputs6(t *testing.T) {
ptOut, err = tp.ProcessTxs(coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
sendProofAndCheckResp(t, ptOut)
sendProofAndCheckResp(t, ptOut.ZKInputs)
}