Mirror of https://github.com/arnaucube/hermez-node.git, synced 2026-02-07 03:16:45 +01:00
Compare commits (1 commit): feature/to ... feature/tx
| Author | SHA1 | Date |
|---|---|---|
| | 2c72166aab | |
@@ -275,8 +275,7 @@ func (s *StateDB) GetAccount(idx common.Idx) (*common.Account, error) {
return GetAccountInTreeDB(s.db.DB(), idx)
}

// AccountsIter iterates over all the accounts in db, calling fn for each one
func AccountsIter(db db.Storage, fn func(a *common.Account) (bool, error)) error {
func accountsIter(db db.Storage, fn func(a *common.Account) (bool, error)) error {
idxDB := db.WithPrefix(PrefixKeyIdx)
if err := idxDB.Iterate(func(k []byte, v []byte) (bool, error) {
idx, err := common.IdxFromBytes(k)
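The hunk above unexports AccountsIter (renaming it to accountsIter); its caller in the DebugAPI token-balance code is removed later in this diff. The iterator uses a (continue, error) callback convention: the callback returns false to stop early. Below is a minimal, self-contained sketch of that convention over an in-memory map; the account type, iterAccounts, and the sample store are hypothetical stand-ins for illustration, not the real statedb or db.Storage types.

```go
package main

import "fmt"

// account is a hypothetical stand-in for common.Account; only the fields
// needed for the sketch are included.
type account struct {
	Idx     uint32
	Balance int64
}

// iterAccounts mimics the callback style used by accountsIter in the diff:
// fn returns (continue, error); iteration stops when continue is false or
// an error is returned.
func iterAccounts(store map[uint32]account, fn func(a *account) (bool, error)) error {
	for _, a := range store {
		a := a
		cont, err := fn(&a)
		if err != nil {
			return err
		}
		if !cont {
			return nil
		}
	}
	return nil
}

func main() {
	store := map[uint32]account{
		256: {Idx: 256, Balance: 1000},
		257: {Idx: 257, Balance: 2000},
	}
	// Sum all balances, never stopping early.
	total := int64(0)
	if err := iterAccounts(store, func(a *account) (bool, error) {
		total += a.Balance
		return true, nil
	}); err != nil {
		fmt.Println("iteration error:", err)
		return
	}
	fmt.Println("total balance:", total)
}
```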
@@ -376,7 +376,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
}
var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" {
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, historyDB, stateDB, sync)
debugAPI = debugapi.NewDebugAPI(cfg.Debug.APIAddress, stateDB, sync)
}
priceUpdater, err := priceupdater.NewPriceUpdater(cfg.PriceUpdater.URL,
priceupdater.APIType(cfg.PriceUpdater.Type), historyDB)
@@ -547,9 +547,6 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
WDelayer: blockData.WDelayer.Vars,
}
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches)
if n.debugAPI != nil {
n.debugAPI.SyncBlockHook()
}
return &blockData.Block, time.Duration(0), nil
} else {
// case: no block
@@ -2,16 +2,12 @@ package debugapi

import (
"context"
"fmt"
"math/big"
"net/http"
"sync"
"time"

"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer"
@@ -35,109 +31,22 @@ func badReq(err error, c *gin.Context) {
})
}

const (
statusUpdating = "updating"
statusOK = "ok"
)

type tokenBalances struct {
sync.RWMutex
Value struct {
Status string
Block *common.Block
Batch *common.Batch
Balances map[common.TokenID]*big.Int
}
}

func (t *tokenBalances) Update(historyDB *historydb.HistoryDB, sdb *statedb.StateDB) (err error) {
var block *common.Block
var batch *common.Batch
var balances map[common.TokenID]*big.Int
defer func() {
t.Lock()
if err == nil {
t.Value.Status = statusOK
t.Value.Block = block
t.Value.Batch = batch
t.Value.Balances = balances
} else {
t.Value.Status = fmt.Sprintf("tokenBalances.Update: %v", err)
t.Value.Block = nil
t.Value.Batch = nil
t.Value.Balances = nil
}
t.Unlock()
}()

if block, err = historyDB.GetLastBlock(); err != nil {
return tracerr.Wrap(err)
}
if batch, err = historyDB.GetLastBatch(); err != nil {
return tracerr.Wrap(err)
}
balances = make(map[common.TokenID]*big.Int)
sdb.LastRead(func(sdbLast *statedb.Last) error {
return tracerr.Wrap(
statedb.AccountsIter(sdbLast.DB(), func(a *common.Account) (bool, error) {
if balance, ok := balances[a.TokenID]; !ok {
balances[a.TokenID] = a.Balance
} else {
balance.Add(balance, a.Balance)
}
return true, nil
}),
)
})
return nil
}

// DebugAPI is an http API with debugging endpoints
type DebugAPI struct {
addr string
historyDB *historydb.HistoryDB
stateDB *statedb.StateDB // synchronizer statedb
sync *synchronizer.Synchronizer
tokenBalances tokenBalances
}

// NewDebugAPI creates a new DebugAPI
func NewDebugAPI(addr string, historyDB *historydb.HistoryDB, stateDB *statedb.StateDB,
sync *synchronizer.Synchronizer) *DebugAPI {
func NewDebugAPI(addr string, stateDB *statedb.StateDB, sync *synchronizer.Synchronizer) *DebugAPI {
return &DebugAPI{
addr: addr,
historyDB: historyDB,
stateDB: stateDB,
sync: sync,
}
}

// SyncBlockHook is a hook function that the node will call after every new synchronized block
func (a *DebugAPI) SyncBlockHook() {
a.tokenBalances.RLock()
updateTokenBalances := a.tokenBalances.Value.Status == statusUpdating
a.tokenBalances.RUnlock()
if updateTokenBalances {
if err := a.tokenBalances.Update(a.historyDB, a.stateDB); err != nil {
log.Errorw("DebugAPI.tokenBalances.Upate", "err", err)
}
}
}

func (a *DebugAPI) handleTokenBalances(c *gin.Context) {
a.tokenBalances.RLock()
tokenBalances := a.tokenBalances.Value
a.tokenBalances.RUnlock()
c.JSON(http.StatusOK, tokenBalances)
}

func (a *DebugAPI) handlePostTokenBalances(c *gin.Context) {
a.tokenBalances.Lock()
a.tokenBalances.Value.Status = statusUpdating
a.tokenBalances.Unlock()
c.JSON(http.StatusOK, nil)
}

func (a *DebugAPI) handleAccount(c *gin.Context) {
uri := struct {
Idx uint32
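The removed tokenBalances.Update computed, for every token, the sum of the balances of all accounts in the synchronizer StateDB, alongside the last block and batch from the HistoryDB. As a reference for what that aggregation did, here is a standalone sketch of the per-token sum over a plain slice; the account type is a hypothetical stand-in for common.Account, and a defensive copy of the first balance is added (the removed code stored the *big.Int directly).

```go
package main

import (
	"fmt"
	"math/big"
)

// account is a hypothetical stand-in for common.Account with just the
// fields the aggregation needs.
type account struct {
	TokenID uint32
	Balance *big.Int
}

// sumBalancesByToken mirrors the aggregation done by the removed
// tokenBalances.Update: for every account, add its balance to the running
// total of its token.
func sumBalancesByToken(accounts []account) map[uint32]*big.Int {
	balances := make(map[uint32]*big.Int)
	for _, a := range accounts {
		if total, ok := balances[a.TokenID]; ok {
			total.Add(total, a.Balance)
		} else {
			// Copy so the account's own balance is never mutated later.
			balances[a.TokenID] = new(big.Int).Set(a.Balance)
		}
	}
	return balances
}

func main() {
	accounts := []account{
		{TokenID: 1, Balance: big.NewInt(1000)},
		{TokenID: 2, Balance: big.NewInt(2000)},
		{TokenID: 1, Balance: big.NewInt(100)},
	}
	for tokenID, total := range sumBalancesByToken(accounts) {
		fmt.Printf("token %d: %s\n", tokenID, total.String())
	}
}
```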
@@ -205,8 +114,6 @@ func (a *DebugAPI) Run(ctx context.Context) error {
// is created.
debugAPI.GET("sdb/accounts", a.handleAccounts)
debugAPI.GET("sdb/accounts/:Idx", a.handleAccount)
debugAPI.POST("sdb/tokenbalances", a.handlePostTokenBalances)
debugAPI.GET("sdb/tokenbalances", a.handleTokenBalances)

debugAPI.GET("sync/stats", a.handleSyncStats)
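For reference, the two routes removed above were served under the DebugAPI's /debug/ prefix (as used in the tests further down): a POST to sdb/tokenbalances marked the snapshot as updating so that SyncBlockHook would recompute it on the next synchronized block, and a GET returned the stored snapshot. Below is a hedged client sketch, assuming the localhost:12345 address used in the tests; the response shape is not reproduced here.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Minimal client sketch for the endpoints removed by this commit, assuming
// the DebugAPI listens on localhost:12345 and is mounted under /debug/.
func main() {
	base := "http://localhost:12345/debug/sdb/tokenbalances"

	// POST marks the snapshot as "updating"; the node recomputes it on the
	// next synchronized block (via SyncBlockHook).
	resp, err := http.Post(base, "application/json", nil)
	if err != nil {
		fmt.Println("post:", err)
		return
	}
	resp.Body.Close()

	// GET returns whatever snapshot is currently stored (status, block,
	// batch, per-token balances).
	resp, err = http.Get(base)
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```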
@@ -6,21 +6,13 @@ import (
"io/ioutil"
"math/big"
"net/http"
"os"
"strconv"
"sync"
"testing"

"github.com/dghubble/sling"
ethCommon "github.com/ethereum/go-ethereum/common"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/iden3/go-iden3-crypto/babyjub"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -59,15 +51,12 @@ func TestDebugAPI(t *testing.T) {

addr := "localhost:12345"
// We won't test the sync/stats endpoint, so we can se the Syncrhonizer to nil
debugAPI := NewDebugAPI(addr, nil, sdb, nil)
debugAPI := NewDebugAPI(addr, sdb, nil)

ctx, cancel := context.WithCancel(context.Background())
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := debugAPI.Run(ctx)
require.Nil(t, err)
wg.Done()
}()

var accounts []common.Account
@@ -113,130 +102,4 @@ func TestDebugAPI(t *testing.T) {
assert.Equal(t, accounts, accountsAPI)

cancel()
wg.Wait()
}

func TestDebugAPITokenBalances(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.Nil(t, err)

sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeSynchronizer, NLevels: 32})
require.Nil(t, err)

// Init History DB
pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, nil)
// Clear DB
test.WipeDB(historyDB.DB())

set := `
Type: Blockchain

AddToken(1)
AddToken(2)
AddToken(3)

CreateAccountDeposit(1) A: 1000
CreateAccountDeposit(2) A: 2000
CreateAccountDeposit(1) B: 100
CreateAccountDeposit(2) B: 200
CreateAccountDeposit(2) C: 400

> batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{5}
> batchL1 // forge defined L1UserTxs{5}, freeze L1UserTxs{nil}
> block // blockNum=2
`

chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "A",
}
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)

err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
tc.FillBlocksL1UserTxsBatchNum(blocks)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)

tpc := txprocessor.Config{
NLevels: 32,
MaxTx: 100,
ChainID: chainID,
MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
MaxL1Tx: common.RollupConstMaxL1Tx,
}
tp := txprocessor.NewTxProcessor(sdb, tpc)

for _, block := range blocks {
require.NoError(t, historyDB.AddBlockSCData(&block))
for _, batch := range block.Rollup.Batches {
_, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator,
batch.L1UserTxs, batch.L1CoordinatorTxs, []common.PoolL2Tx{})
require.NoError(t, err)
}
}

addr := "localhost:12345"
// We won't test the sync/stats endpoint, so we can se the Syncrhonizer to nil
debugAPI := NewDebugAPI(addr, historyDB, sdb, nil)

ctx, cancel := context.WithCancel(context.Background())
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := debugAPI.Run(ctx)
require.Nil(t, err)
wg.Done()
}()

var accounts []common.Account
for i := 0; i < 16; i++ {
account := newAccount(t, i)
accounts = append(accounts, *account)
_, err = sdb.CreateAccount(account.Idx, account)
require.Nil(t, err)
}
// Make a checkpoint (batchNum 2) to make the accounts available in Last
err = sdb.MakeCheckpoint()
require.Nil(t, err)

url := fmt.Sprintf("http://%v/debug/", addr)

var batchNum common.BatchNum
req, err := sling.New().Get(url).Path("sdb/batchnum").ReceiveSuccess(&batchNum)
require.Equal(t, http.StatusOK, req.StatusCode)
require.Nil(t, err)
assert.Equal(t, common.BatchNum(2), batchNum)

var mtroot *big.Int
req, err = sling.New().Get(url).Path("sdb/mtroot").ReceiveSuccess(&mtroot)
require.Equal(t, http.StatusOK, req.StatusCode)
require.Nil(t, err)
// Testing against a hardcoded value obtained by running the test and
// printing the value previously.
assert.Equal(t, "21765339739823365993496282904432398015268846626944509989242908567129545640185",
mtroot.String())

var accountAPI common.Account
req, err = sling.New().Get(url).
Path(fmt.Sprintf("sdb/accounts/%v", accounts[0].Idx)).
ReceiveSuccess(&accountAPI)
require.Equal(t, http.StatusOK, req.StatusCode)
require.Nil(t, err)
assert.Equal(t, accounts[0], accountAPI)

var accountsAPI []common.Account
req, err = sling.New().Get(url).Path("sdb/accounts").ReceiveSuccess(&accountsAPI)
require.Equal(t, http.StatusOK, req.StatusCode)
require.Nil(t, err)
assert.Equal(t, accounts, accountsAPI)

cancel()
wg.Wait()
}
@@ -33,11 +33,9 @@ type TxProcessor struct {
// Config contains the TxProcessor configuration parameters
type Config struct {
NLevels uint32
// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
MaxFeeTx uint32
MaxTx uint32
MaxL1Tx uint32
// ChainID of the blockchain
ChainID uint16
}
@@ -407,8 +405,9 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, nil
}

// once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data
if tp.s.Type() == statedb.TypeSynchronizer {
// once all txs processed (exitTree root frozen), for each
// Exit, generate common.ExitInfo data
var exitInfos []common.ExitInfo
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
for i := 0; i < nTx; i++ {
@@ -438,9 +437,9 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
}
}

if tp.s.Type() == statedb.TypeSynchronizer {
// retuTypeexitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum
// return exitInfos, createdAccounts and collectedFees, so
// Synchronizer will be able to store it into HistoryDB for the
// concrete BatchNum
return &ProcessTxOutput{
ZKInputs: nil,
ExitInfos: exitInfos,
@@ -18,8 +18,7 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub"
)

// txs implements the interface Sort for an array of Tx, and sorts the txs by
// absolute fee
// txs implements the interface Sort for an array of Tx
type txs []common.PoolL2Tx

func (t txs) Len() int {
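The hunk above trims the doc comment of the txs type, which implements sort.Interface so that getL2Profitable can order pool transactions by absolute fee. Only Len is visible in the hunk; the sketch below shows the usual shape of such an implementation with hypothetical stand-in types (tx, txsByFee), ordering by a descending AbsoluteFee field, which is an assumption rather than the repository's exact comparison.

```go
package main

import (
	"fmt"
	"sort"
)

// tx is a hypothetical stand-in for common.PoolL2Tx carrying only an
// absolute-fee field for the sketch.
type tx struct {
	ID          int
	AbsoluteFee float64
}

// txsByFee sketches a sort.Interface implementation like the txs type in
// the diff: Len/Swap/Less, with Less ordering by absolute fee (descending,
// so the most profitable txs come first).
type txsByFee []tx

func (t txsByFee) Len() int           { return len(t) }
func (t txsByFee) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t txsByFee) Less(i, j int) bool { return t[i].AbsoluteFee > t[j].AbsoluteFee }

func main() {
	pool := txsByFee{{ID: 1, AbsoluteFee: 0.5}, {ID: 2, AbsoluteFee: 2.0}, {ID: 3, AbsoluteFee: 1.0}}
	sort.Sort(pool)
	fmt.Println(pool) // highest fee first: ID 2, then 3, then 1
}
```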
@@ -591,12 +590,11 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.

// getL2Profitable returns the profitable selection of L2Txssorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx {
// Sort by absolute fee
sort.Sort(txs(l2Txs))

if len(l2Txs) > int(max) {
l2Txs = l2Txs[:max]
if len(l2Txs) < int(max) {
return l2Txs
}
l2Txs = l2Txs[:max]

// sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each
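The change above reworks the truncation step in getL2Profitable: instead of truncating only when there are more candidates than max, the new code returns early when there are fewer than max and otherwise keeps the first max fee-sorted transactions before the nonce-ordering step that follows. Below is a small sketch of that control flow, using plain ints as stand-ins for the fee-sorted transactions.

```go
package main

import "fmt"

// truncateToMax mirrors the control flow introduced in getL2Profitable:
// if there are fewer candidates than max, return them as-is (nothing to
// trim); otherwise keep only the first max entries, which at this point
// are already sorted by fee.
func truncateToMax(l2Txs []int, max uint32) []int {
	if len(l2Txs) < int(max) {
		return l2Txs
	}
	return l2Txs[:max]
}

func main() {
	txs := []int{9, 8, 7, 6, 5}        // stand-ins for fee-sorted txs
	fmt.Println(truncateToMax(txs, 3)) // [9 8 7]
	fmt.Println(truncateToMax(txs, 8)) // [9 8 7 6 5]
}
```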
@@ -538,8 +538,7 @@ func TestTransferToBjj(t *testing.T) {
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
// batch1 to create some accounts with positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
@@ -629,78 +628,3 @@ func TestTransferToBjj(t *testing.T) {
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}

func TestTransferManyFromSameAccount(t *testing.T) {
set := `
Type: Blockchain

CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000

> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`

chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)

hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])

// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()

tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)

// 8 transfers from the same account

batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126) // 1
PoolTransfer(0) A-B: 10 (126) // 2
PoolTransfer(0) A-B: 10 (126) // 3
PoolTransfer(0) A-B: 10 (126) // 4
PoolTransfer(0) A-B: 10 (126) // 5
PoolTransfer(0) A-B: 10 (126) // 6
PoolTransfer(0) A-B: 10 (126) // 7
PoolTransfer(0) A-B: 10 (126) // 8
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)

// reorder poolL2Txs so that nonces are not sorted
poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
poolL2Txs[1], poolL2Txs[6] = poolL2Txs[6], poolL2Txs[1]

// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 8, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}