
Add circuit configuration to node config

- Remove the MaxL1CoordTxs parameter from the TxSelector configuration, because
  this parameter doesn't exist
- Use the ChainID in the L1Tx byte encoding (TxCompressedData)
- Pass the txprocessor configuration to the BatchBuilder via the existing
  ConfigBatch parameter (see the sketch below)
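As a summary, here is a minimal sketch of the new wiring, using the field and variable names that appear in the diffs below (cfg, chainIDU16 and verifierIdx are assumed to be in scope, as in node/node.go); it is illustrative only, with error handling omitted.

    // Circuit parameters now come from the node config ([Coordinator.Circuit])
    // instead of being hardcoded in several places.
    tpCfg := txprocessor.Config{
        NLevels:  uint32(cfg.Coordinator.Circuit.NLevels),
        MaxTx:    uint32(cfg.Coordinator.Circuit.MaxTx),
        ChainID:  chainIDU16,
        MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
        MaxL1Tx:  common.RollupConstMaxL1Tx,
    }
    // The coordinator receives the shared Config and forwards it to the
    // TxSelector and the BatchBuilder.
    coordCfg := coordinator.Config{
        TxProcessorConfig: tpCfg,
        VerifierIdx:       uint8(verifierIdx), // resolved with FindVerifierIdx, see below
    }
    _ = coordCfg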
Branch: feature/sql-semaphore1
Eduard S, 3 years ago, commit 18c854fbaa
12 changed files with 88 additions and 107 deletions:

 1. batchbuilder/batchbuilder.go     (+6  -11)
 2. cli/node/cfg.buidler.toml        (+4  -0)
 3. common/ethrollup.go              (+14 -44)
 4. common/l1tx.go                   (+2  -2)
 5. common/l1tx_test.go              (+2  -1)
 6. config/config.go                 (+6  -1)
 7. coordinator/batch.go             (+1  -0)
 8. coordinator/coordinator.go       (+20 -28)
 9. coordinator/coordinator_test.go  (+11 -16)
10. node/node.go                     (+21 -2)
11. prover/prover.go                 (+0  -1)
12. txprocessor/txprocessor.go       (+1  -1)

batchbuilder/batchbuilder.go (+6 -11)

@@ -24,7 +24,8 @@ type BatchBuilder struct {
// ConfigBatch contains the batch configuration
type ConfigBatch struct {
ForgerAddress ethCommon.Address
ForgerAddress ethCommon.Address
TxProcessorConfig txprocessor.Config
}
// NewBatchBuilder constructs a new BatchBuilder, and executes the bb.Reset
@@ -54,17 +55,11 @@ func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) e
}
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBatch, l1usertxs, l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx, tokenIDs []common.TokenID) (*common.ZKInputs, error) {
//nolint:gomnd
tpc := txprocessor.Config{ // TODO TMP
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
ChainID: uint16(0),
}
func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBatch, l1usertxs,
l1coordinatortxs []common.L1Tx, pooll2txs []common.PoolL2Tx,
tokenIDs []common.TokenID) (*common.ZKInputs, error) {
bbStateDB := bb.localStateDB.StateDB
tp := txprocessor.NewTxProcessor(bbStateDB, tpc)
tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
return ptOut.ZKInputs, tracerr.Wrap(err)
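For context, a hedged usage sketch of the new signature: the caller builds a ConfigBatch that carries the txprocessor configuration. bb, forgerAddr, tpCfg and the transaction slices are assumed to exist in the surrounding code, inside a function that returns an error.

    configBatch := &batchbuilder.ConfigBatch{
        ForgerAddress:     forgerAddr,
        TxProcessorConfig: tpCfg, // same txprocessor.Config the TxSelector uses
    }
    // The final nil is the []common.TokenID argument (feesInfo is still a TODO).
    zkInputs, err := bb.BuildBatch(coordIdxs, configBatch,
        l1UserTxs, l1CoordTxs, poolL2Txs, nil)
    if err != nil {
        return tracerr.Wrap(err)
    }
    _ = zkInputs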

cli/node/cfg.buidler.toml (+4 -0)

@@ -63,6 +63,10 @@ Path = "/tmp/iden3-test/hermez/batchbuilder"
[[Coordinator.ServerProofs]]
URL = "http://localhost:3000/api"
[Coordinator.Circuit]
MaxTx = 512
NLevels = 32
[Coordinator.EthClient]
CallGasLimit = 300000
DeployGasLimit = 1000000

common/ethrollup.go (+14 -44)

@@ -1,56 +1,14 @@
package common
import (
"fmt"
"math/big"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/tracerr"
)
// RollupVars contain the Rollup smart contract variables
// type RollupVars struct {
// EthBlockNum uint64
// ForgeL1Timeout *big.Int
// FeeL1UserTx *big.Int
// FeeAddToken *big.Int
// TokensHEZ eth.Address
// Governance eth.Address
// }
// AuctionVars contain the Auction smart contract variables
// type AuctionVars struct {
// EthBlockNum uint64
// SlotDeadline uint
// CloseAuctionSlots uint
// OpenAuctionSlots uint
// Governance eth.Address
// MinBidSlots MinBidSlots
// Outbidding int
// DonationAddress eth.Address
// GovernanceAddress eth.Address
// AllocationRatio AllocationRatio
// }
// WithdrawDelayerVars contains the Withdrawal Delayer smart contract variables
// type WithdrawDelayerVars struct {
// HermezRollupAddress eth.Address
// HermezGovernanceAddress eth.Address
// EmergencyCouncilAddress eth.Address
// WithdrawalDelay uint
// EmergencyModeStartingTime time.Time
// EmergencyModeEnabled bool
// }
// MinBidSlots TODO
// type MinBidSlots [6]uint
//
// // AllocationRatio TODO
// type AllocationRatio struct {
// Donation uint
// Burn uint
// Forger uint
// }
const (
// RollupConstMaxFeeIdxCoordinator is the maximum number of tokens the
// coordinator can use to collect fees (determines the number of tokens
@@ -146,6 +104,18 @@ type RollupConstants struct {
WithdrawDelayerContract ethCommon.Address `json:"withdrawDelayerContract"`
}
// FindVerifierIdx tries to find a matching verifier in the RollupConstants and
// returns its index
func (c *RollupConstants) FindVerifierIdx(MaxTx, NLevels int64) (int, error) {
for i, verifier := range c.Verifiers {
if verifier.MaxTx == MaxTx && verifier.NLevels == NLevels {
return i, nil
}
}
return 0, tracerr.Wrap(fmt.Errorf("verifier not found for MaxTx: %v, NLevels: %v",
MaxTx, NLevels))
}
// BucketParams are the parameter variables of each Bucket of Rollup Smart
// Contract
type BucketParams struct {
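A usage sketch for FindVerifierIdx. The verifier table below is made up for illustration (real values come from the Rollup smart-contract constants fetched by the synchronizer), and RollupVerifierStruct is assumed to be the element type of the Verifiers field.

    consts := common.RollupConstants{
        Verifiers: []common.RollupVerifierStruct{
            {MaxTx: 344, NLevels: 32},
            {MaxTx: 512, NLevels: 32},
        },
    }
    idx, err := consts.FindVerifierIdx(512, 32) // values from [Coordinator.Circuit]
    if err != nil {
        log.Fatal(err) // no deployed verifier matches the configured circuit
    }
    fmt.Println(idx) // 1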

common/l1tx.go (+2 -2)

@@ -176,7 +176,7 @@ func (tx L1Tx) Tx() Tx {
// [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] empty (signatureConstant) // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
func (tx L1Tx) TxCompressedData() (*big.Int, error) {
func (tx L1Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
amountFloat16, err := NewFloat16(tx.Amount)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -196,7 +196,7 @@ func (tx L1Tx) TxCompressedData() (*big.Int, error) {
return nil, tracerr.Wrap(err)
}
copy(b[19:25], fromIdxBytes[:])
copy(b[25:27], []byte{0, 0}) // TODO this will be generated by the ChainID config parameter
binary.BigEndian.PutUint16(b[25:27], chainID)
copy(b[27:31], SignatureConstantBytes[:])
bi := new(big.Int).SetBytes(b[:])
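A small standalone illustration of where the two ChainID bytes land in the 31-byte compressed-data buffer (offsets as in the layout comment above; all other fields left zero, and SignatureConstantBytes referenced through the common package).

    var b [31]byte
    chainID := uint16(1) // example value
    binary.BigEndian.PutUint16(b[25:27], chainID) // [ 16 bits ] chainId
    copy(b[27:31], common.SignatureConstantBytes[:])
    // b[25] == 0x00, b[26] == 0x01; previously these two bytes were always {0, 0}.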

common/l1tx_test.go (+2 -1)

@@ -56,7 +56,8 @@ func TestL1TxCompressedData(t *testing.T) {
Amount: big.NewInt(4),
TokenID: 5,
}
txCompressedData, err := tx.TxCompressedData()
chainID := uint16(0)
txCompressedData, err := tx.TxCompressedData(chainID)
assert.NoError(t, err)
// test vector value generated from javascript implementation

config/config.go (+6 -1)

@@ -68,7 +68,12 @@ type Coordinator struct {
Path string `validate:"required"`
} `validate:"required"`
ServerProofs []ServerProof `validate:"required"`
EthClient struct {
Circuit struct {
// VerifierIdx uint8 `validate:"required"`
MaxTx int64 `validate:"required"`
NLevels int64 `validate:"required"`
} `validate:"required"`
EthClient struct {
CallGasLimit uint64 `validate:"required"`
DeployGasLimit uint64 `validate:"required"`
GasPriceDiv uint64 `validate:"required"`
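To illustrate how the new TOML section maps onto the struct, here is a minimal, self-contained sketch using github.com/BurntSushi/toml; this is for illustration only and is not necessarily the loader the node uses, and miniConfig is a trimmed stand-in for config.Node.

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type miniConfig struct {
        Coordinator struct {
            Circuit struct {
                MaxTx   int64
                NLevels int64
            }
        }
    }

    func main() {
        raw := `
    [Coordinator.Circuit]
    MaxTx = 512
    NLevels = 32
    `
        var cfg miniConfig
        if _, err := toml.Decode(raw, &cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg.Coordinator.Circuit.MaxTx, cfg.Coordinator.Circuit.NLevels) // 512 32
    }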

coordinator/batch.go (+1 -0)

@@ -32,6 +32,7 @@ type BatchInfo struct {
Proof *prover.Proof
PublicInputs []*big.Int
L1Batch bool
VerifierIdx uint8
L1UserTxsExtra []common.L1Tx
L1CoordTxs []common.L1Tx
L1CoordinatorTxsAuths [][]byte

coordinator/coordinator.go (+20 -28)

@@ -50,8 +50,10 @@ type Config struct {
TxManagerCheckInterval time.Duration
// DebugBatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline
DebugBatchPath string
Purger PurgerCfg
DebugBatchPath string
Purger PurgerCfg
VerifierIdx uint8
TxProcessorConfig txprocessor.Config
}
func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
@@ -213,20 +215,6 @@ func (c *Coordinator) canForge(stats *synchronizer.Stats) bool {
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
c.txManager.SetLastBlock(stats.Eth.LastBlock.Num)
// TMP
//nolint:gomnd
selectionConfig := &txselector.SelectionConfig{
MaxL1UserTxs: 32,
MaxL1CoordinatorTxs: 32,
TxProcessorConfig: txprocessor.Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 64,
ChainID: uint16(0),
},
}
canForge := c.canForge(stats)
if c.pipeline == nil {
if canForge {
@@ -238,7 +226,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
return tracerr.Wrap(err)
}
if err := c.pipeline.Start(batchNum, stats.Sync.LastForgeL1TxsNum,
stats, &c.vars, selectionConfig); err != nil {
stats, &c.vars); err != nil {
c.pipeline = nil
return tracerr.Wrap(err)
}
@@ -689,8 +677,7 @@ func (p *Pipeline) reset(batchNum common.BatchNum, lastForgeL1TxsNum int64,
// Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum, lastForgeL1TxsNum int64,
syncStats *synchronizer.Stats, initSCVars *synchronizer.SCVariables,
selectionConfig *txselector.SelectionConfig) error {
syncStats *synchronizer.Stats, initSCVars *synchronizer.SCVariables) error {
if p.started {
log.Fatal("Pipeline already started")
}
@@ -716,7 +703,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum, lastForgeL1TxsNum int64,
p.stats = syncStats
default:
batchNum = p.batchNum + 1
batchInfo, err := p.forgeBatch(p.ctx, batchNum, selectionConfig)
batchInfo, err := p.forgeBatch(batchNum)
if p.ctx.Err() != nil {
continue
} else if err != nil {
@@ -818,7 +805,7 @@ func (p *Pipeline) sendServerProof(ctx context.Context, batchInfo *BatchInfo) er
}
// forgeBatch the next batch.
func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, selectionConfig *txselector.SelectionConfig) (*BatchInfo, error) {
func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (*BatchInfo, error) {
// remove transactions from the pool that have been there for too long
_, err := p.purger.InvalidateMaybe(p.l2DB, p.txSelector.LocalAccountsDB(),
p.stats.Sync.LastBlock.Num, int64(batchNum))
@@ -832,6 +819,11 @@ func (p *Pipeline) forgeBatch(ctx context.Context, batchNum common.BatchNum, sel
batchInfo := BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
selectionCfg := &txselector.SelectionConfig{
MaxL1UserTxs: common.RollupConstMaxL1UserTx,
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
var poolL2Txs []common.PoolL2Tx
// var feesInfo
var l1UserTxsExtra, l1CoordTxs []common.L1Tx
@@ -847,16 +839,15 @@
if err != nil {
return nil, tracerr.Wrap(err)
}
// TODO once feesInfo is added to method return, add the var
coordIdxs, auths, l1UserTxsExtra, l1CoordTxs, poolL2Txs, err =
p.txSelector.GetL1L2TxSelection(selectionConfig, batchNum, l1UserTxs)
p.txSelector.GetL1L2TxSelection(selectionCfg, batchNum, l1UserTxs)
if err != nil {
return nil, tracerr.Wrap(err)
}
} else {
// 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, err =
p.txSelector.GetL2TxSelection(selectionConfig, batchNum)
p.txSelector.GetL2TxSelection(selectionCfg, batchNum)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -864,11 +855,11 @@
}
// 3. Save metadata from TxSelector output for BatchNum
// TODO feesInfo
batchInfo.L1UserTxsExtra = l1UserTxsExtra
batchInfo.L1CoordTxs = l1CoordTxs
batchInfo.L1CoordinatorTxsAuths = auths
batchInfo.CoordIdxs = coordIdxs
batchInfo.VerifierIdx = p.cfg.VerifierIdx
if err := p.l2DB.StartForging(poolL2TxsIDs(poolL2Txs), batchInfo.BatchNum); err != nil {
return nil, tracerr.Wrap(err)
@@ -885,10 +876,11 @@
// 4. Call BatchBuilder with TxSelector output
configBatch := &batchbuilder.ConfigBatch{
ForgerAddress: p.cfg.ForgerAddress,
ForgerAddress: p.cfg.ForgerAddress,
TxProcessorConfig: p.cfg.TxProcessorConfig,
}
zkInputs, err := p.batchBuilder.BuildBatch(coordIdxs, configBatch, l1UserTxsExtra,
l1CoordTxs, poolL2Txs, nil) // TODO []common.TokenID --> feesInfo
l1CoordTxs, poolL2Txs, nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -948,7 +940,7 @@ func prepareForgeBatchArgs(batchInfo *BatchInfo) *eth.RollupForgeBatchArgs {
L2TxsData: batchInfo.L2Txs,
FeeIdxCoordinator: batchInfo.CoordIdxs,
// Circuit selector
VerifierIdx: 0, // TODO
VerifierIdx: batchInfo.VerifierIdx,
L1Batch: batchInfo.L1Batch,
ProofA: [2]*big.Int{proof.PiA[0], proof.PiA[1]},
ProofB: [2][2]*big.Int{
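As a compressed view (not code from the commit), the configured verifier index now travels Config -> BatchInfo -> RollupForgeBatchArgs instead of being hardcoded to 0; the tiny helper below is only an illustration of that flow, assuming the coordinator package's exported types.

    // Illustration only: how the verifier index reaches the forge call.
    func verifierIdxFlow(cfg coordinator.Config, batchInfo *coordinator.BatchInfo) uint8 {
        batchInfo.VerifierIdx = cfg.VerifierIdx // set in (*Pipeline).forgeBatch
        return batchInfo.VerifierIdx            // copied by prepareForgeBatchArgs into RollupForgeBatchArgs.VerifierIdx
    }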

coordinator/coordinator_test.go (+11 -16)

@@ -90,9 +90,7 @@ type modules struct {
stateDB *statedb.StateDB
}
var maxL1UserTxs uint64 = 128
var maxL1Txs uint64 = 256
var maxL1CoordinatorTxs uint64 = maxL1Txs - maxL1UserTxs
var maxTxs uint64 = 376
var nLevels uint32 = 32 //nolint:deadcode,unused
var maxFeeTxs uint32 = 64 //nolint:deadcode,varcheck
@@ -174,7 +172,16 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
InvalidateBatchDelay: 4,
InvalidateBlockDelay: 4,
},
TxProcessorConfig: txprocessor.Config{
NLevels: nLevels,
MaxFeeTx: maxFeeTxs,
MaxTx: uint32(maxTxs),
MaxL1Tx: uint32(maxL1Txs),
ChainID: chainID,
},
VerifierIdx: 0,
}
serverProofs := []prover.Client{
&prover.MockClient{Delay: 300 * time.Millisecond},
&prover.MockClient{Delay: 400 * time.Millisecond},
@@ -627,24 +634,12 @@ PoolTransfer(0) User2-User3: 300 (126)
batchNum++
selectionConfig := &txselector.SelectionConfig{
MaxL1UserTxs: maxL1UserTxs,
MaxL1CoordinatorTxs: maxL1CoordinatorTxs,
TxProcessorConfig: txprocessor.Config{
NLevels: nLevels,
MaxFeeTx: maxFeeTxs,
MaxTx: uint32(maxTxs),
MaxL1Tx: uint32(maxL1Txs),
ChainID: chainID,
},
}
batchInfo, err := pipeline.forgeBatch(ctx, batchNum, selectionConfig)
batchInfo, err := pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 3, len(batchInfo.L2Txs))
batchNum++
batchInfo, err = pipeline.forgeBatch(ctx, batchNum, selectionConfig)
batchInfo, err = pipeline.forgeBatch(batchNum)
require.NoError(t, err)
assert.Equal(t, 0, len(batchInfo.L2Txs))
}

node/node.go (+21 -2)

@@ -28,6 +28,7 @@ import (
"github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/test/debugapi"
"github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/hermez-node/txselector"
"github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx"
@@ -202,8 +203,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err)
}
// TODO: Get (configCircuits []ConfigCircuit, batchNum common.BatchNum, nLevels uint64) from smart contract
nLevels := uint64(32) //nolint:gomnd
batchBuilder, err := batchbuilder.NewBatchBuilder(cfg.Coordinator.BatchBuilder.Path, stateDB, nil, 0, nLevels)
batchBuilder, err := batchbuilder.NewBatchBuilder(cfg.Coordinator.BatchBuilder.Path,
stateDB, nil, 0, uint64(cfg.Coordinator.Circuit.NLevels))
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -216,6 +217,22 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.ProofServerPollInterval.Duration)
}
txProcessorCfg := txprocessor.Config{
NLevels: uint32(cfg.Coordinator.Circuit.NLevels),
MaxTx: uint32(cfg.Coordinator.Circuit.MaxTx),
ChainID: chainIDU16,
MaxFeeTx: common.RollupConstMaxFeeIdxCoordinator,
MaxL1Tx: common.RollupConstMaxL1Tx,
}
verifierIdx, err := scConsts.Rollup.FindVerifierIdx(
cfg.Coordinator.Circuit.MaxTx,
cfg.Coordinator.Circuit.NLevels,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
log.Infow("Found verifier that matches circuit config", "verifierIdx", verifierIdx)
coord, err = coordinator.NewCoordinator(
coordinator.Config{
ForgerAddress: cfg.Coordinator.ForgerAddress,
@@ -232,6 +249,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
},
VerifierIdx: uint8(verifierIdx),
TxProcessorConfig: txProcessorCfg,
},
historyDB,
l2DB,

prover/prover.go (+0 -1)

@@ -65,7 +65,6 @@ func (p *Proof) UnmarshalJSON(data []byte) error {
if p.PiC[2].Int64() != 1 {
return tracerr.Wrap(fmt.Errorf("Expected PiC[2] == 1, but got %v", p.PiC[2]))
}
// TODO: Assert ones and zeroes
p.Protocol = proof.Protocol
return nil
}

txprocessor/txprocessor.go (+1 -1)

@@ -441,7 +441,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.zki != nil {
// Txs
var err error
tp.zki.TxCompressedData[tp.i], err = tx.TxCompressedData()
tp.zki.TxCompressedData[tp.i], err = tx.TxCompressedData(tp.config.ChainID)
if err != nil {
log.Error(err)
return nil, nil, false, nil, tracerr.Wrap(err)
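Finally, a hedged sketch of how the ChainID now reaches the L1 tx encoding through the processor's configuration; stateDB is assumed to be an initialized statedb.StateDB, and the circuit values are the same illustrative ones used elsewhere in this commit.

    cfg := txprocessor.Config{
        NLevels:  32,
        MaxFeeTx: 64,
        MaxTx:    512,
        MaxL1Tx:  64,
        ChainID:  uint16(5), // example; in the node this is chainIDU16 from node/node.go
    }
    tp := txprocessor.NewTxProcessor(stateDB, cfg)
    // While building ZKInputs, every L1 tx is now encoded with
    // tx.TxCompressedData(cfg.ChainID) instead of hardcoded zero bytes.
    _ = tp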
