mirror of https://github.com/arnaucube/hermez-node.git
synced 2026-02-07 03:16:45 +01:00

Compare commits
3 Commits
feature/fa ... feature/ed

| Author | SHA1 | Date |
|---|---|---|
|  | 672d08c671 |  |
|  | 9b7b333acf |  |
|  | 2a5992d218 |  |

@@ -104,6 +104,12 @@ GasPriceIncPerc = 10
 Path = "/tmp/iden3-test/hermez/ethkeystore"
 Password = "yourpasswordhere"
 
+[Coordinator.EthClient.ForgeBatchGasCost]
+Fixed = 500000
+L1UserTx = 8000
+L1CoordTx = 9000
+L2Tx = 1
+
 [Coordinator.API]
 Coordinator = true
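
The new [Coordinator.EthClient.ForgeBatchGasCost] table maps one-to-one onto the ForgeBatchGasCost struct added to the config package in the next diff. Below is a minimal, self-contained sketch of that mapping; it assumes the BurntSushi/toml decoder, which may differ from the loader the node actually uses, and the nested cfg type is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Mirrors the ForgeBatchGasCost struct introduced in the config diff below.
type ForgeBatchGasCost struct {
	Fixed     uint64
	L1UserTx  uint64
	L1CoordTx uint64
	L2Tx      uint64
}

// Illustrative subset of the node configuration, just deep enough to show
// how the dotted TOML table name resolves to nested struct fields.
type cfg struct {
	Coordinator struct {
		EthClient struct {
			ForgeBatchGasCost ForgeBatchGasCost
		}
	}
}

func main() {
	const data = `
[Coordinator.EthClient.ForgeBatchGasCost]
Fixed = 500000
L1UserTx = 8000
L1CoordTx = 9000
L2Tx = 1
`
	var c cfg
	if _, err := toml.Decode(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.Coordinator.EthClient.ForgeBatchGasCost)
	// {Fixed:500000 L1UserTx:8000 L1CoordTx:9000 L2Tx:1}
}
```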

@@ -35,6 +35,15 @@ type ServerProof struct {
 	URL string `validate:"required"`
 }
 
+// ForgeBatchGasCost is the costs associated to a ForgeBatch transaction, split
+// into different parts to be used in a formula.
+type ForgeBatchGasCost struct {
+	Fixed     uint64 `validate:"required"`
+	L1UserTx  uint64 `validate:"required"`
+	L1CoordTx uint64 `validate:"required"`
+	L2Tx      uint64 `validate:"required"`
+}
+
 // Coordinator is the coordinator specific configuration.
 type Coordinator struct {
 	// ForgerAddress is the address under which this coordinator is forging

@@ -180,6 +189,9 @@ type Coordinator struct {
 			// Password used to decrypt the keys in the keystore
 			Password string `validate:"required"`
 		} `validate:"required"`
+		// ForgeBatchGasCost contains the cost of each action in the
+		// ForgeBatch transaction.
+		ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
 	} `validate:"required"`
 	API struct {
 		// Coordinator enables the coordinator API endpoints
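
The validate:"required" tags mean a zero value in any of the four gas-cost fields should make configuration loading fail, which is presumably why the sample config sets L2Tx = 1 rather than 0. A minimal sketch of that behaviour follows; it assumes the go-playground/validator library that conventionally backs these struct tags, which may not match the node's actual validation wiring.

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type ForgeBatchGasCost struct {
	Fixed     uint64 `validate:"required"`
	L1UserTx  uint64 `validate:"required"`
	L1CoordTx uint64 `validate:"required"`
	L2Tx      uint64 `validate:"required"`
}

func main() {
	v := validator.New()

	// All four costs set: validation passes.
	ok := ForgeBatchGasCost{Fixed: 500000, L1UserTx: 8000, L1CoordTx: 9000, L2Tx: 1}
	fmt.Println(v.Struct(ok)) // <nil>

	// L2Tx left at its zero value: "required" reports an error.
	missing := ForgeBatchGasCost{Fixed: 500000, L1UserTx: 8000, L1CoordTx: 9000}
	fmt.Println(v.Struct(missing) != nil) // true
}
```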

@@ -11,6 +11,7 @@ import (
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/hermeznetwork/hermez-node/batchbuilder"
 	"github.com/hermeznetwork/hermez-node/common"
+	"github.com/hermeznetwork/hermez-node/config"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/eth"

@@ -116,6 +117,9 @@ type Config struct {
 	// VerifierIdx is the index of the verifier contract registered in the
 	// smart contract
 	VerifierIdx uint8
+	// ForgeBatchGasCost contains the cost of each action in the
+	// ForgeBatch transaction.
+	ForgeBatchGasCost config.ForgeBatchGasCost
 	TxProcessorConfig txprocessor.Config
 }

@@ -383,11 +387,23 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
 				fromBatch.ForgerAddr = c.cfg.ForgerAddress
 				fromBatch.StateRoot = big.NewInt(0)
 			}
+			// Before starting the pipeline make sure we reset any
+			// l2tx from the pool that was forged in a batch that
+			// didn't end up being mined. We are already doing
+			// this in handleStopPipeline, but we do it again as a
+			// failsafe in case the last synced batchnum is
+			// different than in the previous call to l2DB.Reorg,
+			// or in case the node was restarted when there was a
+			// started batch that included l2txs but was not mined.
+			if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
+				return tracerr.Wrap(err)
+			}
 			var err error
 			if c.pipeline, err = c.newPipeline(ctx); err != nil {
 				return tracerr.Wrap(err)
 			}
+			c.pipelineFromBatch = fromBatch
 			// Start the pipeline
 			if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
 				c.pipeline = nil
 				return tracerr.Wrap(err)

@@ -508,7 +524,7 @@ func (c *Coordinator) Start() {
 
 	c.wg.Add(1)
 	go func() {
-		waitCh := time.After(longWaitDuration)
+		timer := time.NewTimer(longWaitDuration)
 		for {
 			select {
 			case <-c.ctx.Done():

@@ -520,24 +536,27 @@ func (c *Coordinator) Start() {
 					continue
 				} else if err != nil {
 					log.Errorw("Coordinator.handleMsg", "err", err)
-					waitCh = time.After(c.cfg.SyncRetryInterval)
+					if !timer.Stop() {
+						<-timer.C
+					}
+					timer.Reset(c.cfg.SyncRetryInterval)
 					continue
 				}
-				waitCh = time.After(longWaitDuration)
-			case <-waitCh:
+			case <-timer.C:
+				timer.Reset(longWaitDuration)
 				if !c.stats.Synced() {
-					waitCh = time.After(longWaitDuration)
 					continue
 				}
 				if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
-					waitCh = time.After(longWaitDuration)
 					continue
 				} else if err != nil {
 					log.Errorw("Coordinator.syncStats", "err", err)
-					waitCh = time.After(c.cfg.SyncRetryInterval)
+					if !timer.Stop() {
+						<-timer.C
+					}
+					timer.Reset(c.cfg.SyncRetryInterval)
 					continue
 				}
-				waitCh = time.After(longWaitDuration)
 			}
 		}
 	}()
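
Every loop touched by this compare replaces the waitCh := time.After(d) pattern with a single reusable time.Timer. The motivation is a well-known Go pitfall: each time.After call allocates a timer that is not reclaimed until it fires (a real cost when the wait is longWaitDuration = 999 hours), and a timer that may already have fired must be stopped and drained before Reset, or a stale tick leaks into the next select. The sketch below is a standalone illustration of the pattern used throughout these diffs; the channel, names, and durations are made up for the example.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const longWait = 50 * time.Millisecond
	const retryWait = 10 * time.Millisecond

	work := make(chan string, 1)
	work <- "msg"

	// One timer reused for the life of the loop, instead of a fresh
	// time.After channel on every iteration.
	timer := time.NewTimer(longWait)
	for i := 0; i < 3; i++ {
		select {
		case m := <-work:
			fmt.Println("got", m)
			// Re-arm with the shorter retry interval. Stop reports false
			// when the timer already fired, in which case its channel
			// must be drained before Reset.
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(retryWait)
		case <-timer.C:
			fmt.Println("timer fired")
			// The timer just fired, so its channel is empty and it is
			// safe to Reset directly.
			timer.Reset(longWait)
		}
	}
	// Prints: got msg / timer fired / timer fired
}
```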

@@ -271,7 +271,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 
 	p.wg.Add(1)
 	go func() {
-		waitCh := time.After(zeroDuration)
+		timer := time.NewTimer(zeroDuration)
 		for {
 			select {
 			case <-p.ctx.Done():

@@ -281,23 +281,21 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 			case statsVars := <-p.statsVarsCh:
 				p.stats = statsVars.Stats
 				p.syncSCVars(statsVars.Vars)
-			case <-waitCh:
+			case <-timer.C:
+				timer.Reset(p.cfg.ForgeRetryInterval)
 				// Once errAtBatchNum != 0, we stop forging
 				// batches because there's been an error and we
 				// wait for the pipeline to be stopped.
 				if p.getErrAtBatchNum() != 0 {
-					waitCh = time.After(p.cfg.ForgeRetryInterval)
 					continue
 				}
 				batchNum = p.state.batchNum + 1
 				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
 				if p.ctx.Err() != nil {
-					waitCh = time.After(p.cfg.ForgeRetryInterval)
 					continue
 				} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
 					tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
 					tracerr.Unwrap(err) == errForgeBeforeDelay {
-					waitCh = time.After(p.cfg.ForgeRetryInterval)
 					continue
 				} else if err != nil {
 					p.setErrAtBatchNum(batchNum)

@@ -306,7 +304,6 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 							"Pipeline.handleForgBatch: %v", err),
 						FailedBatchNum: batchNum,
 					})
-					waitCh = time.After(p.cfg.ForgeRetryInterval)
 					continue
 				}
 				p.lastForgeTime = time.Now()

@@ -316,7 +313,10 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
 				case batchChSentServerProof <- batchInfo:
 				case <-p.ctx.Done():
 				}
-				waitCh = time.After(zeroDuration)
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(zeroDuration)
 			}
 		}
 	}()

@@ -123,7 +123,7 @@ func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
 }
 
 // NewAuth generates a new auth object for an ethereum transaction
-func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
+func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) {
 	gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
 	if err != nil {
 		return nil, tracerr.Wrap(err)

@@ -143,15 +143,12 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
 		return nil, tracerr.Wrap(err)
 	}
 	auth.Value = big.NewInt(0) // in wei
-	// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
-	// This requires a function that estimates the gas usage of the
-	// forgeBatch call based on the contents of the ForgeBatch args:
-	// - length of l2txs
-	// - length of l1Usertxs
-	// - length of l1CoordTxs with authorization signature
-	// - length of l1CoordTxs without authoriation signature
-	// - etc.
-	auth.GasLimit = 1000000
+
+	gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
+		uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
+		uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
+		uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
+	auth.GasLimit = gasLimit
 	auth.GasPrice = gasPrice
 	auth.Nonce = nil
 
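
The hardcoded auth.GasLimit = 1000000 is replaced by a linear estimate driven by the ForgeBatchGasCost config shown above. As a quick sanity check of the formula (the batch sizes below are invented for illustration), a batch with 4 L1 user txs, 2 L1 coordinator txs and 300 L2 txs, under the sample costs Fixed = 500000, L1UserTx = 8000, L1CoordTx = 9000, L2Tx = 1, works out as follows:

```go
package main

import "fmt"

func main() {
	// Example ForgeBatchGasCost values from the sample config above.
	const fixed, l1UserTx, l1CoordTx, l2Tx uint64 = 500000, 8000, 9000, 1

	// Hypothetical batch contents, purely for illustration.
	nL1User, nL1Coord, nL2 := uint64(4), uint64(2), uint64(300)

	gasLimit := fixed + nL1User*l1UserTx + nL1Coord*l1CoordTx + nL2*l2Tx
	fmt.Println(gasLimit) // 500000 + 32000 + 18000 + 300 = 550300
}
```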

@@ -191,7 +188,7 @@ func addPerc(v *big.Int, p int64) *big.Int {
 func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
 	var ethTx *types.Transaction
 	var err error
-	auth, err := t.NewAuth(ctx)
+	auth, err := t.NewAuth(ctx, batchInfo)
 	if err != nil {
 		return tracerr.Wrap(err)
 	}

@@ -419,8 +416,6 @@ func (q *Queue) Push(batchInfo *BatchInfo) {
 
 // Run the TxManager
 func (t *TxManager) Run(ctx context.Context) {
-	waitCh := time.After(longWaitDuration)
-
 	var statsVars statsVars
 	select {
 	case statsVars = <-t.statsVarsCh:

@@ -431,6 +426,7 @@ func (t *TxManager) Run(ctx context.Context) {
 	log.Infow("TxManager: received initial statsVars",
 		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
 
+	timer := time.NewTimer(longWaitDuration)
 	for {
 		select {
 		case <-ctx.Done():

@@ -474,13 +470,17 @@ func (t *TxManager) Run(ctx context.Context) {
 				continue
 			}
 			t.queue.Push(batchInfo)
-			waitCh = time.After(t.cfg.TxManagerCheckInterval)
-		case <-waitCh:
+			if !timer.Stop() {
+				<-timer.C
+			}
+			timer.Reset(t.cfg.TxManagerCheckInterval)
+		case <-timer.C:
 			queuePosition, batchInfo := t.queue.Next()
 			if batchInfo == nil {
-				waitCh = time.After(longWaitDuration)
+				timer.Reset(longWaitDuration)
 				continue
 			}
+			timer.Reset(t.cfg.TxManagerCheckInterval)
 			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
 				continue
 			} else if err != nil { //nolint:staticcheck

@@ -336,6 +336,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 			PurgeBlockDelay:      cfg.Coordinator.L2DB.PurgeBlockDelay,
 			InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
 		},
+		ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
 		VerifierIdx:       uint8(verifierIdx),
 		TxProcessorConfig: txProcessorCfg,
 	},

@@ -146,7 +146,7 @@ const longWaitDuration = 999 * time.Hour
 // const provingDuration = 2 * time.Second
 
 func (s *Mock) runProver(ctx context.Context) {
-	waitCh := time.After(longWaitDuration)
+	timer := time.NewTimer(longWaitDuration)
 	for {
 		select {
 		case <-ctx.Done():

@@ -154,21 +154,27 @@ func (s *Mock) runProver(ctx context.Context) {
 		case msg := <-s.msgCh:
 			switch msg.value {
 			case "cancel":
-				waitCh = time.After(longWaitDuration)
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(longWaitDuration)
 				s.Lock()
 				if !s.status.IsReady() {
 					s.status = prover.StatusCodeAborted
 				}
 				s.Unlock()
 			case "prove":
-				waitCh = time.After(s.provingDuration)
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(s.provingDuration)
 				s.Lock()
 				s.status = prover.StatusCodeBusy
 				s.Unlock()
 			}
 			msg.ackCh <- true
-		case <-waitCh:
-			waitCh = time.After(longWaitDuration)
+		case <-timer.C:
+			timer.Reset(longWaitDuration)
 			s.Lock()
 			if s.status != prover.StatusCodeBusy {
 				s.Unlock()