Delete pending txs by external mark, store tx IP

- In tx_pool, add a column called `external_delete` that can be set to true
  externally.  The coordinator periodically deletes all pending txs that have
  this column set to true; the interval for this action is set via the new
  config parameter `Coordinator.PurgeByExtDelInterval` (a query sketch follows
  the config hunk below).
- In tx_pool, add a column for the client IP that sent the transaction.  The
  API fills this value using the ClientIP method from gin.Context, which
  should work even behind a reverse proxy (see the sketch below).
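
As an illustration of the second point, here is a minimal, self-contained
sketch of a gin handler that captures the sender's IP. The route, payload
type, and field names are hypothetical stand-ins, not the actual hermez-node
API code.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// poolTx is a hypothetical stand-in for the pool transaction payload.
type poolTx struct {
	Amount   string `json:"amount"`
	ClientIP string `json:"-"` // filled by the server, never by the client
}

func main() {
	r := gin.Default()
	r.POST("/transactions-pool", func(c *gin.Context) {
		var tx poolTx
		if err := c.ShouldBindJSON(&tx); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		// ClientIP falls back to the X-Forwarded-For / X-Real-IP headers
		// when the request arrives through a trusted reverse proxy, so the
		// stored value is the real client address rather than the proxy's.
		tx.ClientIP = c.ClientIP()
		// ... insert tx together with tx.ClientIP into tx_pool here ...
		c.Status(http.StatusOK)
	})
	_ = r.Run(":8086")
}
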
Author: Eduard S
Date: 2021-02-19 13:54:56 +01:00
parent e23063380c
commit 67b2b7da4b
11 changed files with 138 additions and 42 deletions


@@ -85,6 +85,10 @@ type Config struct {
 	// SyncRetryInterval is the waiting interval between calls to the main
 	// handler of a synced block after an error
 	SyncRetryInterval time.Duration
+	// PurgeByExtDelInterval is the waiting interval between calls
+	// to the PurgeByExternalDelete function of the l2db, which deletes
+	// pending txs externally marked by the column `external_delete`
+	PurgeByExtDelInterval time.Duration
 	// EthClientAttemptsDelay is the delay between attempts to do an eth client
 	// RPC call
 	EthClientAttemptsDelay time.Duration
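
For illustration only: the purge that PurgeByExtDelInterval schedules could
boil down to a single SQL statement over tx_pool. This sketch assumes a
database/sql handle and a state column whose 'pend' value marks pending txs;
both the helper name and the 'pend' value are assumptions, not the actual
L2DB implementation.

package l2dbsketch

import "database/sql"

// purgeByExternalDelete deletes every pending tx that an external actor has
// flagged via the external_delete column. The 'pend' state value is an
// assumption for this sketch.
func purgeByExternalDelete(db *sql.DB) error {
	_, err := db.Exec(
		`DELETE FROM tx_pool WHERE external_delete = TRUE AND state = 'pend';`)
	return err
}
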
@@ -153,6 +157,15 @@ type Coordinator struct {
 	wg     sync.WaitGroup
 	cancel context.CancelFunc
+	// mutexL2DBUpdateDelete protects updates to the L2DB so that
+	// these two processes always happen exclusively:
+	// - Pipeline taking pending txs, running them through the TxProcessor
+	//   and marking selected txs as forging
+	// - Coordinator deleting pending txs that have been marked with
+	//   `external_delete`.
+	// Without this mutex, the coordinator could delete a pending tx that
+	// has just been selected by the TxProcessor in the pipeline.
+	mutexL2DBUpdateDelete sync.Mutex
 	pipeline *Pipeline

 	lastNonFailedBatchNum common.BatchNum
@@ -248,7 +261,8 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
 func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
 	c.pipelineNum++
 	return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
-		c.batchBuilder, c.purger, c, c.txManager, c.provers, &c.consts)
+		c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
+		c.provers, &c.consts)
 }

 // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -527,6 +541,24 @@ func (c *Coordinator) Start() {
 			}
 		}
 	}()
+
+	c.wg.Add(1)
+	go func() {
+		for {
+			select {
+			case <-c.ctx.Done():
+				log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
+				c.wg.Done()
+				return
+			case <-time.After(c.cfg.PurgeByExtDelInterval):
+				c.mutexL2DBUpdateDelete.Lock()
+				if err := c.l2DB.PurgeByExternalDelete(); err != nil {
+					log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
+				}
+				c.mutexL2DBUpdateDelete.Unlock()
+			}
+		}
+	}()
 }

 const stopCtxTimeout = 200 * time.Millisecond
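
The goroutine above is a standard shutdown-aware polling loop. Below is a
self-contained toy version (generic names, arbitrary interval) showing how
the context, the WaitGroup, and the shared mutex interact. Note that
time.After arms a fresh timer on every iteration, so the next purge is
scheduled relative to the end of the previous one rather than on a fixed
clock.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	var mu sync.Mutex // stands in for mutexL2DBUpdateDelete

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				fmt.Println("purge loop done")
				return
			case <-time.After(300 * time.Millisecond):
				mu.Lock()
				fmt.Println("purging externally deleted txs")
				mu.Unlock()
			}
		}
	}()

	time.Sleep(time.Second) // let the loop fire a few times
	cancel()                // triggers ctx.Done and a clean exit
	wg.Wait()
}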


@@ -45,15 +45,16 @@ type Pipeline struct {
 	errAtBatchNum common.BatchNum
 	lastForgeTime time.Time

-	proversPool  *ProversPool
-	provers      []prover.Client
-	coord        *Coordinator
-	txManager    *TxManager
-	historyDB    *historydb.HistoryDB
-	l2DB         *l2db.L2DB
-	txSelector   *txselector.TxSelector
-	batchBuilder *batchbuilder.BatchBuilder
-	purger       *Purger
+	proversPool           *ProversPool
+	provers               []prover.Client
+	coord                 *Coordinator
+	txManager             *TxManager
+	historyDB             *historydb.HistoryDB
+	l2DB                  *l2db.L2DB
+	txSelector            *txselector.TxSelector
+	batchBuilder          *batchbuilder.BatchBuilder
+	mutexL2DBUpdateDelete *sync.Mutex
+	purger                *Purger

 	stats synchronizer.Stats
 	vars  synchronizer.SCVariables
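
Design note: the Pipeline stores a *sync.Mutex rather than a mutex value, and
newPipeline passes &c.mutexL2DBUpdateDelete, so the Coordinator's purge loop
and the Pipeline lock the same underlying mutex; copying a sync.Mutex by
value would silently break the mutual exclusion.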
@@ -84,6 +85,7 @@ func NewPipeline(ctx context.Context,
 	l2DB *l2db.L2DB,
 	txSelector *txselector.TxSelector,
 	batchBuilder *batchbuilder.BatchBuilder,
+	mutexL2DBUpdateDelete *sync.Mutex,
 	purger *Purger,
 	coord *Coordinator,
 	txManager *TxManager,
@@ -104,19 +106,20 @@ func NewPipeline(ctx context.Context,
 		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
 	}
 	return &Pipeline{
-		num:          num,
-		cfg:          cfg,
-		historyDB:    historyDB,
-		l2DB:         l2DB,
-		txSelector:   txSelector,
-		batchBuilder: batchBuilder,
-		provers:      provers,
-		proversPool:  proversPool,
-		purger:       purger,
-		coord:        coord,
-		txManager:    txManager,
-		consts:       *scConsts,
-		statsVarsCh:  make(chan statsVars, queueLen),
+		num:                   num,
+		cfg:                   cfg,
+		historyDB:             historyDB,
+		l2DB:                  l2DB,
+		txSelector:            txSelector,
+		batchBuilder:          batchBuilder,
+		provers:               provers,
+		proversPool:           proversPool,
+		mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
+		purger:                purger,
+		coord:                 coord,
+		txManager:             txManager,
+		consts:                *scConsts,
+		statsVarsCh:           make(chan statsVars, queueLen),
 	}, nil
 }
@@ -199,7 +202,9 @@ func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
 // and then waits for an available proof server and sends the zkInputs to it so
 // that the proof computation begins.
 func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
+	p.mutexL2DBUpdateDelete.Lock()
 	batchInfo, err := p.forgeBatch(batchNum)
+	p.mutexL2DBUpdateDelete.Unlock()
 	if ctx.Err() != nil {
 		return nil, ctx.Err()
 	} else if err != nil {
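
Note that the lock covers only the synchronous forgeBatch call, where pending
txs are selected and marked as forging; the subsequent wait for an available
proof server happens outside the critical section, so a slow prover does not
block the coordinator's purge loop.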