Merge pull request #467 from hermeznetwork/feature/fixes2
Update node and apply some fixes
@@ -40,12 +40,20 @@ TokenHEZName = "Hermez Network Token"

[Coordinator]
# ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
ConfirmBlocks = 10
-L1BatchTimeoutPerc = 0.6
+L1BatchTimeoutPerc = 0.4
ProofServerPollInterval = "1s"
SyncRetryInterval = "1s"

[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
# PrivateKey = "0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd"
BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
# BJJPrivateKey = "0xb556862fb60e7cf4c0a8a7f44baf2ab06a4c90ac39decc4eef363b6142d07a34"

[Coordinator.L2DB]
SafetyPeriod = 10
MaxTxs = 512
@@ -71,10 +79,11 @@ NLevels = 32

[Coordinator.EthClient]
ReceiptTimeout = "60s"
-ReceiptLoopInterval = "500ms"
+CheckLoopInterval = "500ms"
Attempts = 8
AttemptsDelay = "200ms"
CallGasLimit = 300000
GasPriceDiv = 100

[Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore"
@@ -5,3 +5,6 @@ go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x30f5fddb34

# Boot Coordinator
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563

# FeeAccount
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd
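These importkey calls pair with the new genbjj subcommand added to main.go below: it prints a fresh BabyJubJub keypair in exactly the TOML form the new [Coordinator.FeeAccount] section expects. A plausible invocation, assuming it takes the same global flags as importkey (not confirmed by this diff), would be:

go run . --mode coord --cfg cfg.buidler.toml genbjj

which prints BJJ = "0x…" and BJJPrivateKey = "0x…" lines ready to paste into the config.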
@@ -1,6 +1,7 @@
package main

import (
+	"encoding/hex"
	"fmt"
	"os"
	"os/signal"
@@ -13,6 +14,7 @@ import (
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/hermez-node/node"
	"github.com/hermeznetwork/tracerr"
+	"github.com/iden3/go-iden3-crypto/babyjub"
	"github.com/urfave/cli/v2"
)
@@ -25,6 +27,15 @@ const (
	modeCoord = "coord"
)

+func cmdGenBJJ(c *cli.Context) error {
+	sk := babyjub.NewRandPrivKey()
+	skBuf := [32]byte(sk)
+	pk := sk.Public()
+	fmt.Printf("BJJ = \"0x%s\"\n", pk.String())
+	fmt.Printf("BJJPrivateKey = \"0x%s\"\n", hex.EncodeToString(skBuf[:]))
+	return nil
+}
+
func cmdImportKey(c *cli.Context) error {
	_cfg, err := parseCli(c)
	if err != nil {
@@ -196,6 +207,12 @@ func main() {
			Required: true,
		}},
	},
+	{
+		Name:    "genbjj",
+		Aliases: []string{},
+		Usage:   "Generate a new BabyJubJub key",
+		Action:  cmdGenBJJ,
+	},
	{
		Name:    "wipesql",
		Aliases: []string{},
@@ -49,12 +49,34 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
	return ethCrypto.Keccak256Hash(b).Bytes(), nil
}

+// Sign signs the account creation authorization message using the provided
+// `signHash` function, and stores the signature in `a.Signature`. `signHash`
+// should do an ethereum signature using the account corresponding to
+// `a.EthAddr`. The `signHash` function is used to make signing flexible: in
+// tests we sign directly using the private key, outside tests we sign using
+// the keystore (which never exposes the private key).
+func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
+	chainID uint16, hermezContractAddr ethCommon.Address) error {
+	hash, err := a.HashToSign(chainID, hermezContractAddr)
+	if err != nil {
+		return err
+	}
+	sig, err := signHash(hash)
+	if err != nil {
+		return err
+	}
+	sig[64] += 27
+	a.Signature = sig
+	a.Timestamp = time.Now()
+	return nil
+}
+
// VerifySignature ensures that the Signature is done with the EthAddr, for the
// chainID and hermezContractAddress passed by parameter
func (a *AccountCreationAuth) VerifySignature(chainID uint16,
	hermezContractAddr ethCommon.Address) bool {
	// Calculate hash to be signed
-	msg, err := a.HashToSign(chainID, hermezContractAddr)
+	hash, err := a.HashToSign(chainID, hermezContractAddr)
	if err != nil {
		return false
	}
@@ -64,7 +86,7 @@ func (a *AccountCreationAuth) VerifySignature(chainID uint16,
	sig[64] -= 27

	// Get public key from Signature
-	pubKBytes, err := ethCrypto.Ecrecover(msg, sig[:])
+	pubKBytes, err := ethCrypto.Ecrecover(hash, sig[:])
	if err != nil {
		return false
	}
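Taken together, Sign and VerifySignature round-trip an Ethereum signature over the Keccak hash of the authorization message: Sign adds 27 to the recovery byte (Ethereum's historical v ∈ {27, 28} convention) and VerifySignature subtracts it again before calling Ecrecover. A minimal usage sketch with a raw key, test-style; the key, address, and BJJ values here are placeholders, not repo fixtures:

sk, _ := ethCrypto.HexToECDSA("...hypothetical private key hex...")
auth := &AccountCreationAuth{EthAddr: addr, BJJ: bjjComp}
// sign with the raw key; production code passes keyStore.SignHash instead
err := auth.Sign(func(hash []byte) ([]byte, error) {
	return ethCrypto.Sign(hash, sk)
}, chainID, hermezContractAddr)
// ...
ok := auth.VerifySignature(chainID, hermezContractAddr) // true when EthAddr matches sk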
@@ -11,7 +11,7 @@ import (
	"github.com/stretchr/testify/require"
)

-func TestAccountCreationAuthVerification(t *testing.T) {
+func TestAccountCreationAuthSignVerify(t *testing.T) {
	// Ethereum key
	ethSk, err := ethCrypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
	require.NoError(t, err)
@@ -21,7 +21,7 @@ func TestAccountCreationAuthVerification(t *testing.T) {
	var sk babyjub.PrivateKey
	_, err = hex.Decode(sk[:],
		[]byte("0001020304050607080900010203040506070809000102030405060708090001"))
-	assert.NoError(t, err)
+	require.NoError(t, err)

	chainID := uint16(0)
	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
@@ -29,16 +29,22 @@ func TestAccountCreationAuthVerification(t *testing.T) {
		EthAddr: ethAddr,
		BJJ:     sk.Public().Compress(),
	}
-	msg, err := a.HashToSign(chainID, hermezContractAddr)
-	assert.NoError(t, err)
-	assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
-		hex.EncodeToString(msg))
-
-	// sign AccountCreationAuth with eth key
-	sig, err := ethCrypto.Sign(msg, ethSk)
-	assert.NoError(t, err)
+	// Sign using the Sign function (stores signature in a.Signature)
+	err = a.Sign(func(hash []byte) ([]byte, error) {
+		return ethCrypto.Sign(hash, ethSk)
+	}, chainID, hermezContractAddr)
+	require.NoError(t, err)
+
+	// Hash and sign manually and compare the generated signature
+	hash, err := a.HashToSign(chainID, hermezContractAddr)
+	require.NoError(t, err)
+	assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
+		hex.EncodeToString(hash))
+	sig, err := ethCrypto.Sign(hash, ethSk)
+	require.NoError(t, err)
	sig[64] += 27
-	a.Signature = sig
+	assert.Equal(t, sig, a.Signature)

	assert.True(t, a.VerifySignature(chainID, hermezContractAddr))
}
@@ -107,7 +113,7 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
		// BabyJubJub key
		pkCompStr := tv.pkCompStr
		pkComp, err := BJJFromStringWithChecksum(pkCompStr)
-		assert.NoError(t, err)
+		require.NoError(t, err)

		chainID := tv.chainID
		hermezContractAddr := ethCommon.HexToAddress(tv.hermezContractAddr)
@@ -122,13 +128,13 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
		assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))

		msg, err := a.HashToSign(chainID, hermezContractAddr)
-		assert.NoError(t, err)
+		require.NoError(t, err)
		assert.Equal(t, tv.hashExpected,
			hex.EncodeToString(msg))

		// sign AccountCreationAuth with eth key
		sig, err := ethCrypto.Sign(msg, ethSk)
-		assert.NoError(t, err)
+		require.NoError(t, err)
		sig[64] += 27
		assert.Equal(t, tv.sigExpected,
			hex.EncodeToString(sig))
@@ -33,7 +33,7 @@ const (
	// RollupConstInputSHAConstantBytes [6 bytes] lastIdx + [6 bytes] newLastIdx + [32 bytes] stateRoot + [32 bytes] newStRoot + [32 bytes] newExitRoot +
	// [_MAX_L1_TX * _L1_USER_TOTALBYTES bytes] l1TxsData + totalL2TxsDataLength + feeIdxCoordinatorLength + [2 bytes] chainID =
	// 18542 bytes + totalL2TxsDataLength + feeIdxCoordinatorLength
-	RollupConstInputSHAConstantBytes = 18542
+	RollupConstInputSHAConstantBytes = 18546
	// RollupConstNumBuckets Number of buckets
	RollupConstNumBuckets = 5
	// RollupConstMaxWithdrawalDelay max withdrawal delay in seconds
@@ -9,6 +9,7 @@ import (
	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/tracerr"
+	"github.com/iden3/go-iden3-crypto/babyjub"
	"gopkg.in/go-playground/validator.v9"
)

@@ -37,6 +38,13 @@ type ServerProof struct {
type Coordinator struct {
	// ForgerAddress is the address under which this coordinator is forging
	ForgerAddress ethCommon.Address `validate:"required"`
+	// FeeAccount is the Hermez account that the coordinator uses to receive fees
+	FeeAccount struct {
+		// Address is the ethereum address of the account to receive fees
+		Address ethCommon.Address `validate:"required"`
+		// BJJ is the baby jub jub public key of the account to receive fees
+		BJJ babyjub.PublicKeyComp `validate:"required"`
+	} `validate:"required"`
	// ConfirmBlocks is the number of confirmation blocks to wait for sent
	// ethereum transactions before forgetting about them
	ConfirmBlocks int64 `validate:"required"`
@@ -162,7 +170,8 @@ type Node struct {
		SyncLoopInterval Duration `validate:"required"`
		// StatsRefreshPeriod is the interval between updates of the
		// synchronizer state Eth parameters (`Eth.LastBlock` and
-		// `Eth.LastBatch`)
+		// `Eth.LastBatch`). This value only affects the reported % of
+		// synchronization of blocks and batches, nothing else.
		StatsRefreshPeriod Duration `validate:"required"`
	} `validate:"required"`
	SmartContracts struct {
@@ -14,14 +14,22 @@ import (
	"github.com/hermeznetwork/tracerr"
)

-// TxStatus is used to mark the status of an ethereum transaction
-type TxStatus string
+// Status is used to mark the status of the batch
+type Status string

const (
-	// TxStatusPending marks the Tx as Pending
-	TxStatusPending TxStatus = "pending"
-	// TxStatusSent marks the Tx as Sent
-	TxStatusSent TxStatus = "sent"
+	// StatusPending marks the Tx as Pending
+	StatusPending Status = "pending"
+	// StatusForged marks the batch as forged internally
+	StatusForged Status = "forged"
+	// StatusProof marks the batch as proof calculated
+	StatusProof Status = "proof"
+	// StatusSent marks the EthTx as Sent
+	StatusSent Status = "sent"
+	// StatusMined marks the EthTx as Mined
+	StatusMined Status = "mined"
+	// StatusFailed marks the EthTx as Failed
+	StatusFailed Status = "failed"
)

// BatchInfo contains the Batch information
@@ -40,9 +48,9 @@ type BatchInfo struct {
	CoordIdxs []common.Idx
	ForgeBatchArgs *eth.RollupForgeBatchArgs
	// FeesInfo
-	TxStatus TxStatus
-	EthTx    *types.Transaction
-	Receipt  *types.Receipt
+	Status  Status
+	EthTx   *types.Transaction
+	Receipt *types.Receipt
}

// DebugStore is a debug function to store the BatchInfo as a json text file in
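The widened Status type now tracks the whole batch lifecycle instead of only the ethereum transaction. Piecing together the assignments that appear in the coordinator changes below, a batch moves through the states roughly like this (a summary of this diff, not code from the repo):

// forgeBatch:       batchInfo.Status = StatusForged  (ZKInputs computed)
// waitServerProof:  batchInfo.Status = StatusProof   (proof and public inputs ready)
// rollupForgeBatch: batchInfo.Status = StatusSent    (forgeBatch ethereum tx sent)
// handleReceipt:    batchInfo.Status = StatusMined or StatusFailed (receipt processed)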
@@ -156,10 +156,9 @@ func NewCoordinator(cfg Config,
	return &c, nil
}

-func (c *Coordinator) newPipeline(ctx context.Context,
-	stats *synchronizer.Stats) (*Pipeline, error) {
+func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
	return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector,
-		c.batchBuilder, c.purger, c.txManager, c.provers, stats, &c.consts)
+		c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts)
}

// MsgSyncBlock indicates an update to the Synchronizer stats
@@ -174,6 +173,7 @@ type MsgSyncBlock struct {
// MsgSyncReorg indicates a reorg
type MsgSyncReorg struct {
	Stats synchronizer.Stats
+	Vars  synchronizer.SCVariablesPtr
}

// MsgStopPipeline indicates a signal to reset the pipeline
@@ -222,7 +222,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
			stats.Eth.LastBlock.Num, "batch", stats.Sync.LastBatch)
		batchNum := common.BatchNum(stats.Sync.LastBatch)
		var err error
-		if c.pipeline, err = c.newPipeline(ctx, stats); err != nil {
+		if c.pipeline, err = c.newPipeline(ctx); err != nil {
			return tracerr.Wrap(err)
		}
		if err := c.pipeline.Start(batchNum, stats.Sync.LastForgeL1TxsNum,
@@ -233,9 +233,7 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
			c.pipelineBatchNum = batchNum
		}
	} else {
-		if canForge {
-			c.pipeline.SetSyncStats(stats)
-		} else {
+		if !canForge {
			log.Infow("Coordinator: forging state end", "block", stats.Eth.LastBlock.Num)
			c.pipeline.Stop(c.ctx)
			c.pipeline = nil
@@ -269,15 +267,42 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)

func (c *Coordinator) handleMsgSyncBlock(ctx context.Context, msg *MsgSyncBlock) error {
	c.stats = &msg.Stats
-	// batches := msg.Batches
+	c.syncSCVars(msg.Vars)
+	if c.pipeline != nil {
+		c.pipeline.SetSyncStatsVars(&msg.Stats, &msg.Vars)
+	}
	if !c.stats.Synced() {
		return nil
	}
-	c.syncSCVars(msg.Vars)
	return c.syncStats(ctx, c.stats)
}

+func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error {
+	c.stats = &msg.Stats
+	c.syncSCVars(msg.Vars)
+	if c.pipeline != nil {
+		c.pipeline.SetSyncStatsVars(&msg.Stats, &msg.Vars)
+	}
+	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
+		// There's been a reorg and the batch from which the pipeline
+		// was started was in a block that was discarded. The batch
+		// may not be in the main chain, so we stop the pipeline as a
+		// precaution (it will be started again once the node is in
+		// sync).
+		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
+			"sync.LastBatch", c.stats.Sync.LastBatch,
+			"c.pipelineBatchNum", c.pipelineBatchNum)
+		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
+			return tracerr.Wrap(err)
+		}
+	}
+	return nil
+}
+
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error {
	if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
		return tracerr.Wrap(err)
	}
	if c.pipeline != nil {
		c.pipeline.Stop(c.ctx)
		c.pipeline = nil
@@ -295,7 +320,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
			return tracerr.Wrap(fmt.Errorf("Coordinator.handleMsgSyncBlock error: %w", err))
		}
	case MsgSyncReorg:
-		if err := c.handleReorg(ctx, &msg.Stats); err != nil {
+		if err := c.handleReorg(ctx, &msg); err != nil {
			return tracerr.Wrap(fmt.Errorf("Coordinator.handleReorg error: %w", err))
		}
	case MsgStopPipeline:
@@ -376,27 +401,6 @@ func (c *Coordinator) Stop() {
	}
}

-func (c *Coordinator) handleReorg(ctx context.Context, stats *synchronizer.Stats) error {
-	c.stats = stats
-	if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum {
-		// There's been a reorg and the batch from which the pipeline
-		// was started was in a block that was discarded. The batch
-		// may not be in the main chain, so we stop the pipeline as a
-		// precaution (it will be started again once the node is in
-		// sync).
-		log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum",
-			"sync.LastBatch", c.stats.Sync.LastBatch,
-			"c.pipelineBatchNum", c.pipelineBatchNum)
-		if err := c.handleStopPipeline(ctx, "reorg"); err != nil {
-			return tracerr.Wrap(err)
-		}
-		if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil {
-			return tracerr.Wrap(err)
-		}
-	}
-	return nil
-}
-
// TxManager handles everything related to ethereum transactions: It makes the
// call to forge, waits for transaction confirmation, and keeps checking them
// until a number of confirmed blocks have passed.
@@ -465,6 +469,7 @@ func (t *TxManager) rollupForgeBatch(ctx context.Context, batchInfo *BatchInfo)
		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
	}
	batchInfo.EthTx = ethTx
+	batchInfo.Status = StatusSent
	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
	t.cfg.debugBatchStore(batchInfo)
	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
@@ -506,9 +511,13 @@ func (t *TxManager) handleReceipt(batchInfo *BatchInfo) (*int64, error) {
	receipt := batchInfo.Receipt
	if receipt != nil {
		if receipt.Status == types.ReceiptStatusFailed {
+			batchInfo.Status = StatusFailed
+			t.cfg.debugBatchStore(batchInfo)
			log.Errorw("TxManager receipt status is failed", "receipt", receipt)
			return nil, tracerr.Wrap(fmt.Errorf("ethereum transaction receipt status is failed"))
		} else if receipt.Status == types.ReceiptStatusSuccessful {
+			batchInfo.Status = StatusMined
+			t.cfg.debugBatchStore(batchInfo)
			if batchInfo.BatchNum > t.lastConfirmedBatch {
				t.lastConfirmedBatch = batchInfo.BatchNum
			}
@@ -534,7 +543,7 @@ func (t *TxManager) Run(ctx context.Context) {
		case lastBlock := <-t.lastBlockCh:
			t.lastBlock = lastBlock
		case batchInfo := <-t.batchCh:
-			if err := t.rollupForgeBatch(ctx, batchInfo); common.IsErrDone(err) {
+			if err := t.rollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
				continue
			} else if err != nil {
				t.coord.SendMsg(MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch call: %v", err)})
@@ -550,8 +559,7 @@ func (t *TxManager) Run(ctx context.Context) {
			current := next
			next = (current + 1) % len(t.queue)
			batchInfo := t.queue[current]
-			err := t.ethTransactionReceipt(ctx, batchInfo)
-			if common.IsErrDone(err) {
+			if err := t.ethTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
				continue
			} else if err != nil { //nolint:staticcheck
				// We can't get the receipt for the
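Both loops now use the same shutdown idiom: run the call, then consult ctx.Err() to distinguish cancellation from a genuine failure, instead of matching a sentinel error with common.IsErrDone. The pattern in isolation (a generic sketch with a hypothetical doWork, not repo code):

if err := doWork(ctx); ctx.Err() != nil {
	// context canceled: the node is shutting down, so whatever
	// doWork returned is irrelevant; skip error handling
	continue
} else if err != nil {
	// a real error while the context is still live: report it
}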
@@ -580,6 +588,11 @@ func (t *TxManager) Run(ctx context.Context) {
		}
	}

+type statsVars struct {
+	Stats synchronizer.Stats
+	Vars  synchronizer.SCVariablesPtr
+}
+
// Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct {
	cfg Config
@@ -587,7 +600,6 @@ type Pipeline struct {

	// state
	batchNum common.BatchNum
-	vars synchronizer.SCVariables
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum int64
	started bool
@@ -601,8 +613,9 @@ type Pipeline struct {
	batchBuilder *batchbuilder.BatchBuilder
	purger *Purger

-	stats   synchronizer.Stats
-	statsCh chan synchronizer.Stats
+	stats       synchronizer.Stats
+	vars        synchronizer.SCVariables
+	statsVarsCh chan statsVars

	ctx context.Context
	wg sync.WaitGroup
@@ -619,7 +632,6 @@ func NewPipeline(ctx context.Context,
	purger *Purger,
	txManager *TxManager,
	provers []prover.Client,
-	stats *synchronizer.Stats,
	scConsts *synchronizer.SCConsts,
) (*Pipeline, error) {
	proversPool := NewProversPool(len(provers))
@@ -646,22 +658,22 @@ func NewPipeline(ctx context.Context,
		purger: purger,
		txManager: txManager,
		consts: *scConsts,
-		stats: *stats,
-		statsCh: make(chan synchronizer.Stats, queueLen),
+		statsVarsCh: make(chan statsVars, queueLen),
	}, nil
}

-// SetSyncStats is a thread safe method to set the synchronizer Stats
-func (p *Pipeline) SetSyncStats(stats *synchronizer.Stats) {
-	p.statsCh <- *stats
+// SetSyncStatsVars is a thread safe method to set the synchronizer Stats
+func (p *Pipeline) SetSyncStatsVars(stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
+	p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}
}

// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum, lastForgeL1TxsNum int64,
-	initSCVars *synchronizer.SCVariables) error {
+	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
	p.batchNum = batchNum
	p.lastForgeL1TxsNum = lastForgeL1TxsNum
-	p.vars = *initSCVars
+	p.stats = *stats
+	p.vars = *vars
	p.lastScheduledL1BatchBlockNum = 0

	err := p.txSelector.Reset(p.batchNum)
@@ -675,15 +687,27 @@ func (p *Pipeline) reset(batchNum common.BatchNum, lastForgeL1TxsNum int64,
	return nil
}

+func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
+	if vars.Rollup != nil {
+		p.vars.Rollup = *vars.Rollup
+	}
+	if vars.Auction != nil {
+		p.vars.Auction = *vars.Auction
+	}
+	if vars.WDelayer != nil {
+		p.vars.WDelayer = *vars.WDelayer
+	}
+}
+
// Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum, lastForgeL1TxsNum int64,
-	syncStats *synchronizer.Stats, initSCVars *synchronizer.SCVariables) error {
+	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
	if p.started {
		log.Fatal("Pipeline already started")
	}
	p.started = true

-	if err := p.reset(batchNum, lastForgeL1TxsNum, initSCVars); err != nil {
+	if err := p.reset(batchNum, lastForgeL1TxsNum, stats, vars); err != nil {
		return tracerr.Wrap(err)
	}
	p.ctx, p.cancel = context.WithCancel(context.Background())
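syncSCVars treats SCVariablesPtr as a partial update: each field is a pointer, and nil means "unchanged", so only the contracts whose variables actually changed in a block get copied over. A hypothetical update touching only the rollup variables would look like:

p.syncSCVars(synchronizer.SCVariablesPtr{Rollup: &newRollupVars}) // Auction and WDelayer keep their previous values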
@@ -699,8 +723,9 @@ func (p *Pipeline) Start(batchNum common.BatchNum, lastForgeL1TxsNum int64,
				log.Info("Pipeline forgeBatch loop done")
				p.wg.Done()
				return
-			case syncStats := <-p.statsCh:
-				p.stats = syncStats
+			case statsVars := <-p.statsVarsCh:
+				p.stats = statsVars.Stats
+				p.syncSCVars(statsVars.Vars)
			default:
				batchNum = p.batchNum + 1
				batchInfo, err := p.forgeBatch(batchNum)
@@ -813,6 +838,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (*BatchInfo, error) {
	var l1UserTxsExtra, l1CoordTxs []common.L1Tx
	var auths [][]byte
	var coordIdxs []common.Idx

	// 1. Decide if we forge L2Tx or L1+L2Tx
	if p.shouldL1L2Batch() {
+		batchInfo.L1Batch = true
@@ -876,6 +902,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (*BatchInfo, error) {

	// 5. Save metadata from BatchBuilder output for BatchNum
	batchInfo.ZKInputs = zkInputs
+	batchInfo.Status = StatusForged
	p.cfg.debugBatchStore(&batchInfo)

	return &batchInfo, nil
@@ -890,7 +917,7 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
	batchInfo.Proof = proof
	batchInfo.PublicInputs = pubInputs
	batchInfo.ForgeBatchArgs = prepareForgeBatchArgs(batchInfo)
-	batchInfo.TxStatus = TxStatusPending
+	batchInfo.Status = StatusProof
	p.cfg.debugBatchStore(batchInfo)
	return nil
}
@@ -905,7 +932,7 @@ func (p *Pipeline) shouldL1L2Batch() bool {
	// Return true if we have passed the l1BatchTimeoutPerc portion of the
	// range before the l1batch timeout.
	if p.stats.Eth.LastBlock.Num-lastL1BatchBlockNum >=
-		int64(float64(p.vars.Rollup.ForgeL1L2BatchTimeout)*p.cfg.L1BatchTimeoutPerc) {
+		int64(float64(p.vars.Rollup.ForgeL1L2BatchTimeout-1)*p.cfg.L1BatchTimeoutPerc) {
		return true
	}
	return false
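Worked through with illustrative numbers: if the contract's ForgeL1L2BatchTimeout were 10 blocks and L1BatchTimeoutPerc is the 0.4 from the updated config, the old threshold was int64(float64(10) * 0.4) = 4 blocks since the last L1 batch, while the new one is int64(float64(9) * 0.4) = 3. Subtracting one block from the timeout before scaling makes the coordinator schedule the L1 batch slightly earlier, keeping a margin below the contract-enforced deadline. (The value 10 is only for illustration; the real timeout comes from the rollup smart contract variables.)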
@@ -445,7 +445,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
	modules := newTestModules(t)
	var stats synchronizer.Stats
	coord := newTestCoordinator(t, forger, ethClient, ethClientSetup, modules)
-	pipeline, err := coord.newPipeline(ctx, &stats)
+	pipeline, err := coord.newPipeline(ctx)
	require.NoError(t, err)
	pipeline.vars = coord.vars

@@ -598,7 +598,7 @@ func TestPipeline1(t *testing.T) {
	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
	syncSCVars := sync.SCVars()

-	pipeline, err := coord.newPipeline(ctx, syncStats)
+	pipeline, err := coord.newPipeline(ctx)
	require.NoError(t, err)

	// Insert some l2txs in the Pool
@@ -616,7 +616,7 @@ PoolTransfer(0) User2-User3: 300 (126)
		require.NoError(t, err)
	}

-	err = pipeline.reset(batchNum, syncStats.Sync.LastForgeL1TxsNum, &synchronizer.SCVariables{
+	err = pipeline.reset(batchNum, syncStats.Sync.LastForgeL1TxsNum, syncStats, &synchronizer.SCVariables{
		Rollup: *syncSCVars.Rollup,
		Auction: *syncSCVars.Auction,
		WDelayer: *syncSCVars.WDelayer,
@@ -110,11 +110,24 @@ func (kvdb *KVDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
		return tracerr.Wrap(err)
	}
	// remove all checkpoints > batchNum
-	for i := batchNum + 1; i <= kvdb.CurrentBatch; i++ {
-		if err := kvdb.DeleteCheckpoint(i); err != nil {
+	list, err := kvdb.ListCheckpoints()
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	// Find first batch that is greater than batchNum, and delete
+	// everything after that
+	start := 0
+	for ; start < len(list); start++ {
+		if common.BatchNum(list[start]) > batchNum {
+			break
+		}
+	}
+	for _, bn := range list[start:] {
+		if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
			return tracerr.Wrap(err)
		}
	}

	if batchNum == 0 {
		// if batchNum == 0, open the new fresh 'current'
		sto, err := pebble.NewPebbleStorage(currentPath, false)
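The rewritten loop no longer assumes that checkpoints run contiguously from batchNum+1 up to kvdb.CurrentBatch; it asks the storage which checkpoints actually exist and deletes exactly the ones newer than batchNum. In miniature, with hypothetical values mirroring the code above:

list, _ := kvdb.ListCheckpoints() // e.g. [1 2 3 4 5], with batchNum == 3
start := 0
for ; start < len(list) && common.BatchNum(list[start]) <= 3; start++ {
}
// list[start:] == [4 5]: only these are passed to DeleteCheckpoint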
@@ -163,41 +176,62 @@ func (kvdb *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKV
		return tracerr.Wrap(fmt.Errorf("synchronizerKVDB can not be nil"))
	}

-	if batchNum == 0 {
-		kvdb.CurrentIdx = 0
-		return nil
-	}
-	synchronizerCheckpointPath := path.Join(synchronizerKVDB.path,
-		fmt.Sprintf("%s%d", PathBatchNum, batchNum))
-	checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
	currentPath := path.Join(kvdb.path, PathCurrent)

-	// use checkpoint from synchronizerKVDB
-	if _, err := os.Stat(synchronizerCheckpointPath); os.IsNotExist(err) {
-		// if synchronizerKVDB does not have checkpoint at batchNum, return err
-		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" not exist in Synchronizer",
-			synchronizerCheckpointPath))
-	}
-
	if err := kvdb.db.Pebble().Close(); err != nil {
		return tracerr.Wrap(err)
	}

	// remove 'current'
	err := os.RemoveAll(currentPath)
	if err != nil {
		return tracerr.Wrap(err)
	}
-	// copy synchronizer'BatchNumX' to 'current'
-	err = pebbleMakeCheckpoint(synchronizerCheckpointPath, currentPath)
+	// remove all checkpoints
+	list, err := kvdb.ListCheckpoints()
	if err != nil {
		return tracerr.Wrap(err)
	}
+	for _, bn := range list {
+		if err := kvdb.DeleteCheckpoint(common.BatchNum(bn)); err != nil {
+			return tracerr.Wrap(err)
+		}
+	}
+
+	if batchNum == 0 {
+		// if batchNum == 0, open the new fresh 'current'
+		sto, err := pebble.NewPebbleStorage(currentPath, false)
+		if err != nil {
+			return tracerr.Wrap(err)
+		}
+		kvdb.db = sto
+		kvdb.CurrentIdx = 255
+		kvdb.CurrentBatch = 0
+
+		return nil
+	}
+
+	checkpointPath := path.Join(kvdb.path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+
+	// use checkpoint from synchronizerKVDB
+	synchronizerCheckpointPath := path.Join(synchronizerKVDB.path,
+		fmt.Sprintf("%s%d", PathBatchNum, batchNum))
+	if _, err := os.Stat(synchronizerCheckpointPath); os.IsNotExist(err) {
+		// if synchronizerKVDB does not have checkpoint at batchNum, return err
+		return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" not exist in Synchronizer",
+			synchronizerCheckpointPath))
+	}
+	// copy synchronizer'BatchNumX' to 'BatchNumX'
+	err = pebbleMakeCheckpoint(synchronizerCheckpointPath, checkpointPath)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+
+	// copy 'BatchNumX' to 'current'
+	err = pebbleMakeCheckpoint(checkpointPath, currentPath)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}

	// open the new 'current'
	sto, err := pebble.NewPebbleStorage(currentPath, false)
	if err != nil {
@@ -354,6 +388,7 @@ func (kvdb *KVDB) ListCheckpoints() ([]int, error) {
	for _, checkpoint := range checkpoints[1:] {
		first++
		if checkpoint != first {
+			log.Errorw("GAP", "checkpoints", checkpoints)
			return nil, tracerr.Wrap(fmt.Errorf("checkpoint gap at %v", checkpoint))
		}
	}
@@ -24,7 +24,7 @@ func addTestKV(t *testing.T, db *KVDB, k, v []byte) {

func printCheckpoints(t *testing.T, path string) {
	files, err := ioutil.ReadDir(path)
-	assert.NoError(t, err)
+	require.NoError(t, err)

	fmt.Println(path)
	for _, f := range files {
@@ -35,7 +35,7 @@ func printCheckpoints(t *testing.T, path string) {
func TestCheckpoints(t *testing.T) {
	dir, err := ioutil.TempDir("", "sdb")
	require.NoError(t, err)
-	defer assert.NoError(t, os.RemoveAll(dir))
+	defer require.NoError(t, os.RemoveAll(dir))

	db, err := NewKVDB(dir, 128)
	require.NoError(t, err)
@@ -47,47 +47,49 @@ func TestCheckpoints(t *testing.T) {

	// do checkpoints and check that currentBatch is correct
	err = db.MakeCheckpoint()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	cb, err := db.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(1), cb)

	for i := 1; i < 10; i++ {
		err = db.MakeCheckpoint()
-		assert.NoError(t, err)
+		require.NoError(t, err)

		cb, err = db.GetCurrentBatch()
-		assert.NoError(t, err)
+		require.NoError(t, err)
		assert.Equal(t, common.BatchNum(i+1), cb)
	}

-	// printCheckpoints(t, sdb.path)
+	// printCheckpoints(t, db.path)

	// reset checkpoint
	err = db.Reset(3)
-	assert.NoError(t, err)
+	require.NoError(t, err)

+	// check that reset can be repeated (as there exist the 'current' and
+	// 'BatchNum3', from where the 'current' is a copy)
+	err = db.Reset(3)
+	require.NoError(t, err)
+
+	printCheckpoints(t, db.path)

	// check that currentBatch is as expected after Reset
	cb, err = db.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(3), cb)

	// advance one checkpoint and check that currentBatch is fine
	err = db.MakeCheckpoint()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	cb, err = db.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(4), cb)

	err = db.DeleteCheckpoint(common.BatchNum(1))
-	assert.NoError(t, err)
+	require.NoError(t, err)
	err = db.DeleteCheckpoint(common.BatchNum(2))
-	assert.NoError(t, err)
+	require.NoError(t, err)
	err = db.DeleteCheckpoint(common.BatchNum(1)) // does not exist, should return err
	assert.NotNil(t, err)
	err = db.DeleteCheckpoint(common.BatchNum(2)) // does not exist, should return err
@@ -96,43 +98,43 @@ func TestCheckpoints(t *testing.T) {
	// Create a new KVDB which will get Reset from the initial KVDB
	dirLocal, err := ioutil.TempDir("", "ldb")
	require.NoError(t, err)
-	defer assert.NoError(t, os.RemoveAll(dirLocal))
+	defer require.NoError(t, os.RemoveAll(dirLocal))
	ldb, err := NewKVDB(dirLocal, 128)
-	assert.NoError(t, err)
+	require.NoError(t, err)

	// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
	err = ldb.ResetFromSynchronizer(4, db)
-	assert.NoError(t, err)
+	require.NoError(t, err)
	// check that currentBatch is 4 after the Reset
	cb, err = ldb.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(4), cb)
	// advance one checkpoint in ldb
	err = ldb.MakeCheckpoint()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	cb, err = ldb.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(5), cb)

	// Create a 3rd KVDB which will get Reset from the initial KVDB
	dirLocal2, err := ioutil.TempDir("", "ldb2")
	require.NoError(t, err)
-	defer assert.NoError(t, os.RemoveAll(dirLocal2))
+	defer require.NoError(t, os.RemoveAll(dirLocal2))
	ldb2, err := NewKVDB(dirLocal2, 128)
-	assert.NoError(t, err)
+	require.NoError(t, err)

	// get checkpoint 4 from sdb (StateDB) to ldb (LocalStateDB)
	err = ldb2.ResetFromSynchronizer(4, db)
-	assert.NoError(t, err)
+	require.NoError(t, err)
	// check that currentBatch is 4 after the Reset
	cb, err = ldb2.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(4), cb)
	// advance one checkpoint in ldb2
	err = ldb2.MakeCheckpoint()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	cb, err = ldb2.GetCurrentBatch()
-	assert.NoError(t, err)
+	require.NoError(t, err)
	assert.Equal(t, common.BatchNum(5), cb)

	debug := false
@@ -146,7 +148,7 @@ func TestCheckpoints(t *testing.T) {
func TestListCheckpoints(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
-	defer assert.NoError(t, os.RemoveAll(dir))
+	defer require.NoError(t, os.RemoveAll(dir))

	db, err := NewKVDB(dir, 128)
	require.NoError(t, err)
@@ -176,7 +178,7 @@ func TestListCheckpoints(t *testing.T) {
func TestDeleteOldCheckpoints(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
-	defer assert.NoError(t, os.RemoveAll(dir))
+	defer require.NoError(t, os.RemoveAll(dir))

	keep := 16
	db, err := NewKVDB(dir, keep)
node/node.go
@@ -9,7 +9,6 @@ import (

	"github.com/ethereum/go-ethereum/accounts"
	ethKeystore "github.com/ethereum/go-ethereum/accounts/keystore"
-	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/gin-contrib/cors"
	"github.com/gin-gonic/gin"
@@ -108,6 +107,9 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
		}
		keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
			scryptN, scryptP)
+
+		// Unlock Coordinator ForgerAddr in the keystore to make calls
+		// to ForgeBatch in the smart contract
		if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
			return nil, tracerr.Wrap(fmt.Errorf(
				"ethereum keystore doesn't have the key for address %v",
@@ -191,17 +193,39 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
			cfg.Coordinator.L2DB.MaxTxs,
			cfg.Coordinator.L2DB.TTL.Duration,
		)
		// TODO: Get (maxL1UserTxs, maxL1OperatorTxs, maxTxs) from the smart contract
-		coordAccount := &txselector.CoordAccount{ // TODO TMP
-			Addr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
-			BJJ: common.EmptyBJJComp,
-			AccountCreationAuth: nil,
-		}

+		// Unlock FeeAccount EthAddr in the keystore to generate the
+		// account creation authorization
+		if !keyStore.HasAddress(cfg.Coordinator.FeeAccount.Address) {
+			return nil, tracerr.Wrap(fmt.Errorf(
+				"ethereum keystore doesn't have the key for address %v",
+				cfg.Coordinator.FeeAccount.Address))
+		}
+		feeAccount := accounts.Account{
+			Address: cfg.Coordinator.FeeAccount.Address,
+		}
+		if err := keyStore.Unlock(feeAccount,
+			cfg.Coordinator.EthClient.Keystore.Password); err != nil {
+			return nil, tracerr.Wrap(err)
+		}
+		auth := &common.AccountCreationAuth{
+			EthAddr: cfg.Coordinator.FeeAccount.Address,
+			BJJ: cfg.Coordinator.FeeAccount.BJJ,
+		}
+		if err := auth.Sign(func(msg []byte) ([]byte, error) {
+			return keyStore.SignHash(feeAccount, msg)
+		}, chainIDU16, cfg.SmartContracts.Rollup); err != nil {
+			return nil, err
+		}
+		coordAccount := &txselector.CoordAccount{
+			Addr: cfg.Coordinator.FeeAccount.Address,
+			BJJ: cfg.Coordinator.FeeAccount.BJJ,
+			AccountCreationAuth: auth.Signature,
+		}
		txSelector, err := txselector.NewTxSelector(coordAccount, cfg.Coordinator.TxSelector.Path, stateDB, l2DB)
		if err != nil {
			return nil, tracerr.Wrap(err)
		}
		// TODO: Get (configCircuits []ConfigCircuit, batchNum common.BatchNum, nLevels uint64) from smart contract
		batchBuilder, err := batchbuilder.NewBatchBuilder(cfg.Coordinator.BatchBuilder.Path,
			stateDB, nil, 0, uint64(cfg.Coordinator.Circuit.NLevels))
		if err != nil {
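For reference, the configuration this new block consumes is the [Coordinator.FeeAccount] section added to cfg.buidler.toml at the top of this diff:

[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"

The coordinator unlocks that address in the keystore and signs the fee account's own AccountCreationAuth via keyStore.SignHash, replacing the hard-coded TMP CoordAccount, so the private key never leaves the keystore.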
@@ -408,12 +432,8 @@ func (n *Node) handleNewBlock(stats *synchronizer.Stats, vars synchronizer.SCVar
	if n.mode == ModeCoordinator {
		n.coord.SendMsg(coordinator.MsgSyncBlock{
			Stats: *stats,
+			Vars: vars,
			Batches: batches,
-			Vars: synchronizer.SCVariablesPtr{
-				Rollup: vars.Rollup,
-				Auction: vars.Auction,
-				WDelayer: vars.WDelayer,
-			},
		})
	}
	if n.nodeAPI != nil {
@@ -439,10 +459,11 @@ func (n *Node) handleNewBlock(stats *synchronizer.Stats, vars synchronizer.SCVar
	}
}

-func (n *Node) handleReorg(stats *synchronizer.Stats) {
+func (n *Node) handleReorg(stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
	if n.mode == ModeCoordinator {
		n.coord.SendMsg(coordinator.MsgSyncReorg{
			Stats: *stats,
+			Vars: vars,
		})
	}
	if n.nodeAPI != nil {
@@ -467,15 +488,17 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
	} else if discarded != nil {
		// case: reorg
		log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
-		n.handleReorg(stats)
+		vars := n.sync.SCVars()
+		n.handleReorg(stats, vars)
		return nil, time.Duration(0), nil
	} else if blockData != nil {
		// case: new block
-		n.handleNewBlock(stats, synchronizer.SCVariablesPtr{
+		vars := synchronizer.SCVariablesPtr{
			Rollup: blockData.Rollup.Vars,
			Auction: blockData.Auction.Vars,
			WDelayer: blockData.WDelayer.Vars,
-		}, blockData.Rollup.Batches)
+		}
+		n.handleNewBlock(stats, vars, blockData.Rollup.Batches)
		return &blockData.Block, time.Duration(0), nil
	} else {
		// case: no block
@@ -625,9 +648,9 @@ func (n *Node) Start() {
func (n *Node) Stop() {
	log.Infow("Stopping node...")
	n.cancel()
-	n.wg.Wait()
	if n.mode == ModeCoordinator {
		log.Info("Stopping Coordinator...")
		n.coord.Stop()
	}
+	n.wg.Wait()
}
@@ -139,6 +139,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
		if err != nil {
			return nil, tracerr.Wrap(err)
		}
+		defer sto.Close()
		exitTree, err = merkletree.NewMerkleTree(sto, tp.s.MT.MaxLevels())
		if err != nil {
			return nil, tracerr.Wrap(err)
@@ -57,16 +57,15 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
		BJJ: bjj,
		AccountCreationAuth: nil,
	}
-	a := &common.AccountCreationAuth{
+	auth := common.AccountCreationAuth{
		EthAddr: addr,
		BJJ: bjj,
	}
-	msg, err := a.HashToSign(chainID, hermezContractAddr)
+	err = auth.Sign(func(hash []byte) ([]byte, error) {
+		return ethCrypto.Sign(hash, &ethSk)
+	}, chainID, hermezContractAddr)
	assert.NoError(t, err)
-	sig, err := ethCrypto.Sign(msg, &ethSk)
-	assert.NoError(t, err)
-	sig[64] += 27
-	coordAccount.AccountCreationAuth = sig
+	coordAccount.AccountCreationAuth = auth.Signature

	txsel, err := NewTxSelector(coordAccount, txselDir, sdb, l2DB)
	require.NoError(t, err)
@@ -80,20 +79,18 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address

func addAccCreationAuth(t *testing.T, tc *til.Context, txsel *TxSelector, chainID uint16, hermezContractAddr ethCommon.Address, username string) []byte {
	user := tc.Users[username]
-	a := &common.AccountCreationAuth{
+	auth := &common.AccountCreationAuth{
		EthAddr: user.Addr,
		BJJ: user.BJJ.Public().Compress(),
	}
-	msg, err := a.HashToSign(chainID, hermezContractAddr)
+	err := auth.Sign(func(hash []byte) ([]byte, error) {
+		return ethCrypto.Sign(hash, user.EthSk)
+	}, chainID, hermezContractAddr)
	assert.NoError(t, err)
-	sig, err := ethCrypto.Sign(msg, user.EthSk)
-	assert.NoError(t, err)
-	sig[64] += 27
-	a.Signature = sig

-	err = txsel.l2db.AddAccountCreationAuth(a)
+	err = txsel.l2db.AddAccountCreationAuth(auth)
	assert.NoError(t, err)
-	return a.Signature
+	return auth.Signature
}

func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []common.PoolL2Tx) {