
Update node and coordinator, fix linters

feature/sql-semaphore1
Eduard S, 4 years ago
commit d8050dd0a6
5 changed files with 31 additions and 33 deletions
  1. config/config.go (+6, -5)
  2. coordinator/coordinator.go (+1, -1)
  3. node/node.go (+4, -3)
  4. prover/prover.go (+18, -20)
  5. prover/prover_test.go (+2, -4)

config/config.go (+6, -5)

@@ -43,8 +43,9 @@ type Coordinator struct {
 ConfirmBlocks int64 `validate:"required"`
 // L1BatchTimeoutPerc is the portion of the range before the L1Batch
 // timeout that will trigger a schedule to forge an L1Batch
-L1BatchTimeoutPerc float64 `validate:"required"`
-L2DB struct {
+ProofServerPollInterval Duration `validate:"required"`
+L1BatchTimeoutPerc float64 `validate:"required"`
+L2DB struct {
 SafetyPeriod common.BatchNum `validate:"required"`
 MaxTxs uint32 `validate:"required"`
 TTL Duration `validate:"required"`
@@ -69,10 +70,10 @@ type Coordinator struct {
 DeployGasLimit uint64 `validate:"required"`
 GasPriceDiv uint64 `validate:"required"`
 ReceiptTimeout Duration `validate:"required"`
-IntervalReceiptLoop Duration `validate:"required"`
-// IntervalCheckLoop is the waiting interval between receipt
+ReceiptLoopInterval Duration `validate:"required"`
+// CheckLoopInterval is the waiting interval between receipt
 // checks of ethereum transactions in the TxManager
-IntervalCheckLoop Duration `validate:"required"`
+CheckLoopInterval Duration `validate:"required"`
 // Attempts is the number of attempts to do an eth client RPC
 // call before giving up
 Attempts int `validate:"required"`
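
Note on the Duration fields touched above: they are presumably a thin wrapper around time.Duration so that values such as "2s" or "500ms" can be written directly in the text config. A minimal sketch of such a wrapper, stated as an assumption for illustration rather than this repository's actual implementation:

// Hypothetical config Duration wrapper (not taken from this commit): it embeds
// time.Duration and implements encoding.TextUnmarshaler so a text-based config
// decoder can parse values like "2s" or "500ms" into fields such as
// ProofServerPollInterval, ReceiptLoopInterval and CheckLoopInterval.
package config

import "time"

type Duration struct {
	time.Duration
}

func (d *Duration) UnmarshalText(data []byte) error {
	v, err := time.ParseDuration(string(data))
	if err != nil {
		return err
	}
	d.Duration = v
	return nil
}

A wrapper of this shape would also explain why node.go below reads coordCfg.EthClient.ReceiptLoopInterval.Duration rather than the field itself.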

coordinator/coordinator.go (+1, -1)

@@ -807,7 +807,7 @@ func (p *Pipeline) forgeSendServerProof(ctx context.Context, batchNum common.Bat
 // 7. Call the selected idle server proof with BatchBuilder output,
 // save server proof info for batchNum
-err = batchInfo.ServerProof.CalculateProof(zkInputs)
+err = batchInfo.ServerProof.CalculateProof(ctx, zkInputs)
 if err != nil {
 return nil, tracerr.Wrap(err)
 }

node/node.go (+4, -3)

@@ -96,7 +96,7 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
 DeployGasLimit: coordCfg.EthClient.DeployGasLimit,
 GasPriceDiv: coordCfg.EthClient.GasPriceDiv,
 ReceiptTimeout: coordCfg.EthClient.ReceiptTimeout.Duration,
-IntervalReceiptLoop: coordCfg.EthClient.IntervalReceiptLoop.Duration,
+IntervalReceiptLoop: coordCfg.EthClient.ReceiptLoopInterval.Duration,
 }
 }
 client, err := eth.NewClient(ethClient, nil, nil, &eth.ClientConfig{
@@ -165,7 +165,8 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
 }
 serverProofs := make([]prover.Client, len(coordCfg.ServerProofs))
 for i, serverProofCfg := range coordCfg.ServerProofs {
-serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL)
+serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
+coordCfg.ProofServerPollInterval.Duration)
 }
 coord, err = coordinator.NewCoordinator(
@@ -175,7 +176,7 @@ func NewNode(mode Mode, cfg *config.Node, coordCfg *config.Coordinator) (*Node,
 L1BatchTimeoutPerc: coordCfg.L1BatchTimeoutPerc,
 EthClientAttempts: coordCfg.EthClient.Attempts,
 EthClientAttemptsDelay: coordCfg.EthClient.AttemptsDelay.Duration,
-TxManagerCheckInterval: coordCfg.EthClient.IntervalCheckLoop.Duration,
+TxManagerCheckInterval: coordCfg.EthClient.CheckLoopInterval.Duration,
 DebugBatchPath: coordCfg.Debug.BatchPath,
 Purger: coordinator.PurgerCfg{
 PurgeBatchDelay: coordCfg.L2DB.PurgeBatchDelay,

prover/prover.go (+18, -20)

@@ -133,7 +133,7 @@ type formFileProvider struct {
 body []byte
 }
-//nolint:unused
+//nolint:unused,deadcode
 func newFormFileProvider(payload interface{}) (*formFileProvider, error) {
 body := new(bytes.Buffer)
 writer := multipart.NewWriter(body)
@@ -239,9 +239,8 @@ func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, error) {
 return nil, tracerr.Wrap(err)
 }
 return &proof, nil
-} else {
-return nil, errors.New("State is not Success")
 }
+return nil, errors.New("State is not Success")
 }
 // Cancel cancels any current proof computation
@@ -262,22 +261,21 @@ func (p *ProofServerClient) WaitReady(ctx context.Context) error {
 if !status.Status.IsInitialized() {
 err := errors.New("Proof Server is not initialized")
 return err
-} else {
-if status.Status.IsReady() {
-return nil
-}
-for {
-select {
-case <-ctx.Done():
-return tracerr.Wrap(common.ErrDone)
-case <-time.After(p.timeCons):
-status, err := p.apiStatus(ctx)
-if err != nil {
-return tracerr.Wrap(err)
-}
-if status.Status.IsReady() {
-return nil
-}
-}
-}
 }
+if status.Status.IsReady() {
+return nil
+}
+for {
+select {
+case <-ctx.Done():
+return tracerr.Wrap(common.ErrDone)
+case <-time.After(p.timeCons):
+status, err := p.apiStatus(ctx)
+if err != nil {
+return tracerr.Wrap(err)
+}
+if status.Status.IsReady() {
+return nil
+}
+}
+}
 }
@@ -289,7 +287,7 @@ type MockClient struct {
 // CalculateProof sends the *common.ZKInputs to the ServerProof to compute the
 // Proof
-func (p *MockClient) CalculateProof(zkInputs *common.ZKInputs) error {
+func (p *MockClient) CalculateProof(ctx context.Context, zkInputs *common.ZKInputs) error {
 return nil
 }
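
Taken together, the prover changes make the whole proving flow cancellable through a single context (WaitReady, CalculateProof and GetProof all take ctx) and inject the poll interval at construction. A minimal usage sketch under the new signatures; the import paths, URL and interval below are placeholders assumed for illustration, not part of this commit:

// Sketch of driving the updated prover client API end to end.
package main

import (
	"context"
	"log"
	"math/big"
	"time"

	// Import paths assumed for illustration; adjust to the actual module path.
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/prover"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Second argument is the new poll interval plumbed from
	// Coordinator.ProofServerPollInterval; URL and interval are placeholders.
	client := prover.NewProofServerClient("http://localhost:3000", 1*time.Second)

	// Poll the proof server until it reports ready, or the context is cancelled.
	if err := client.WaitReady(ctx); err != nil {
		log.Fatal(err)
	}
	zkInputs := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
	if err := client.CalculateProof(ctx, zkInputs); err != nil {
		log.Fatal(err)
	}
	proof, err := client.GetProof(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got proof: %+v", proof)
}

The MockClient change above simply keeps the mock conforming to the same prover.Client interface after the signature change.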

prover/prover_test.go (+2, -4)

@@ -47,8 +47,7 @@ func testAPIStatus(t *testing.T) {
 }
 func testCalculateProof(t *testing.T) {
-var zkInputs *common.ZKInputs
-zkInputs = common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
+zkInputs := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
 err := proofServerClient.CalculateProof(context.Background(), zkInputs)
 require.NoError(t, err)
 }
@@ -64,8 +63,7 @@ func testGetProof(t *testing.T) {
 }
 func testCancel(t *testing.T) {
-var zkInputs *common.ZKInputs
-zkInputs = common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
+zkInputs := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
 err := proofServerClient.CalculateProof(context.Background(), zkInputs)
 require.NoError(t, err)
 // TODO: remove sleep when the server has been reviewed
