fix some code and comment typos
@@ -44,7 +44,7 @@ func (a *API) getAccounts(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type accountResponse struct {
 Accounts []historydb.AccountAPI `json:"accounts"`
 PendingItems uint64 `json:"pendingItems"`
@@ -47,7 +47,7 @@ func (a *API) getAccountCreationAuth(c *gin.Context) {
 retSQLErr(err, c)
 return
 }
-// Build succesfull response
+// Build successful response
 c.JSON(http.StatusOK, auth)
 }
 
@@ -201,7 +201,7 @@ func TestMain(m *testing.M) {
 if err != nil {
 panic(err)
 }
-apiConnCon := db.NewAPICnnectionController(1, time.Second)
+apiConnCon := db.NewAPIConnectionController(1, time.Second)
 hdb := historydb.NewHistoryDB(database, database, apiConnCon)
 if err != nil {
 panic(err)
@@ -260,7 +260,7 @@ func TestMain(m *testing.M) {
 // Reset DB
 test.WipeDB(api.h.DB())
 
-// Genratre blockchain data with til
+// Generate blockchain data with til
 tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
 tilCfgExtra := til.ConfigExtra{
 BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
@@ -512,7 +512,7 @@ func TestMain(m *testing.M) {
 WithdrawalDelay: uint64(3000),
 }
 
-// Generate test data, as expected to be received/sended from/to the API
+// Generate test data, as expected to be received/sent from/to the API
 testCoords := genTestCoordinators(commonCoords)
 testBids := genTestBids(commonBlocks, testCoords, bids)
 testExits := genTestExits(commonExitTree, testTokens, commonAccounts)
@@ -599,7 +599,7 @@ func TestTimeout(t *testing.T) {
 pass := os.Getenv("POSTGRES_PASS")
 databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 require.NoError(t, err)
-apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
+apiConnConTO := db.NewAPIConnectionController(1, 100*time.Millisecond)
 hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
 require.NoError(t, err)
 // L2DB
@@ -52,7 +52,7 @@ func (a *API) getBatches(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type batchesResponse struct {
 Batches []historydb.BatchAPI `json:"batches"`
 PendingItems uint64 `json:"pendingItems"`
@@ -34,7 +34,7 @@ func (a *API) getBids(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type bidsResponse struct {
 Bids []historydb.BidAPI `json:"bids"`
 PendingItems uint64 `json:"pendingItems"`
@@ -32,7 +32,7 @@ func (a *API) getCoordinators(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type coordinatorsResponse struct {
 Coordinators []historydb.CoordinatorAPI `json:"coordinators"`
 PendingItems uint64 `json:"pendingItems"`
@@ -43,7 +43,7 @@ func (a *API) getExits(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type exitsResponse struct {
 Exits []historydb.ExitAPI `json:"exits"`
 PendingItems uint64 `json:"pendingItems"`
@@ -72,6 +72,6 @@ func (a *API) getExit(c *gin.Context) {
 retSQLErr(err, c)
 return
 }
-// Build succesfull response
+// Build successful response
 c.JSON(http.StatusOK, exit)
 }
@@ -14,7 +14,7 @@ import (
 )
 
 const (
-// maxLimit is the max permited items to be returned in paginated responses
+// maxLimit is the max permitted items to be returned in paginated responses
 maxLimit uint = 2049
 
 // dfltOrder indicates how paginated endpoints are ordered if not specified
@@ -40,8 +40,8 @@ const (
 )
 
 var (
-// ErrNillBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
-ErrNillBidderAddr = errors.New("biderAddr can not be nil")
+// ErrNilBidderAddr is used when a nil bidderAddr is received in the getCoordinator method
+ErrNilBidderAddr = errors.New("biderAddr can not be nil")
 )
 
 func retSQLErr(err error, c *gin.Context) {
@@ -50,19 +50,19 @@ func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err
 return fromItem, order, limit, nil
 }
 
-// nolint reason: res may be not overwriten
+// nolint reason: res may be not overwritten
 func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009
 str := c.Query(name)
 return stringToUint(str, name, dflt, min, max)
 }
 
-// nolint reason: res may be not overwriten
+// nolint reason: res may be not overwritten
 func parseQueryInt64(name string, dflt *int64, min, max int64, c querier) (*int64, error) { //nolint:SA4009
 str := c.Query(name)
 return stringToInt64(str, name, dflt, min, max)
 }
 
-// nolint reason: res may be not overwriten
+// nolint reason: res may be not overwritten
 func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009
 str := c.Query(name)
 if str == "" {
@@ -295,13 +295,13 @@ func parseParamIdx(c paramer) (*common.Idx, error) {
 return stringToIdx(idxStr, name)
 }
 
-// nolint reason: res may be not overwriten
+// nolint reason: res may be not overwritten
 func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009
 str := c.Param(name)
 return stringToUint(str, name, dflt, min, max)
 }
 
-// nolint reason: res may be not overwriten
+// nolint reason: res may be not overwritten
 func parseParamInt64(name string, dflt *int64, min, max int64, c paramer) (*int64, error) { //nolint:SA4009
 str := c.Param(name)
 return stringToInt64(str, name, dflt, min, max)
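
The parse* helpers above delegate to stringToUint/stringToInt64, which apply a default and bounds-check the value; those helpers are not part of this diff. A minimal sketch of what such a bounds-checked parser typically looks like (the package name, signature details and error wording are assumptions, not the repository's code):

    package sketch

    import (
        "fmt"
        "strconv"
    )

    // stringToUintSketch: empty input falls back to the default, otherwise the
    // value is parsed and checked against [min, max].
    func stringToUintSketch(str, name string, dflt *uint, min, max uint) (*uint, error) {
        if str == "" {
            return dflt, nil
        }
        v, err := strconv.ParseUint(str, 10, 64)
        if err != nil {
            return nil, fmt.Errorf("invalid %s: %w", name, err)
        }
        res := uint(v)
        if res < min || res > max {
            return nil, fmt.Errorf("%s out of range [%d, %d]", name, min, max)
        }
        return &res, nil
    }
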
@@ -11,7 +11,7 @@ import (
 "github.com/hermeznetwork/tracerr"
 )
 
-// SlotAPI is a repesentation of a slot information
+// SlotAPI is a representation of a slot information
 type SlotAPI struct {
 ItemID uint64 `json:"itemId"`
 SlotNum int64 `json:"slotNum"`
@@ -316,7 +316,7 @@ func (a *API) getSlots(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type slotsResponse struct {
 Slots []SlotAPI `json:"slots"`
 PendingItems uint64 `json:"pendingItems"`
@@ -53,7 +53,7 @@ func (a *API) getTokens(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type tokensResponse struct {
 Tokens []historydb.TokenWithUSD `json:"tokens"`
 PendingItems uint64 `json:"pendingItems"`
@@ -42,7 +42,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
 return
 }
 
-// Build succesfull response
+// Build successful response
 type txsResponse struct {
 Txs []historydb.TxAPI `json:"transactions"`
 PendingItems uint64 `json:"pendingItems"`
@@ -66,6 +66,6 @@ func (a *API) getHistoryTx(c *gin.Context) {
 retSQLErr(err, c)
 return
 }
-// Build succesfull response
+// Build successful response
 c.JSON(http.StatusOK, tx)
 }
@@ -455,7 +455,7 @@ func TestGetHistoryTx(t *testing.T) {
 // 400, due invalid TxID
 err := doBadReq("GET", endpoint+"0x001", nil, 400)
 assert.NoError(t, err)
-// 404, due inexistent TxID in DB
+// 404, due nonexistent TxID in DB
 err = doBadReq("GET", endpoint+"0x00eb5e95e1ce5e9f6c4ed402d415e8d0bdd7664769cfd2064d28da04a2c76be432", nil, 404)
 assert.NoError(t, err)
 }
@@ -51,7 +51,7 @@ func (a *API) getPoolTx(c *gin.Context) {
 retSQLErr(err, c)
 return
 }
-// Build succesfull response
+// Build successful response
 c.JSON(http.StatusOK, tx)
 }
 
@@ -235,7 +235,7 @@ func TestPoolTxs(t *testing.T) {
 // 400, due invalid TxID
 err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
 require.NoError(t, err)
-// 404, due inexistent TxID in DB
+// 404, due nonexistent TxID in DB
 err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
 require.NoError(t, err)
 }
@@ -91,7 +91,7 @@ func (c *CollectedFees) UnmarshalJSON(text []byte) error {
 return nil
 }
 
-// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez fotmat (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
+// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez format (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
 // It assumes that Ethereum Address are inserted/fetched to/from the DB using the default Scan/Value interface
 type HezEthAddr string
 
@@ -143,7 +143,7 @@ func (s *StrHezEthAddr) UnmarshalText(text []byte) error {
 return nil
 }
 
-// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez fotmat (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
+// HezBJJ is used to scan/value *babyjub.PublicKeyComp directly into strings that follow the BJJ public key hez format (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
 // It assumes that *babyjub.PublicKeyComp are inserted/fetched to/from the DB using the default Scan/Value interface
 type HezBJJ string
 
@@ -216,7 +216,7 @@ func (b HezBJJ) Value() (driver.Value, error) {
 // StrHezBJJ is used to unmarshal HezBJJ directly into an alias of babyjub.PublicKeyComp
 type StrHezBJJ babyjub.PublicKeyComp
 
-// UnmarshalText unmarshals a StrHezBJJ
+// UnmarshalText unmarshalls a StrHezBJJ
 func (s *StrHezBJJ) UnmarshalText(text []byte) error {
 bjj, err := hezStrToBJJ(string(text))
 if err != nil {
@@ -226,8 +226,8 @@ func (s *StrHezBJJ) UnmarshalText(text []byte) error {
 return nil
 }
 
-// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez fotmat (hez:tokenSymbol:idx) to sql DBs.
-// Note that this can only be used to insert to DB since there is no way to automaticaly read from the DB since it needs the tokenSymbol
+// HezIdx is used to value common.Idx directly into strings that follow the Idx key hez format (hez:tokenSymbol:idx) to sql DBs.
+// Note that this can only be used to insert to DB since there is no way to automatically read from the DB since it needs the tokenSymbol
 type HezIdx string
 
 // StrHezIdx is used to unmarshal HezIdx directly into an alias of common.Idx
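
The three hez string formats referenced in the comments above (^hez:0x[a-fA-F0-9]{40}$ for Ethereum addresses, ^hez:[A-Za-z0-9_-]{44}$ for BJJ public keys, and hez:tokenSymbol:idx for indexes) are plain prefixed encodings. A small illustrative program; hezFromEthAddr is a hypothetical helper, not the repository's code, and the address is the BootCoordAddr constant used in the tests above:

    package main

    import (
        "fmt"
        "regexp"

        ethCommon "github.com/ethereum/go-ethereum/common"
    )

    // hezEthAddrRe is the pattern quoted in the HezEthAddr comment.
    var hezEthAddrRe = regexp.MustCompile(`^hez:0x[a-fA-F0-9]{40}$`)

    // hezFromEthAddr prefixes a checksummed Ethereum address with "hez:".
    func hezFromEthAddr(addr ethCommon.Address) string {
        return "hez:" + addr.Hex()
    }

    func main() {
        addr := ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2")
        s := hezFromEthAddr(addr)
        fmt.Println(s, hezEthAddrRe.MatchString(s)) // hez:0xE39f... true
    }
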
@@ -84,7 +84,7 @@ func (a *AccountCreationAuth) toHash(chainID uint16,
 return rawData, nil
 }
 
-// HashToSign returns the hash to be signed by the Etherum address to authorize
+// HashToSign returns the hash to be signed by the Ethereum address to authorize
 // the account creation, which follows the EIP-712 encoding
 func (a *AccountCreationAuth) HashToSign(chainID uint16,
 hermezContractAddr ethCommon.Address) ([]byte, error) {
@@ -96,9 +96,9 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
 }
 
 // Sign signs the account creation authorization message using the provided
-// `signHash` function, and stores the signaure in `a.Signature`. `signHash`
+// `signHash` function, and stores the signature in `a.Signature`. `signHash`
 // should do an ethereum signature using the account corresponding to
-// `a.EthAddr`. The `signHash` function is used to make signig flexible: in
+// `a.EthAddr`. The `signHash` function is used to make signing flexible: in
 // tests we sign directly using the private key, outside tests we sign using
 // the keystore (which never exposes the private key). Sign follows the EIP-712
 // encoding.
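
The Sign comment above describes a signHash callback so that tests can sign with a raw private key while production code signs through the keystore. A test-style sketch of producing that signature by hand, assuming a.Signature is a []byte field as the comment implies; the import paths are assumptions, and the real Sign method (which may adjust the recovery byte) remains the authoritative path:

    package sketch

    import (
        "crypto/ecdsa"

        ethCommon "github.com/ethereum/go-ethereum/common"
        ethCrypto "github.com/ethereum/go-ethereum/crypto"

        "github.com/hermeznetwork/hermez-node/common" // module path assumed
    )

    // signAuthForTest signs the EIP-712 hash directly with a private key.
    func signAuthForTest(auth *common.AccountCreationAuth, chainID uint16,
        hermezContractAddr ethCommon.Address, sk *ecdsa.PrivateKey) error {
        hash, err := auth.HashToSign(chainID, hermezContractAddr) // as in the hunk above
        if err != nil {
            return err
        }
        sig, err := ethCrypto.Sign(hash, sk) // 65-byte [R || S || V] signature
        if err != nil {
            return err
        }
        auth.Signature = sig
        return nil
    }
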
@@ -34,7 +34,7 @@ type Slot struct {
 // BatchesLen int
 BidValue *big.Int
 BootCoord bool
-// Bidder, Forer and URL correspond to the winner of the slot (which is
+// Bidder, Forger and URL correspond to the winner of the slot (which is
 // not always the highest bidder). These are the values of the
 // coordinator that is able to forge exclusively before the deadline.
 Bidder ethCommon.Address
@@ -10,7 +10,7 @@ import (
 
 // L2Tx is a struct that represents an already forged L2 tx
 type L2Tx struct {
-// Stored in DB: mandatory fileds
+// Stored in DB: mandatory fields
 TxID TxID `meddler:"id"`
 BatchNum BatchNum `meddler:"batch_num"` // batchNum in which this tx was forged.
 Position int `meddler:"position"`
@@ -88,7 +88,7 @@ func (txid TxID) MarshalText() ([]byte, error) {
 return []byte(txid.String()), nil
 }
 
-// UnmarshalText unmarshals a TxID
+// UnmarshalText unmarshalls a TxID
 func (txid *TxID) UnmarshalText(data []byte) error {
 idStr := string(data)
 id, err := NewTxIDFromString(idStr)
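
Because TxID implements MarshalText/UnmarshalText, encoding/json serializes it as a quoted string and parses it back through NewTxIDFromString. A round-trip sketch; the module path and the acceptance of the 0x-prefixed form are assumptions, and the example ID is the one used in the pool tests above:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/hermeznetwork/hermez-node/common" // module path assumed
    )

    func main() {
        id, err := common.NewTxIDFromString(
            "0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823")
        if err != nil {
            panic(err)
        }
        b, err := json.Marshal(id) // uses MarshalText, producing a quoted string
        if err != nil {
            panic(err)
        }
        var back common.TxID
        if err := json.Unmarshal(b, &back); err != nil { // uses UnmarshalText
            panic(err)
        }
        fmt.Println(string(b), back.String() == id.String())
    }
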
@@ -67,7 +67,7 @@ type ZKInputs struct {
 
 // accumulate fees
 // FeePlanTokens contains all the tokenIDs for which the fees are being
-// accumulated and those fees accoumulated will be paid to the FeeIdxs
+// accumulated and those fees accumulated will be paid to the FeeIdxs
 // array. The order of FeeIdxs & FeePlanTokens & State3 must match.
 // Coordinator fees are processed correlated such as:
 // [FeePlanTokens[i], FeeIdxs[i]]
@@ -73,7 +73,7 @@ type Coordinator struct {
 // checking the next block), used to decide when to stop scheduling new
 // batches (by stopping the pipeline).
 // For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
-// is 5, eventhough at block 11 we canForge, the pipeline will be
+// is 5, even though at block 11 we canForge, the pipeline will be
 // stopped if we can't forge at block 15.
 // This value should be the expected number of blocks it takes between
 // scheduling a batch and having it mined.
@@ -83,7 +83,7 @@ type Coordinator struct {
 // from the next block; used to decide when to stop sending batches to
 // the smart contract.
 // For example, if we are at block 10 and SendBatchBlocksMarginCheck is
-// 5, eventhough at block 11 we canForge, the batch will be discarded
+// 5, even though at block 11 we canForge, the batch will be discarded
 // if we can't forge at block 15.
 SendBatchBlocksMarginCheck int64
 // ProofServerPollInterval is the waiting interval between polling the
@@ -126,7 +126,7 @@ type Coordinator struct {
 // L2Txs is reached, L2Txs older than TTL will be deleted.
 TTL Duration `validate:"required"`
 // PurgeBatchDelay is the delay between batches to purge
-// outdated transactions. Oudated L2Txs are those that have
+// outdated transactions. Outdated L2Txs are those that have
 // been forged or marked as invalid for longer than the
 // SafetyPeriod and pending L2Txs that have been in the pool
 // for longer than TTL once there are MaxTxs.
@@ -136,7 +136,7 @@ type Coordinator struct {
 // nonce.
 InvalidateBatchDelay int64 `validate:"required"`
 // PurgeBlockDelay is the delay between blocks to purge
-// outdated transactions. Oudated L2Txs are those that have
+// outdated transactions. Outdated L2Txs are those that have
 // been forged or marked as invalid for longer than the
 // SafetyPeriod and pending L2Txs that have been in the pool
 // for longer than TTL once there are MaxTxs.
@@ -168,7 +168,7 @@ type Coordinator struct {
 MaxGasPrice *big.Int `validate:"required"`
 // GasPriceIncPerc is the percentage increase of gas price set
 // in an ethereum transaction from the suggested gas price by
-// the ehtereum node
+// the ethereum node
 GasPriceIncPerc int64
 // CheckLoopInterval is the waiting interval between receipt
 // checks of ethereum transactions in the TxManager
@@ -54,7 +54,7 @@ type Config struct {
 // checking the next block), used to decide when to stop scheduling new
 // batches (by stopping the pipeline).
 // For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
-// is 5, eventhough at block 11 we canForge, the pipeline will be
+// is 5, even though at block 11 we canForge, the pipeline will be
 // stopped if we can't forge at block 15.
 // This value should be the expected number of blocks it takes between
 // scheduling a batch and having it mined.
@@ -64,7 +64,7 @@ type Config struct {
 // from the next block; used to decide when to stop sending batches to
 // the smart contract.
 // For example, if we are at block 10 and SendBatchBlocksMarginCheck is
-// 5, eventhough at block 11 we canForge, the batch will be discarded
+// 5, even though at block 11 we canForge, the batch will be discarded
 // if we can't forge at block 15.
 // This value should be the expected number of blocks it takes between
 // sending a batch and having it mined.
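
Both ScheduleBatchBlocksAheadCheck and SendBatchBlocksMarginCheck express the look-ahead idea worked through in the comments above: keep going only if the coordinator will still be able to forge some blocks from now. A minimal illustration of that check, not the coordinator's actual code:

    package sketch

    // canKeepGoing reports whether scheduling or sending should continue, given
    // a predicate that says whether the coordinator can forge at a block number.
    func canKeepGoing(canForgeAt func(blockNum int64) bool, currentBlock, lookAhead int64) bool {
        return canForgeAt(currentBlock + lookAhead)
    }

With currentBlock = 10 and lookAhead = 5 (the example in the comments), the pipeline is stopped or the batch discarded unless forging at block 15 is still possible, even though forging at block 11 would succeed.
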
@@ -504,7 +504,7 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
 return nil, tracerr.Wrap(err)
 }
 
-// Invalidate transactions that become invalid beause of
+// Invalidate transactions that become invalid because of
 // the poolL2Txs selected. Will mark as invalid the txs that have a
 // (fromIdx, nonce) which already appears in the selected txs (includes
 // all the nonces smaller than the current one)
@@ -14,7 +14,7 @@ import (
 // PurgerCfg is the purger configuration
 type PurgerCfg struct {
 // PurgeBatchDelay is the delay between batches to purge outdated
-// transactions. Oudated L2Txs are those that have been forged or
+// transactions. Outdated L2Txs are those that have been forged or
 // marked as invalid for longer than the SafetyPeriod and pending L2Txs
 // that have been in the pool for longer than TTL once there are
 // MaxTxs.
@@ -23,7 +23,7 @@ type PurgerCfg struct {
 // transactions due to nonce lower than the account nonce.
 InvalidateBatchDelay int64
 // PurgeBlockDelay is the delay between blocks to purge outdated
-// transactions. Oudated L2Txs are those that have been forged or
+// transactions. Outdated L2Txs are those that have been forged or
 // marked as invalid for longer than the SafetyPeriod and pending L2Txs
 // that have been in the pool for longer than TTL once there are
 // MaxTxs.
@@ -503,7 +503,7 @@ func (t *TxManager) Run(ctx context.Context) {
 // Our ethNode is giving an error different
 // than "not found" when getting the receipt
 // for the transaction, so we can't figure out
-// if it was not mined, mined and succesfull or
+// if it was not mined, mined and successful or
 // mined and failed. This could be due to the
 // ethNode failure.
 t.coord.SendMsg(ctx, MsgStopPipeline{
@@ -568,7 +568,7 @@ func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
 // Our ethNode is giving an error different
 // than "not found" when getting the receipt
 // for the transaction, so we can't figure out
-// if it was not mined, mined and succesfull or
+// if it was not mined, mined and successful or
 // mined and failed. This could be due to the
 // ethNode failure.
 next++
@@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
 if err != nil {
 panic(err)
 }
-apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
+apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
 historyDBWithACC = NewHistoryDB(db, db, apiConnCon)
 // Run tests
 result := m.Run()
@@ -316,7 +316,7 @@ func (k *KVDB) ResetFromSynchronizer(batchNum common.BatchNum, synchronizerKVDB
 
 checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
 
-// copy synchronizer'BatchNumX' to 'BatchNumX'
+// copy synchronizer 'BatchNumX' to 'BatchNumX'
 if err := synchronizerKVDB.MakeCheckpointFromTo(batchNum, checkpointPath); err != nil {
 return tracerr.Wrap(err)
 }
@@ -310,7 +310,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
 return nil
 }
 // Fill the batch_num in the query with Sprintf because we are using a
-// named query which works with slices, and doens't handle an extra
+// named query which works with slices, and doesn't handle an extra
 // individual argument.
 query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
 if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil {
@@ -38,7 +38,7 @@ func TestMain(m *testing.M) {
 panic(err)
 }
 l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
-apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
+apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
 l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
 test.WipeDB(l2DB.DB())
 historyDB = historydb.NewHistoryDB(db, db, nil)
@@ -510,7 +510,7 @@ func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error)
 return l.db.CheckpointExists(batchNum)
 }
 
-// Reset performs a reset in the LocaStateDB. If fromSynchronizer is true, it
+// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it
 // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
 // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
 func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
@@ -93,8 +93,8 @@ type APIConnectionController struct {
 timeout time.Duration
 }
 
-// NewAPICnnectionController initialize APIConnectionController
-func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
+// NewAPIConnectionController initialize APIConnectionController
+func NewAPIConnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
 return &APIConnectionController{
 smphr: semaphore.NewWeighted(int64(maxConnections)),
 timeout: timeout,
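
The renamed constructor wires a weighted semaphore (smphr) and a timeout, which is enough to sketch how such a connection limiter is typically used. The type and method names below are assumptions about the shape, not the repository's exact API:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/sync/semaphore"
    )

    // connLimiterSketch mirrors the fields visible in the hunk above.
    type connLimiterSketch struct {
        smphr   *semaphore.Weighted
        timeout time.Duration
    }

    func newConnLimiterSketch(maxConnections int, timeout time.Duration) *connLimiterSketch {
        return &connLimiterSketch{
            smphr:   semaphore.NewWeighted(int64(maxConnections)),
            timeout: timeout,
        }
    }

    // acquire blocks until a slot is free or the timeout expires.
    func (c *connLimiterSketch) acquire(ctx context.Context) (release func(), err error) {
        ctx, cancel := context.WithTimeout(ctx, c.timeout)
        defer cancel()
        if err := c.smphr.Acquire(ctx, 1); err != nil {
            return nil, err
        }
        return func() { c.smphr.Release(1) }, nil
    }

    func main() {
        lim := newConnLimiterSketch(1, time.Second)
        release, err := lim.acquire(context.Background())
        if err != nil {
            fmt.Println("timed out waiting for a free SQL connection:", err)
            return
        }
        defer release()
        fmt.Println("got a connection slot")
    }
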
@@ -107,7 +107,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 }
 var apiConnCon *dbUtils.APIConnectionController
 if cfg.API.Explorer || mode == ModeCoordinator {
-apiConnCon = dbUtils.NewAPICnnectionController(
+apiConnCon = dbUtils.NewAPIConnectionController(
 cfg.API.MaxSQLConnections,
 cfg.API.SQLConnectionTimeout.Duration,
 )
@@ -491,8 +491,8 @@ func NewNodeAPI(
 }, nil
 }
 
-// Run starts the http server of the NodeAPI. To stop it, pass a context with
-// cancelation.
+// Run starts the http server of the NodeAPI. To stop it, pass a context
+// with cancellation.
 func (a *NodeAPI) Run(ctx context.Context) error {
 server := &http.Server{
 Handler: a.engine,
@@ -235,7 +235,7 @@ func (p *ProofServerClient) CalculateProof(ctx context.Context, zkInputs *common
 return tracerr.Wrap(p.apiInput(ctx, zkInputs))
 }
 
-// GetProof retreives the Proof and Public Data (public inputs) from the
+// GetProof retrieves the Proof and Public Data (public inputs) from the
 // ServerProof, blocking until the proof is ready.
 func (p *ProofServerClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
 if err := p.WaitReady(ctx); err != nil {
@@ -298,7 +298,7 @@ func (p *MockClient) CalculateProof(ctx context.Context, zkInputs *common.ZKInpu
 return nil
 }
 
-// GetProof retreives the Proof from the ServerProof
+// GetProof retrieves the Proof from the ServerProof
 func (p *MockClient) GetProof(ctx context.Context) (*Proof, []*big.Int, error) {
 // Simulate a delay
 select {
@@ -31,7 +31,7 @@ var (
 ErrUnknownBlock = fmt.Errorf("unknown block")
 )
 
-// Stats of the syncrhonizer
+// Stats of the synchronizer
 type Stats struct {
 Eth struct {
 RefreshPeriod time.Duration
@@ -347,7 +347,7 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
 }
 
 // updateCurrentSlot updates the slot with information of the current slot.
-// The information abouth which coordinator is allowed to forge is only updated
+// The information about which coordinator is allowed to forge is only updated
 // when we are Synced.
 // hasBatch is true when the last synced block contained at least one batch.
 func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
@@ -400,7 +400,7 @@ func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch
 }
 
 // updateNextSlot updates the slot with information of the next slot.
-// The information abouth which coordinator is allowed to forge is only updated
+// The information about which coordinator is allowed to forge is only updated
 // when we are Synced.
 func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
 // We want the next block because the current one is already mined
@@ -503,9 +503,9 @@ func (s *Synchronizer) resetIntermediateState() error {
 return nil
 }
 
-// Sync attems to synchronize an ethereum block starting from lastSavedBlock.
+// Sync attempts to synchronize an ethereum block starting from lastSavedBlock.
 // If lastSavedBlock is nil, the lastSavedBlock value is obtained from de DB.
-// If a block is synched, it will be returned and also stored in the DB. If a
+// If a block is synced, it will be returned and also stored in the DB. If a
 // reorg is detected, the number of discarded blocks will be returned and no
 // synchronization will be made.
 // TODO: Be smart about locking: only lock during the read/write operations
@@ -558,7 +558,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
 "ethLastBlock", s.stats.Eth.LastBlock,
 )
 
-// Check that the obtianed ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
+// Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg!
 if lastSavedBlock != nil {
 if lastSavedBlock.Hash != ethBlock.ParentHash {
 // Reorg detected
@@ -578,7 +578,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
 // If there was an error during sync, reset to the last block
 // in the historyDB because the historyDB is written last in
 // the Sync method and is the source of consistency. This
-// allows reseting the stateDB in the case a batch was
+// allows resetting the stateDB in the case a batch was
 // processed but the historyDB block was not committed due to an
 // error.
 if err != nil {
@@ -818,7 +818,7 @@ func (s *Synchronizer) resetState(block *common.Block) error {
 return nil
 }
 
-// rollupSync retreives all the Rollup Smart Contract Data that happened at
+// rollupSync retrieves all the Rollup Smart Contract Data that happened at
 // ethBlock.blockNum with ethBlock.Hash.
 func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, error) {
 blockNum := ethBlock.Num
@@ -823,7 +823,7 @@ func TestSyncForgerCommitment(t *testing.T) {
 
 // Store ForgerComitmnent observed at every block by the live synchronizer
 syncCommitment := map[int64]bool{}
-// Store ForgerComitmnent observed at every block by a syncrhonizer that is restarted
+// Store ForgerComitmnent observed at every block by a synchronizer that is restarted
 syncRestartedCommitment := map[int64]bool{}
 for _, block := range blocks {
 // Add block data to the smart contracts
@@ -101,8 +101,8 @@ func (a *DebugAPI) handleSyncStats(c *gin.Context) {
 c.JSON(http.StatusOK, stats)
 }
 
-// Run starts the http server of the DebugAPI. To stop it, pass a context with
-// cancelation (see `debugapi_test.go` for an example).
+// Run starts the http server of the DebugAPI. To stop it, pass a context
+// with cancellation (see `debugapi_test.go` for an example).
 func (a *DebugAPI) Run(ctx context.Context) error {
 api := gin.Default()
 api.NoRoute(handleNoRoute)
@@ -51,7 +51,7 @@ func TestDebugAPI(t *testing.T) {
 require.Nil(t, err)
 
 addr := "localhost:12345"
-// We won't test the sync/stats endpoint, so we can se the Syncrhonizer to nil
+// We won't test the sync/stats endpoint, so we can se the synchronizer to nil
 debugAPI := NewDebugAPI(addr, sdb, nil)
 
 ctx, cancel := context.WithCancel(context.Background())
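
TestDebugAPI above pairs NewDebugAPI with a cancellable context, which is exactly what the updated Run comments describe. A usage fragment in that spirit (error handling trimmed; debugAPI is the value created in the test, and a NodeAPI is stopped the same way):

    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        // Run blocks serving HTTP until ctx is cancelled.
        if err := debugAPI.Run(ctx); err != nil {
            panic(err)
        }
    }()
    // ... exercise the endpoints ...
    cancel() // stops the server
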