Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 11:26:44 +01:00)

Compare commits: develop ... feature/ge (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0511acd5e6 |  |
|  | 8d087e2727 |  |
|  | 22ffd93292 |  |
|  | b565c9da1a |  |
|  | 7901705dfd |  |
|  | 3a706e7775 |  |
|  | c84b3a4d0f |  |
@@ -21,7 +21,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
}
// API to common + verify signature
commonAuth := accountCreationAuthAPIToCommon(&apiAuth)
if !commonAuth.VerifySignature(a.cg.ChainID, a.hermezAddress) {
if !commonAuth.VerifySignature(a.chainID, a.hermezAddress) {
retBadReq(errors.New("invalid signature"), c)
return
}
api/api.go (48 changed lines)
@@ -1,27 +1,3 @@
/*
Package api implements the public interface of the hermez-node using a HTTP REST API.
There are two subsets of endpoints:
- coordinatorEndpoints: used to receive L2 transactions and account creation authorizations. Targeted for wallets.
- explorerEndpoints: used to provide all sorts of information about the network. Targeted for explorers and similar services.

About the configuration of the API:
- The API is supposed to be launched using the cli found at the package cli/node, and configured through the configuration file.
- The mentioned configuration file allows exposing any combination of the endpoint subsets.
- Although the API can run in a "standalone" manner using the serveapi command, it won't work properly
unless another process acting as a coord or sync is filling the HistoryDB.

Design principles and considerations:
- In order to decouple the API process from the rest of the node, all the communication between this package and the rest of
the system is done through the SQL database. As a matter of fact, the only public function of the package is the constructor NewAPI.
All the information needed for the API to work should be obtained through the configuration file of the cli or the database.
- The format of the requests / responses doesn't match directly with the common types, and for this reason, the package api/apitypes is used
to facilitate the format conversion. Most of the time, this is done directly at the db level.
- The API endpoints are fully documented using OpenAPI, aka Swagger. All the endpoints are tested against the spec to ensure consistency
between implementation and specification. To get a sense of which endpoints exist and how they work, it's strongly recommended to check this specification.
The specification can be found at api/swagger.yml.
- In general, all the API endpoints produce queries to the SQL database in order to retrieve / insert the requested information. The most notable exceptions to this are
the /config endpoint, which returns a static object generated at construction time, and /state, which is also retrieved from the database but is generated by the api/stateapiupdater package.
*/
package api

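Since the documentation above names NewAPI as the package's only public entry point, a minimal wiring sketch may help to see how the two endpoint subsets are enabled. This is a hypothetical example, not code from this repository: the NewAPI parameter list beyond the two boolean flags and the L2DB visible in this diff is assumed, and the databases are expected to be opened elsewhere.

```go
// Hypothetical wiring of the api package (not code from this repository). The
// NewAPI parameter list beyond the two endpoint-subset flags is assumed from
// the fragments visible in this diff and may differ from the real signature.
package apisketch

import (
	"github.com/gin-gonic/gin"

	"github.com/hermeznetwork/hermez-node/api"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/hermeznetwork/hermez-node/db/l2db"
)

// startAPI exposes both endpoint subsets on a single gin engine. An
// explorer-only deployment would pass coordinatorEndpoints=false and could
// skip the L2DB, matching the check shown in NewAPI below.
func startAPI(hdb *historydb.HistoryDB, l2 *l2db.L2DB) error {
	engine := gin.Default()
	coordinatorEndpoints, explorerEndpoints := true, true
	if _, err := api.NewAPI(coordinatorEndpoints, explorerEndpoints, engine, hdb, l2); err != nil {
		return err
	}
	// The databases do all the talking with the rest of the node, as described
	// in the package documentation; the HTTP server is the only extra piece.
	return engine.Run(":8086")
}
```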
import (
@@ -31,7 +7,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/tracerr"
)

@@ -40,6 +15,7 @@ type API struct {
h *historydb.HistoryDB
cg *configAPI
l2 *l2db.L2DB
chainID uint16
hermezAddress ethCommon.Address
}
@@ -51,6 +27,7 @@ func NewAPI(
|
||||
l2db *l2db.L2DB,
|
||||
) (*API, error) {
|
||||
// Check input
|
||||
// TODO: is stateDB only needed for explorer endpoints or for both?
|
||||
if coordinatorEndpoints && l2db == nil {
|
||||
return nil, tracerr.Wrap(errors.New("cannot serve Coordinator endpoints without L2DB"))
|
||||
}
|
||||
@@ -67,30 +44,23 @@ func NewAPI(
|
||||
RollupConstants: *newRollupConstants(consts.Rollup),
|
||||
AuctionConstants: consts.Auction,
|
||||
WDelayerConstants: consts.WDelayer,
|
||||
ChainID: consts.ChainID,
|
||||
},
|
||||
l2: l2db,
|
||||
chainID: consts.ChainID,
|
||||
hermezAddress: consts.HermezAddress,
|
||||
}
|
||||
|
||||
middleware, err := metric.PrometheusMiddleware()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
server.Use(middleware)
|
||||
|
||||
server.NoRoute(a.noRoute)
|
||||
|
||||
v1 := server.Group("/v1")
|
||||
|
||||
// Add coordinator endpoints
|
||||
if coordinatorEndpoints {
|
||||
// Account creation authorization
|
||||
// Account
|
||||
v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
|
||||
v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
|
||||
// Transaction
|
||||
v1.POST("/transactions-pool", a.postPoolTx)
|
||||
v1.GET("/transactions-pool/:id", a.getPoolTx)
|
||||
v1.GET("/transactions-pool", a.getPoolTxs)
|
||||
}
|
||||
|
||||
// Add explorer endpoints
|
||||
@@ -103,23 +73,17 @@ func NewAPI(
|
||||
// Transaction
|
||||
v1.GET("/transactions-history", a.getHistoryTxs)
|
||||
v1.GET("/transactions-history/:id", a.getHistoryTx)
|
||||
// Batches
|
||||
// Status
|
||||
v1.GET("/batches", a.getBatches)
|
||||
v1.GET("/batches/:batchNum", a.getBatch)
|
||||
v1.GET("/full-batches/:batchNum", a.getFullBatch)
|
||||
// Slots
|
||||
v1.GET("/slots", a.getSlots)
|
||||
v1.GET("/slots/:slotNum", a.getSlot)
|
||||
// Bids
|
||||
v1.GET("/bids", a.getBids)
|
||||
// State
|
||||
v1.GET("/state", a.getState)
|
||||
// Config
|
||||
v1.GET("/config", a.getConfig)
|
||||
// Tokens
|
||||
v1.GET("/tokens", a.getTokens)
|
||||
v1.GET("/tokens/:id", a.getToken)
|
||||
// Coordinators
|
||||
v1.GET("/coordinators", a.getCoordinators)
|
||||
}
|
||||
|
||||
|
||||
@@ -40,11 +40,8 @@ type Pendinger interface {
|
||||
New() Pendinger
|
||||
}
|
||||
|
||||
const (
|
||||
apiPort = "4010"
|
||||
apiIP = "http://localhost:"
|
||||
apiURL = apiIP + apiPort + "/v1/"
|
||||
)
|
||||
const apiPort = "4010"
|
||||
const apiURL = "http://localhost:" + apiPort + "/v1/"
|
||||
|
||||
var SetBlockchain = `
|
||||
Type: Blockchain
|
||||
@@ -218,7 +215,6 @@ func TestMain(m *testing.M) {
|
||||
chainID := uint16(0)
|
||||
_config := getConfigTest(chainID)
|
||||
config = configAPI{
|
||||
ChainID: chainID,
|
||||
RollupConstants: *newRollupConstants(_config.RollupConstants),
|
||||
AuctionConstants: _config.AuctionConstants,
|
||||
WDelayerConstants: _config.WDelayerConstants,
|
||||
@@ -850,25 +846,6 @@ func doBadReq(method, path string, reqBody io.Reader, expectedResponseCode int)
|
||||
return swagger.ValidateResponse(ctx, responseValidationInput)
|
||||
}
|
||||
|
||||
func doSimpleReq(method, endpoint string) (string, error) {
|
||||
client := &http.Client{}
|
||||
httpReq, err := http.NewRequest(method, endpoint, nil)
|
||||
if err != nil {
|
||||
return "", tracerr.Wrap(err)
|
||||
}
|
||||
resp, err := client.Do(httpReq)
|
||||
if err != nil {
|
||||
return "", tracerr.Wrap(err)
|
||||
}
|
||||
//nolint
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", tracerr.Wrap(err)
|
||||
}
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
// test helpers
|
||||
|
||||
func getTimestamp(blockNum int64, blocks []common.Block) time.Time {
|
||||
|
||||
@@ -1,11 +1,3 @@
/*
Package apitypes is used to map the common types used across the node with the format expected by the API.

This is done using different strategies:
- Marshallers: they get triggered when the API marshals the response structs into JSONs
- Scanners/Valuers: they get triggered when a struct is sent/received to/from the SQL database
- Adhoc functions: when the already mentioned strategies are not suitable, functions are added to the structs to facilitate the conversions
*/
package apitypes

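To make the Scanner/Valuer strategy concrete, here is a small hedged sketch of an API-flavoured address type that handles the "hez:" prefix at the SQL boundary. The type name and prefix handling are invented for illustration; they are not the package's actual code.

```go
// Hypothetical example of the Scanner/Valuer strategy described above; the
// HezAddr type and its prefix handling are illustrative only.
package apitypessketch

import (
	"database/sql/driver"
	"fmt"
	"strings"
)

// HezAddr is an Ethereum address rendered with the "hez:" prefix used by the API.
type HezAddr string

// Value implements driver.Valuer: strip the API prefix before writing to SQL.
func (a HezAddr) Value() (driver.Value, error) {
	return strings.TrimPrefix(string(a), "hez:"), nil
}

// Scan implements sql.Scanner: add the API prefix when reading from SQL.
func (a *HezAddr) Scan(src interface{}) error {
	switch v := src.(type) {
	case string:
		*a = HezAddr("hez:" + v)
	case []byte:
		*a = HezAddr("hez:" + string(v))
	default:
		return fmt.Errorf("cannot scan %T into HezAddr", src)
	}
	return nil
}
```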
import (
@@ -109,7 +109,7 @@ func (a *API) getFullBatch(c *gin.Context) {
|
||||
// Fetch txs forged in the batch from historyDB
|
||||
maxTxsPerBatch := uint(2048) //nolint:gomnd
|
||||
txs, _, err := a.h.GetTxsAPI(
|
||||
nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
|
||||
nil, nil, nil, nil, nil, batchNum, nil, nil, &maxTxsPerBatch, historydb.OrderAsc,
|
||||
)
|
||||
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
|
||||
retSQLErr(err, c)
|
||||
|
||||
@@ -57,7 +57,6 @@ type Config struct {
|
||||
}
|
||||
|
||||
type configAPI struct {
|
||||
ChainID uint16 `json:"chainId"`
|
||||
RollupConstants rollupConstants `json:"hermez"`
|
||||
AuctionConstants common.AuctionConstants `json:"auction"`
|
||||
WDelayerConstants common.WDelayerConstants `json:"withdrawalDelayer"`
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/hermeznetwork/hermez-node/db/historydb"
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/hermeznetwork/hermez-node/metric"
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
"github.com/lib/pq"
|
||||
"github.com/russross/meddler"
|
||||
@@ -47,9 +46,7 @@ var (
|
||||
|
||||
func retSQLErr(err error, c *gin.Context) {
|
||||
log.Warnw("HTTP API SQL request error", "err", err)
|
||||
unwrapErr := tracerr.Unwrap(err)
|
||||
metric.CollectError(unwrapErr)
|
||||
errMsg := unwrapErr.Error()
|
||||
errMsg := tracerr.Unwrap(err).Error()
|
||||
retDupKey := func(errCode pq.ErrorCode) {
|
||||
// https://www.postgresql.org/docs/current/errcodes-appendix.html
|
||||
if errCode == "23505" {
|
||||
@@ -83,7 +80,6 @@ func retSQLErr(err error, c *gin.Context) {
|
||||
|
||||
func retBadReq(err error, c *gin.Context) {
|
||||
log.Warnw("HTTP API Bad request error", "err", err)
|
||||
metric.CollectError(err)
|
||||
c.JSON(http.StatusBadRequest, errorMsg{
|
||||
Message: err.Error(),
|
||||
})
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func (a *API) noRoute(c *gin.Context) {
|
||||
matched, _ := regexp.MatchString(`^/v[0-9]+/`, c.Request.URL.Path)
|
||||
if !matched {
|
||||
c.JSON(http.StatusNotFound, gin.H{
|
||||
"error": "Version not provided, please provide a valid version in the path such as v1",
|
||||
})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusNotFound, gin.H{
|
||||
"error": "404 page not found",
|
||||
})
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNoRouteVersionNotProvided(t *testing.T) {
|
||||
endpoint := apiIP + apiPort + "/"
|
||||
// not using doGoodReq, because internally
|
||||
// there is a method FindRoute that checks route and returns error
|
||||
resp, err := doSimpleReq("GET", endpoint)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
"{\"error\":\"Version not provided, please provide a valid version in the path such as v1\"}\n",
|
||||
resp)
|
||||
}
|
||||
|
||||
func TestNoRoute(t *testing.T) {
|
||||
endpoint := apiURL
|
||||
// not using doGoodReq, because internally
|
||||
// there is a method FindRoute that checks route and returns error
|
||||
resp, err := doSimpleReq("GET", endpoint)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
"{\"error\":\"404 page not found\"}\n",
|
||||
resp)
|
||||
}
|
||||
@@ -96,6 +96,32 @@ func parseQueryBJJ(c querier) (*babyjub.PublicKeyComp, error) {
|
||||
return hezStringToBJJ(bjjStr, name)
|
||||
}
|
||||
|
||||
func parseQueryPoolL2TxState(c querier) (*common.PoolL2TxState, error) {
|
||||
const name = "state"
|
||||
stateStr := c.Query(name)
|
||||
if stateStr == "" {
|
||||
return nil, nil
|
||||
}
|
||||
switch common.PoolL2TxState(stateStr) {
|
||||
case common.PoolL2TxStatePending:
|
||||
ret := common.PoolL2TxStatePending
|
||||
return &ret, nil
|
||||
case common.PoolL2TxStateForged:
|
||||
ret := common.PoolL2TxStateForged
|
||||
return &ret, nil
|
||||
case common.PoolL2TxStateForging:
|
||||
ret := common.PoolL2TxStateForging
|
||||
return &ret, nil
|
||||
case common.PoolL2TxStateInvalid:
|
||||
ret := common.PoolL2TxStateInvalid
|
||||
return &ret, nil
|
||||
}
|
||||
return nil, tracerr.Wrap(fmt.Errorf(
|
||||
"invalid %s, %s is not a valid option. Check the valid options in the docmentation",
|
||||
name, stateStr,
|
||||
))
|
||||
}
|
||||
|
||||
func parseQueryTxType(c querier) (*common.TxType, error) {
|
||||
const name = "type"
|
||||
typeStr := c.Query(name)
|
||||
@@ -146,6 +172,18 @@ func parseIdx(c querier) (*common.Idx, error) {
|
||||
return stringToIdx(idxStr, name)
|
||||
}
|
||||
|
||||
func parseFromIdx(c querier) (*common.Idx, error) {
|
||||
const name = "fromAccountIndex"
|
||||
idxStr := c.Query(name)
|
||||
return stringToIdx(idxStr, name)
|
||||
}
|
||||
|
||||
func parseToIdx(c querier) (*common.Idx, error) {
|
||||
const name = "toAccountIndex"
|
||||
idxStr := c.Query(name)
|
||||
return stringToIdx(idxStr, name)
|
||||
}
|
||||
|
||||
func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.PublicKeyComp, *common.Idx, error) {
|
||||
// TokenID
|
||||
tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
|
||||
@@ -181,6 +219,47 @@ func parseExitFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.
|
||||
return tokenID, addr, bjj, idx, nil
|
||||
}
|
||||
|
||||
func parseTxsHistoryFilters(c querier) (*common.TokenID, *ethCommon.Address,
|
||||
*babyjub.PublicKeyComp, *common.Idx, *common.Idx, error) {
|
||||
// TokenID
|
||||
tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
var tokenID *common.TokenID
|
||||
if tid != nil {
|
||||
tokenID = new(common.TokenID)
|
||||
*tokenID = common.TokenID(*tid)
|
||||
}
|
||||
// Hez Eth addr
|
||||
addr, err := parseQueryHezEthAddr(c)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
// BJJ
|
||||
bjj, err := parseQueryBJJ(c)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
if addr != nil && bjj != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(errors.New("bjj and hezEthereumAddress params are incompatible"))
|
||||
}
|
||||
// from Idx
|
||||
fromIdx, err := parseFromIdx(c)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
// to Idx
|
||||
toIdx, err := parseToIdx(c)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
if (fromIdx != nil || toIdx != nil) && (addr != nil || bjj != nil || tokenID != nil) {
|
||||
return nil, nil, nil, nil, nil, tracerr.Wrap(errors.New("accountIndex is incompatible with BJJ, hezEthereumAddress and tokenId"))
|
||||
}
|
||||
return tokenID, addr, bjj, fromIdx, toIdx, nil
|
||||
}
|
||||
|
||||
func parseTokenFilters(c querier) ([]common.TokenID, []string, string, error) {
|
||||
idsStr := c.Query("ids")
|
||||
symbolsStr := c.Query("symbols")
|
||||
|
||||
@@ -1,10 +1,3 @@
/*
Package stateapiupdater is responsible for generating and storing the object response of the GET /state endpoint exposed through the api package.
This object is extensively defined at the OpenAPI spec located at api/swagger.yml.

Deployment considerations: in a setup where multiple processes are used (dedicated api process, separated coord / sync, ...), only one process should take care
of using this package.
*/
package stateapiupdater

import (
@@ -415,6 +415,55 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error500'
|
||||
get:
|
||||
tags:
|
||||
- Coordinator
|
||||
summary: Get transactions that are in the pool.
|
||||
operationId: getPoolTxs
|
||||
parameters:
|
||||
- name: state
|
||||
in: query
|
||||
required: false
|
||||
description: State of the transactions, e.g. "pend"
|
||||
schema:
|
||||
$ref: '#/components/schemas/PoolL2TransactionState'
|
||||
- name: fromAccountIndex
|
||||
in: query
|
||||
required: false
|
||||
description: Id of the from account
|
||||
schema:
|
||||
$ref: '#/components/schemas/AccountIndex'
|
||||
- name: toAccountIndex
|
||||
in: query
|
||||
required: false
|
||||
description: Id of the to account
|
||||
schema:
|
||||
$ref: '#/components/schemas/AccountIndex'
|
||||
responses:
|
||||
'200':
|
||||
description: Successful operation.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PoolL2Transactions'
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error400'
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error404'
|
||||
'500':
|
||||
description: Internal server error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error500'
|
||||
'/transactions-pool/{id}':
|
||||
get:
|
||||
tags:
|
||||
@@ -487,10 +536,16 @@ paths:
|
||||
required: false
|
||||
schema:
|
||||
$ref: '#/components/schemas/BJJ'
|
||||
- name: accountIndex
|
||||
- name: fromAccountIndex
|
||||
in: query
|
||||
required: false
|
||||
description: Only get transactions sent from or to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
|
||||
description: Only get transactions sent from a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
|
||||
schema:
|
||||
$ref: '#/components/schemas/AccountIndex'
|
||||
- name: toAccountIndex
|
||||
in: query
|
||||
required: false
|
||||
description: Only get transactions sent to a specific account. Incompatible with the queries `tokenId`, `hezEthereumAddress` and `BJJ`.
|
||||
schema:
|
||||
$ref: '#/components/schemas/AccountIndex'
|
||||
- name: batchNum
|
||||
@@ -1439,6 +1494,14 @@ components:
|
||||
- requestFee
|
||||
- requestNonce
|
||||
- token
|
||||
PoolL2Transactions:
|
||||
type: object
|
||||
properties:
|
||||
transactions:
|
||||
type: array
|
||||
description: List of pool l2 transactions
|
||||
items:
|
||||
$ref: '#/components/schemas/PoolL2Transaction'
|
||||
TransactionId:
|
||||
type: string
|
||||
description: Identifier for transactions. Used for any kind of transaction (both L1 and L2). More info on how the identifiers are built [here](https://idocs.hermez.io/#/spec/architecture/db/README?id=txid)
|
||||
@@ -3040,15 +3103,10 @@ components:
|
||||
- maxEmergencyModeTime
|
||||
- hermezRollup
|
||||
additionalProperties: false
|
||||
chainId:
|
||||
type: integer
|
||||
description: Id of the chain
|
||||
example: 27
|
||||
required:
|
||||
- hermez
|
||||
- auction
|
||||
- withdrawalDelayer
|
||||
- chainId
|
||||
additionalProperties: false
|
||||
Error:
|
||||
type: object
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func (a *API) getHistoryTxs(c *gin.Context) {
|
||||
// Get query parameters
|
||||
tokenID, addr, bjj, idx, err := parseExitFilters(c)
|
||||
tokenID, addr, bjj, fromIdx, toIdx, err := parseTxsHistoryFilters(c)
|
||||
if err != nil {
|
||||
retBadReq(err, c)
|
||||
return
|
||||
@@ -35,7 +35,7 @@ func (a *API) getHistoryTxs(c *gin.Context) {
|
||||
|
||||
// Fetch txs from historyDB
|
||||
txs, pendingItems, err := a.h.GetTxsAPI(
|
||||
addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
|
||||
addr, bjj, tokenID, fromIdx, toIdx, batchNum, txType, fromItem, limit, order,
|
||||
)
|
||||
if err != nil {
|
||||
retSQLErr(err, c)
|
||||
|
||||
@@ -324,8 +324,8 @@ func TestGetHistoryTxs(t *testing.T) {
|
||||
idx, err := stringToIdx(idxStr, "")
|
||||
assert.NoError(t, err)
|
||||
path = fmt.Sprintf(
|
||||
"%s?accountIndex=%s&limit=%d",
|
||||
endpoint, idxStr, limit,
|
||||
"%s?fromAccountIndex=%s&toAccountIndex=%s&limit=%d",
|
||||
endpoint, idxStr, idxStr, limit,
|
||||
)
|
||||
err = doGoodReqPaginated(path, historydb.OrderAsc, &testTxsResponse{}, appendIter)
|
||||
assert.NoError(t, err)
|
||||
@@ -431,8 +431,8 @@ func TestGetHistoryTxs(t *testing.T) {
|
||||
assertTxs(t, []testTx{}, fetchedTxs)
|
||||
// 400
|
||||
path = fmt.Sprintf(
|
||||
"%s?accountIndex=%s&hezEthereumAddress=%s",
|
||||
endpoint, idx, account.EthAddr,
|
||||
"%s?fromAccountIndex=%s&toAccountIndex=%s&hezEthereumAddress=%s",
|
||||
endpoint, idx, idx, account.EthAddr,
|
||||
)
|
||||
err = doBadReq("GET", path, nil, 400)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -55,6 +55,41 @@ func (a *API) getPoolTx(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, tx)
|
||||
}
|
||||
|
||||
func (a *API) getPoolTxs(c *gin.Context) {
|
||||
// Get from idx
|
||||
fromIdx, err := parseFromIdx(c)
|
||||
if err != nil {
|
||||
retBadReq(err, c)
|
||||
return
|
||||
}
|
||||
// Get to idx
|
||||
toIdx, err := parseToIdx(c)
|
||||
if err != nil {
|
||||
retBadReq(err, c)
|
||||
return
|
||||
}
|
||||
// Get state
|
||||
state, err := parseQueryPoolL2TxState(c)
|
||||
if err != nil {
|
||||
retBadReq(err, c)
|
||||
return
|
||||
}
|
||||
// Fetch txs from l2DB
|
||||
txs, err := a.l2.GetPoolTxs(fromIdx, toIdx, state)
|
||||
if err != nil {
|
||||
retSQLErr(err, c)
|
||||
return
|
||||
}
|
||||
|
||||
// Build successful response
|
||||
type txsResponse struct {
|
||||
Txs []*l2db.PoolTxAPI `json:"transactions"`
|
||||
}
|
||||
c.JSON(http.StatusOK, &txsResponse{
|
||||
Txs: txs,
|
||||
})
|
||||
}
|
||||
|
||||
type receivedPoolTx struct {
|
||||
TxID common.TxID `json:"id" binding:"required"`
|
||||
Type common.TxType `json:"type" binding:"required"`
|
||||
@@ -187,7 +222,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
|
||||
poolTx.TokenID, account.TokenID))
|
||||
}
|
||||
// Check signature
|
||||
if !poolTx.VerifySignature(a.cg.ChainID, account.BJJ) {
|
||||
if !poolTx.VerifySignature(a.chainID, account.BJJ) {
|
||||
return tracerr.Wrap(errors.New("wrong signature"))
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -47,6 +47,10 @@ type testPoolTxReceive struct {
|
||||
Token historydb.TokenWithUSD `json:"token"`
|
||||
}
|
||||
|
||||
type testPoolTxsResponse struct {
|
||||
Txs []testPoolTxReceive `json:"transactions"`
|
||||
}
|
||||
|
||||
// testPoolTxSend is a struct to be used as a JSON body
|
||||
// when testing POST /transactions-pool
|
||||
type testPoolTxSend struct {
|
||||
@@ -225,6 +229,24 @@ func TestPoolTxs(t *testing.T) {
|
||||
err = doBadReq("POST", endpoint, jsonTxReader, 400)
|
||||
require.NoError(t, err)
|
||||
// GET
|
||||
// get by idx
|
||||
fetchedTxs := testPoolTxsResponse{}
|
||||
require.NoError(t, doGoodReq(
|
||||
"GET",
|
||||
endpoint+"?fromAccountIndex=hez:ETH:263",
|
||||
nil, &fetchedTxs))
|
||||
assert.Equal(t, 1, len(fetchedTxs.Txs))
|
||||
assert.Equal(t, "hez:ETH:263", fetchedTxs.Txs[0].FromIdx)
|
||||
// get by state
|
||||
require.NoError(t, doGoodReq(
|
||||
"GET",
|
||||
endpoint+"?state=pend",
|
||||
nil, &fetchedTxs))
|
||||
assert.Equal(t, 4, len(fetchedTxs.Txs))
|
||||
for _, v := range fetchedTxs.Txs {
|
||||
assert.Equal(t, common.PoolL2TxStatePending, v.State)
|
||||
}
|
||||
// GET
|
||||
endpoint += "/"
|
||||
for _, tx := range tc.poolTxsToReceive {
|
||||
fetchedTx := testPoolTxReceive{}
|
||||
|
||||
@@ -35,7 +35,7 @@ Symbol = "SUSHI"
|
||||
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
|
||||
|
||||
[Debug]
|
||||
APIAddress = "0.0.0.0:12345"
|
||||
APIAddress = "localhost:12345"
|
||||
MeddlerLogs = true
|
||||
GinDebugMode = true
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ type L1Tx struct {
|
||||
// - L1UserTx: 0
|
||||
// - L1CoordinatorTx: 1
|
||||
TxID TxID `meddler:"id"`
|
||||
// ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged
|
||||
// ToForgeL1TxsNum indicates in which the tx was forged / will be forged
|
||||
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
|
||||
Position int `meddler:"position"`
|
||||
// UserOrigin is set to true if the tx was originated by a user, false if it was
|
||||
|
||||
@@ -80,7 +80,6 @@ type BatchInfo struct {
|
||||
PipelineNum int
|
||||
BatchNum common.BatchNum
|
||||
ServerProof prover.Client
|
||||
ProofStart time.Time
|
||||
ZKInputs *common.ZKInputs
|
||||
Proof *prover.Proof
|
||||
PublicInputs []*big.Int
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -15,7 +14,6 @@ import (
|
||||
"github.com/hermeznetwork/hermez-node/db/l2db"
|
||||
"github.com/hermeznetwork/hermez-node/eth"
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/hermeznetwork/hermez-node/metric"
|
||||
"github.com/hermeznetwork/hermez-node/prover"
|
||||
"github.com/hermeznetwork/hermez-node/synchronizer"
|
||||
"github.com/hermeznetwork/hermez-node/txselector"
|
||||
@@ -248,7 +246,6 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
|
||||
|
||||
// 3. Send the ZKInputs to the proof server
|
||||
batchInfo.ServerProof = serverProof
|
||||
batchInfo.ProofStart = time.Now()
|
||||
if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
} else if err != nil {
|
||||
@@ -523,30 +520,15 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
// l1UserFutureTxs are the l1UserTxs that are not being forged
|
||||
// in the next batch, but that are also in the queue for the
|
||||
// future batches
|
||||
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1)
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
||||
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs)
|
||||
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
} else {
|
||||
// get l1UserFutureTxs which are all the l1 pending in all the
|
||||
// queues
|
||||
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// 2b: only L2 txs
|
||||
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
|
||||
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs)
|
||||
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
|
||||
if err != nil {
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
@@ -620,9 +602,6 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
|
||||
|
||||
// waitServerProof gets the generated zkProof & sends it to the SmartContract
|
||||
func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
|
||||
defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart,
|
||||
batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum))
|
||||
|
||||
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
|
||||
// until not resolved don't continue. Returns when the proof server has calculated the proof
|
||||
if err != nil {
|
||||
|
||||
@@ -456,7 +456,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
|
||||
// and pagination info
|
||||
func (hdb *HistoryDB) GetTxsAPI(
|
||||
ethAddr *ethCommon.Address, bjj *babyjub.PublicKeyComp,
|
||||
tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
|
||||
tokenID *common.TokenID, fromIdx, toIdx *common.Idx, batchNum *uint, txType *common.TxType,
|
||||
fromItem, limit *uint, order string,
|
||||
) ([]TxAPI, uint64, error) {
|
||||
// Warning: amount_success and deposit_amount_success have true as default for
|
||||
@@ -508,14 +508,32 @@ func (hdb *HistoryDB) GetTxsAPI(
|
||||
nextIsAnd = true
|
||||
}
|
||||
// idx filter
|
||||
if idx != nil {
|
||||
if fromIdx != nil && toIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND "
|
||||
} else {
|
||||
queryStr += "WHERE "
|
||||
}
|
||||
queryStr += "(tx.effective_from_idx = ? OR tx.to_idx = ?) "
|
||||
args = append(args, idx, idx)
|
||||
queryStr += "(tx.effective_from_idx = ? "
|
||||
queryStr += "OR tx.to_idx = ?) "
|
||||
args = append(args, fromIdx, toIdx)
|
||||
nextIsAnd = true
|
||||
} else if fromIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND "
|
||||
} else {
|
||||
queryStr += "WHERE "
|
||||
}
|
||||
queryStr += "tx.effective_from_idx = ? "
|
||||
nextIsAnd = true
|
||||
} else if toIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND "
|
||||
} else {
|
||||
queryStr += "WHERE "
|
||||
}
|
||||
queryStr += "tx.to_idx = ? "
|
||||
args = append(args, toIdx)
|
||||
nextIsAnd = true
|
||||
}
|
||||
// batchNum filter
|
||||
|
||||
@@ -1,24 +1,3 @@
/*
Package historydb is responsible for storing and retrieving the historic data of the Hermez network.
It's mostly but not exclusively used by the API and the synchronizer.

Apart from the logic defined in this package, it's important to notice that there are some triggers defined in the
migration files that have to be taken into consideration to understand the results of some queries. This is especially true
for reorgs: all the data is directly or indirectly related to a block, which makes handling reorgs as easy as deleting the
reorged blocks from the block table, since all related items will be dropped in cascade. This is not the only case: in general,
functions defined in this package that are somehow affected by the SQL-level logic have a special mention in the function description.

Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.

This package is split in different files following these ideas:
- historydb.go: constructor and functions used by packages other than the api.
- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
to restrict the maximum concurrent connections to the database.
- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however
most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
- nodeinfo.go: used to handle the interfaces and structs that allow communication across processes running on different machines but sharing the same database.
*/
package historydb

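The reorg-handling property described above (delete the reorged blocks and let the schema cascade) can be illustrated with a short sketch. The table and column names follow the ones visible elsewhere in this diff (block, eth_block_num), but the helper itself is hypothetical, not the package's actual reorg code.

```go
// Hypothetical reorg helper: deleting blocks above the last valid block number
// is enough because the migration files declare the dependent tables with
// cascading foreign keys, so batches, txs, etc. are dropped with their block.
package historydbsketch

import (
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
)

// reorgFrom removes every block with a number greater than lastValidBlock.
// All rows that reference those blocks (directly or indirectly) are deleted
// in cascade by the SQL schema, as described in the package documentation.
func reorgFrom(db *sqlx.DB, lastValidBlock int64) error {
	_, err := db.Exec(
		"DELETE FROM block WHERE eth_block_num > $1;",
		lastValidBlock,
	)
	return tracerr.Wrap(err)
}
```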
import (
@@ -772,24 +751,6 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
|
||||
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch
|
||||
// with toForgeL1TxsNum (in one of the future batches, not in the next one).
|
||||
func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
|
||||
var txs []*common.L1Tx
|
||||
err := meddler.QueryAll(
|
||||
hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
|
||||
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
|
||||
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
|
||||
tx.amount, NULL AS effective_amount,
|
||||
tx.deposit_amount, NULL AS effective_deposit_amount,
|
||||
tx.eth_block_num, tx.type, tx.batch_num
|
||||
FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1
|
||||
ORDER BY position;`,
|
||||
toForgeL1TxsNum,
|
||||
)
|
||||
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
// GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
|
||||
// open or frozen queues that are not yet forged)
|
||||
func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {
|
||||
|
||||
@@ -699,55 +699,34 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
|
||||
CreateAccountDeposit(1) B: 5
|
||||
CreateAccountDeposit(1) C: 5
|
||||
CreateAccountDeposit(1) D: 5
|
||||
> block
|
||||
|
||||
> batchL1
|
||||
> block
|
||||
|
||||
CreateAccountDeposit(1) E: 5
|
||||
CreateAccountDeposit(1) F: 5
|
||||
> block
|
||||
|
||||
`
|
||||
tc := til.NewContext(uint16(0), 128)
|
||||
blocks, err := tc.GenerateBlocks(set)
|
||||
require.NoError(t, err)
|
||||
// Sanity check
|
||||
require.Equal(t, 3, len(blocks))
|
||||
require.Equal(t, 1, len(blocks))
|
||||
require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs))
|
||||
|
||||
toForgeL1TxsNum := int64(1)
|
||||
|
||||
for i := range blocks {
|
||||
err = historyDB.AddBlockSCData(&blocks[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, len(l1UserTxs))
|
||||
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1)
|
||||
l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 5, len(l1UserTxs))
|
||||
assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
|
||||
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(l1UserTxs))
|
||||
|
||||
count, err := historyDB.GetUnforgedL1UserTxsCount()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, count)
|
||||
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(l1UserTxs))
|
||||
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(l1UserTxs))
|
||||
assert.Equal(t, 5, count)
|
||||
|
||||
// No l1UserTxs for this toForgeL1TxsNum
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3)
|
||||
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(l1UserTxs))
|
||||
}
|
||||
|
||||
@@ -127,3 +127,57 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
|
||||
txID,
|
||||
))
|
||||
}
|
||||
|
||||
// GetPoolTxs returns Txs from the pool
|
||||
func (l2db *L2DB) GetPoolTxs(fromIdx, toIdx *common.Idx, state *common.PoolL2TxState) ([]*PoolTxAPI, error) {
|
||||
cancel, err := l2db.apiConnCon.Acquire()
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return nil, tracerr.Wrap(err)
|
||||
}
|
||||
defer l2db.apiConnCon.Release()
|
||||
// Apply filters
|
||||
nextIsAnd := false
|
||||
queryStr := selectPoolTxAPI
|
||||
var args []interface{}
|
||||
if state != nil {
|
||||
queryStr += "WHERE state = ? "
|
||||
args = append(args, state)
|
||||
nextIsAnd = true
|
||||
}
|
||||
|
||||
if fromIdx != nil && toIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND ("
|
||||
} else {
|
||||
queryStr += "WHERE ("
|
||||
}
|
||||
queryStr += "tx_pool.from_idx = ? "
|
||||
queryStr += "OR tx_pool.to_idx = ?) "
|
||||
args = append(args, fromIdx, toIdx)
|
||||
} else if fromIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND "
|
||||
} else {
|
||||
queryStr += "WHERE "
|
||||
}
|
||||
queryStr += "tx_pool.from_idx = ? "
|
||||
args = append(args, fromIdx)
|
||||
} else if toIdx != nil {
|
||||
if nextIsAnd {
|
||||
queryStr += "AND "
|
||||
} else {
|
||||
queryStr += "WHERE "
|
||||
}
|
||||
queryStr += "tx_pool.to_idx = ? "
|
||||
args = append(args, toIdx)
|
||||
}
|
||||
queryStr += "AND NOT external_delete;"
|
||||
query := l2db.dbRead.Rebind(queryStr)
|
||||
txs := []*PoolTxAPI{}
|
||||
err = meddler.QueryAll(
|
||||
l2db.dbRead, &txs,
|
||||
query,
|
||||
args...)
|
||||
return txs, tracerr.Wrap(err)
|
||||
}
|
||||
|
||||
@@ -1,20 +1,3 @@
/*
Package l2db is responsible for storing and retrieving the data received by the coordinator through the api.
Note that this data will be different for each coordinator in the network, as this represents the L2 information.

The data managed by this package is fundamentally PoolL2Tx and AccountCreationAuth. All this data comes from
the API, sent by clients, and is used by the txselector to decide which transactions are selected to forge a batch.

Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.

This package is split in different files following these ideas:
- l2db.go: constructor and functions used by packages other than the api.
- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
to restrict the maximum concurrent connections to the database.
- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however
most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
*/
package l2db

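The apiqueries.go semaphore mentioned above also shows up in the GetPoolTxs code later in this diff (apiConnCon.Acquire / Release). A generic sketch of that pattern, with names invented for illustration and using the standard weighted semaphore, looks like this:

```go
// Hypothetical sketch of the "semaphore around API queries" pattern described
// in the package documentation; apiConnLimiter and its methods are invented
// names, not the package's real apiConnCon type.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// apiConnLimiter caps the number of concurrent API-originated DB queries.
type apiConnLimiter struct {
	sem *semaphore.Weighted
}

func newAPIConnLimiter(maxConns int64) *apiConnLimiter {
	return &apiConnLimiter{sem: semaphore.NewWeighted(maxConns)}
}

// Acquire blocks until a slot is free (or the context is cancelled).
func (l *apiConnLimiter) Acquire(ctx context.Context) error {
	return l.sem.Acquire(ctx, 1)
}

// Release frees the slot once the query has finished.
func (l *apiConnLimiter) Release() { l.sem.Release(1) }

func main() {
	limiter := newAPIConnLimiter(4)
	if err := limiter.Acquire(context.Background()); err != nil {
		fmt.Println("acquire:", err)
		return
	}
	defer limiter.Release()
	fmt.Println("run the SQL query while holding the slot")
}
```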
import (
@@ -311,6 +311,28 @@ func TestGetPending(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestL2DB_GetPoolTxs(t *testing.T) {
|
||||
err := prepareHistoryDB(historyDB)
|
||||
if err != nil {
|
||||
log.Error("Error prepare historyDB", err)
|
||||
}
|
||||
poolL2Txs, err := generatePoolL2Txs()
|
||||
require.NoError(t, err)
|
||||
state := common.PoolL2TxState("pend")
|
||||
idx := common.Idx(256)
|
||||
var pendingTxs []*common.PoolL2Tx
|
||||
for i := range poolL2Txs {
|
||||
if poolL2Txs[i].FromIdx == idx || poolL2Txs[i].ToIdx == idx {
|
||||
err := l2DB.AddTxTest(&poolL2Txs[i])
|
||||
require.NoError(t, err)
|
||||
pendingTxs = append(pendingTxs, &poolL2Txs[i])
|
||||
}
|
||||
}
|
||||
fetchedTxs, err := l2DBWithACC.GetPoolTxs(&idx, &idx, &state)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(pendingTxs), len(fetchedTxs))
|
||||
}
|
||||
|
||||
func TestStartForging(t *testing.T) {
|
||||
// Generate txs
|
||||
var fakeBatchNum common.BatchNum = 33
|
||||
|
||||
@@ -1,10 +1,3 @@
/*
Package db has some common utilities shared by db/l2db and db/historydb; the most relevant ones are:
- SQL connection utilities
- Managing the SQL schema: this is done using migration files placed under db/migrations. The files are executed in
order of the file name.
- Custom meddlers: used to easily transform struct <==> table
*/
package db

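The struct <==> table mapping can be illustrated with a small hedged example of meddler usage; the demoBlock struct, query, and connection string are invented for illustration and rely only on a meddler call that already appears in this diff (meddler.QueryAll).

```go
// Hypothetical illustration of struct <==> table mapping with meddler; the
// demoBlock struct and the query are examples, not this repository's code.
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq"
	"github.com/russross/meddler"
)

// demoBlock maps one row of a hypothetical block table; each meddler tag names
// the column the field is read from / written to.
type demoBlock struct {
	Num       int64     `meddler:"eth_block_num"`
	Hash      []byte    `meddler:"hash"`
	Timestamp time.Time `meddler:"timestamp"`
}

func main() {
	// Placeholder DSN; a real deployment takes this from the configuration file.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/hermez?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var blocks []*demoBlock
	// QueryAll scans every returned row into the slice using the meddler tags.
	if err := meddler.QueryAll(db, &blocks,
		"SELECT eth_block_num, hash, timestamp FROM block ORDER BY eth_block_num DESC LIMIT $1;", 10,
	); err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d blocks", len(blocks))
}
```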
import (
metric/metric.go (192 lines deleted)
@@ -1,192 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type (
|
||||
// Metric represents the metric type
|
||||
Metric string
|
||||
)
|
||||
|
||||
const (
|
||||
namespaceError = "error"
|
||||
namespaceSync = "synchronizer"
|
||||
namespaceTxSelector = "txselector"
|
||||
namespaceAPI = "api"
|
||||
)
|
||||
|
||||
var (
|
||||
// Errors errors count metric.
|
||||
Errors = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespaceError,
|
||||
Name: "errors",
|
||||
Help: "",
|
||||
}, []string{"error"})
|
||||
|
||||
// WaitServerProof duration time to get the calculated
|
||||
// proof from the server.
|
||||
WaitServerProof = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "wait_server_proof",
|
||||
Help: "",
|
||||
}, []string{"batch_number", "pipeline_number"})
|
||||
|
||||
// Reorgs block reorg count
|
||||
Reorgs = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "reorgs",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// LastBlockNum last block synced
|
||||
LastBlockNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "synced_last_block_num",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// EthLastBlockNum last eth block synced
|
||||
EthLastBlockNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "eth_last_block_num",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// LastBatchNum last batch synced
|
||||
LastBatchNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "synced_last_batch_num",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// EthLastBatchNum last eth batch synced
|
||||
EthLastBatchNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceSync,
|
||||
Name: "eth_last_batch_num",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// GetL2TxSelection L2 tx selection count
|
||||
GetL2TxSelection = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "get_l2_txselection_total",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// GetL1L2TxSelection L1L2 tx selection count
|
||||
GetL1L2TxSelection = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "get_l1_l2_txselection_total",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// SelectedL1CoordinatorTxs selected L1 coordinator tx count
|
||||
SelectedL1CoordinatorTxs = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "selected_l1_coordinator_txs",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// SelectedL1UserTxs selected L1 user tx count
|
||||
SelectedL1UserTxs = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "selected_l1_user_txs",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// SelectedL2Txs selected L2 tx count
|
||||
SelectedL2Txs = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "selected_l2_txs",
|
||||
Help: "",
|
||||
})
|
||||
|
||||
// DiscardedL2Txs discarded L2 tx count
|
||||
DiscardedL2Txs = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespaceTxSelector,
|
||||
Name: "discarded_l2_txs",
|
||||
Help: "",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
if err := registerCollectors(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
func registerCollectors() error {
|
||||
if err := registerCollector(Errors); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(WaitServerProof); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(Reorgs); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(LastBlockNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(LastBatchNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(EthLastBlockNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(EthLastBatchNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(GetL2TxSelection); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(GetL1L2TxSelection); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(SelectedL1CoordinatorTxs); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := registerCollector(SelectedL1UserTxs); err != nil {
|
||||
return err
|
||||
}
|
||||
return registerCollector(DiscardedL2Txs)
|
||||
}
|
||||
|
||||
func registerCollector(collector prometheus.Collector) error {
|
||||
err := prometheus.Register(collector)
|
||||
if err != nil {
|
||||
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MeasureDuration measure the method execution duration
|
||||
// and save it into a histogram metric
|
||||
func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) {
|
||||
duration := time.Since(start)
|
||||
histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds()))
|
||||
}
|
||||
|
||||
// CollectError collect the error message and increment
|
||||
// the error count
|
||||
func CollectError(err error) {
|
||||
Errors.With(map[string]string{"error": err.Error()}).Inc()
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
favicon = "/favicon.ico"
|
||||
)
|
||||
|
||||
// Prometheus contains the metrics gathered by the instance and its path
|
||||
type Prometheus struct {
|
||||
reqCnt *prometheus.CounterVec
|
||||
reqDur *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
// NewPrometheus generates a new set of metrics with a certain subsystem name
|
||||
func NewPrometheus() (*Prometheus, error) {
|
||||
reqCnt := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespaceAPI,
|
||||
Name: "requests_total",
|
||||
Help: "How many HTTP requests processed, partitioned by status code and HTTP method",
|
||||
},
|
||||
[]string{"code", "method", "path"},
|
||||
)
|
||||
if err := registerCollector(reqCnt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqDur := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespaceAPI,
|
||||
Name: "request_duration_seconds",
|
||||
Help: "The HTTP request latencies in seconds",
|
||||
},
|
||||
[]string{"code", "method", "path"},
|
||||
)
|
||||
if err := registerCollector(reqDur); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Prometheus{
|
||||
reqCnt: reqCnt,
|
||||
reqDur: reqDur,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrometheusMiddleware creates the prometheus collector and
|
||||
// defines status handler function for the middleware
|
||||
func PrometheusMiddleware() (gin.HandlerFunc, error) {
|
||||
p, err := NewPrometheus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Middleware(), nil
|
||||
}
|
||||
|
||||
// Middleware defines status handler function for middleware
|
||||
func (p *Prometheus) Middleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if c.Request.URL.Path == favicon {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
start := time.Now()
|
||||
c.Next()
|
||||
|
||||
status := strconv.Itoa(c.Writer.Status())
|
||||
elapsed := float64(time.Since(start)) / float64(time.Second)
|
||||
fullPath := c.FullPath()
|
||||
|
||||
p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed)
|
||||
p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc()
|
||||
}
|
||||
}
|
||||
@@ -616,6 +616,12 @@ type NodeAPI struct { //nolint:golint
|
||||
addr string
|
||||
}
|
||||
|
||||
func handleNoRoute(c *gin.Context) {
|
||||
c.JSON(http.StatusNotFound, gin.H{
|
||||
"error": "404 page not found",
|
||||
})
|
||||
}
|
||||
|
||||
// NewNodeAPI creates a new NodeAPI (which internally calls api.NewAPI)
|
||||
func NewNodeAPI(
|
||||
addr string,
|
||||
@@ -625,6 +631,7 @@ func NewNodeAPI(
|
||||
l2db *l2db.L2DB,
|
||||
) (*NodeAPI, error) {
|
||||
engine := gin.Default()
|
||||
engine.NoRoute(handleNoRoute)
|
||||
engine.Use(cors.Default())
|
||||
_api, err := api.NewAPI(
|
||||
coordinatorEndpoints, explorerEndpoints,
|
||||
|
||||
@@ -173,10 +173,6 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
|
||||
tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr)
|
||||
case UpdateMethodTypeStatic:
|
||||
tokenPrice = token.StaticValue
|
||||
if tokenPrice == float64(0) {
|
||||
log.Warn("token price is set to 0. Probably StaticValue is not put in the configuration file,",
|
||||
"token", token.Symbol)
|
||||
}
|
||||
case UpdateMethodTypeIgnore:
|
||||
continue
|
||||
}
|
||||
|
||||
synchronizer/metrics.go (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
package synchronizer
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
var (
|
||||
metricReorgsCount = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "sync_reorgs",
|
||||
Help: "",
|
||||
},
|
||||
)
|
||||
metricSyncedLastBlockNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "sync_synced_last_block_num",
|
||||
Help: "",
|
||||
},
|
||||
)
|
||||
metricEthLastBlockNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "sync_eth_last_block_num",
|
||||
Help: "",
|
||||
},
|
||||
)
|
||||
metricSyncedLastBatchNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "sync_synced_last_batch_num",
|
||||
Help: "",
|
||||
},
|
||||
)
|
||||
metricEthLastBatchNum = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "sync_eth_last_batch_num",
|
||||
Help: "",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(metricReorgsCount)
|
||||
prometheus.MustRegister(metricSyncedLastBlockNum)
|
||||
prometheus.MustRegister(metricEthLastBlockNum)
|
||||
prometheus.MustRegister(metricSyncedLastBatchNum)
|
||||
prometheus.MustRegister(metricEthLastBatchNum)
|
||||
}
|
||||
@@ -47,7 +47,6 @@ import (
|
||||
"github.com/hermeznetwork/hermez-node/db/statedb"
|
||||
"github.com/hermeznetwork/hermez-node/eth"
|
||||
"github.com/hermeznetwork/hermez-node/log"
|
||||
"github.com/hermeznetwork/hermez-node/metric"
|
||||
"github.com/hermeznetwork/hermez-node/txprocessor"
|
||||
"github.com/hermeznetwork/tracerr"
|
||||
)
|
||||
@@ -582,7 +581,6 @@ func (s *Synchronizer) Sync(ctx context.Context,
|
||||
return nil, nil, tracerr.Wrap(err)
|
||||
}
|
||||
discarded := lastSavedBlock.Num - lastDBBlockNum
|
||||
metric.Reorgs.Inc()
|
||||
return nil, &discarded, nil
|
||||
}
|
||||
}
|
||||
@@ -675,16 +673,16 @@ func (s *Synchronizer) Sync(ctx context.Context,
|
||||
}
|
||||
|
||||
for _, batchData := range rollupData.Batches {
|
||||
metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum))
|
||||
metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
|
||||
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
|
||||
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
|
||||
log.Debugw("Synced batch",
|
||||
"syncLastBatch", batchData.Batch.BatchNum,
|
||||
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
|
||||
"ethLastBatch", s.stats.Eth.LastBatchNum,
|
||||
)
|
||||
}
|
||||
metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
|
||||
metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
|
||||
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
|
||||
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
|
||||
log.Debugw("Synced block",
|
||||
"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
|
||||
"syncBlocksPerc", s.stats.blocksPerc(),
|
||||
|
||||
@@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
||||
}
|
||||
// TxSelector select the transactions for the next Batch
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
||||
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
||||
// TxSelector select the transactions for the next Batch
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
|
||||
// TxSelector select the transactions for the next Batch
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
|
||||
// TxSelector select the transactions for the next Batch
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
|
||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||
// TxSelector select the transactions for the next Batch
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
|
||||
// TxSelector select the transactions for the next Batch
|
||||
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
|
||||
require.NoError(t, err)
|
||||
// BatchBuilder build Batch
|
||||
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
|
||||
@@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
|
||||
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||
txsel.GetL1L2TxSelection(txprocConfig, nil, nil)
|
||||
txsel.GetL1L2TxSelection(txprocConfig, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(coordIdxs))
|
||||
assert.Equal(t, 0, len(oL1UserTxs))
|
||||
|
||||
txselector/metrics.go (new file, 53 lines)
@@ -0,0 +1,53 @@
+package txselector
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+    metricGetL2TxSelection = prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Name: "txsel_get_l2_txselecton_total",
+            Help: "",
+        },
+    )
+    metricGetL1L2TxSelection = prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Name: "txsel_get_l1_l2_txselecton_total",
+            Help: "",
+        },
+    )
+
+    metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "txsel_selected_l1_coordinator_txs",
+            Help: "",
+        },
+    )
+    metricSelectedL1UserTxs = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "txsel_selected_l1_user_txs",
+            Help: "",
+        },
+    )
+    metricSelectedL2Txs = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "txsel_selected_l2_txs",
+            Help: "",
+        },
+    )
+    metricDiscardedL2Txs = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "txsel_discarded_l2_txs",
+            Help: "",
+        },
+    )
+)
+
+func init() {
+    prometheus.MustRegister(metricGetL2TxSelection)
+    prometheus.MustRegister(metricGetL1L2TxSelection)
+
+    prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
+    prometheus.MustRegister(metricSelectedL1UserTxs)
+    prometheus.MustRegister(metricSelectedL2Txs)
+    prometheus.MustRegister(metricDiscardedL2Txs)
+}
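Note (illustrative, not part of this diff): the counters and gauges registered above only become visible once the binary exposes a Prometheus scrape endpoint. Below is a minimal sketch using the same client library; the standalone main, the /metrics route and the :9090 port are assumptions for the example, not taken from this repository.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus/promhttp"

    // Blank-import the package so its init() registers the txsel_* collectors
    // on the default Prometheus registry. In the node binary this is not
    // needed, since the coordinator already imports txselector.
    _ "github.com/hermeznetwork/hermez-node/txselector"
)

func main() {
    // promhttp.Handler() serves every collector registered on the default
    // registry, which includes the txsel_* counters and gauges above.
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":9090", nil))
}

Registering package-local collectors in init() is what lets this side of the comparison drop the shared hermez-node/metric dependency that the other side uses.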
@@ -13,7 +13,6 @@ import (
    "github.com/hermeznetwork/hermez-node/db/l2db"
    "github.com/hermeznetwork/hermez-node/db/statedb"
    "github.com/hermeznetwork/hermez-node/log"
-    "github.com/hermeznetwork/hermez-node/metric"
    "github.com/hermeznetwork/hermez-node/txprocessor"
    "github.com/hermeznetwork/tracerr"
    "github.com/iden3/go-iden3-crypto/babyjub"
@@ -85,7 +84,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
    tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
    // check if CoordinatorAccount for TokenID is already pending to create
-    if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID,
+    if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID,
        txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
        return nil, positionL1, nil
    }
@@ -122,12 +121,11 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// but there is a transactions to them and the authorization of account
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
-func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx,
+func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx,
    [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
-    metric.GetL2TxSelection.Inc()
+    metricGetL2TxSelection.Inc()
    coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
-        discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig,
-        []common.L1Tx{}, l1UserFutureTxs)
+        discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
    return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
        discardedL2Txs, tracerr.Wrap(err)
}
@@ -141,11 +139,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
-    l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
+    l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
    []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
-    metric.GetL1L2TxSelection.Inc()
+    metricGetL1L2TxSelection.Inc()
    coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
-        discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs)
+        discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
    return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
        discardedL2Txs, tracerr.Wrap(err)
}
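Note (illustrative, not part of this diff): on this side of the comparison GetL1L2TxSelection takes only the forgeable L1UserTxs, so callers pass one slice instead of two. A hedged sketch of the call shape follows; the package name, the forgeNextBatch helper and the logged fields are made up for the example, while the types and the GetL1L2TxSelection signature are the ones shown above.

package coordinatorexample // hypothetical package

import (
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/hermeznetwork/hermez-node/log"
    "github.com/hermeznetwork/hermez-node/txprocessor"
    "github.com/hermeznetwork/hermez-node/txselector"
    "github.com/hermeznetwork/tracerr"
)

// forgeNextBatch runs one selection round and logs what was picked.
func forgeNextBatch(txsel *txselector.TxSelector, cfg txprocessor.Config,
    l1UserTxs []common.L1Tx) error {
    coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
        txsel.GetL1L2TxSelection(cfg, l1UserTxs) // no l1UserFutureTxs argument on this branch
    if err != nil {
        return tracerr.Wrap(err)
    }
    log.Debugw("txsel selection",
        "coordIdxs", coordIdxs, "accAuths", len(accAuths),
        "l1UserTxs", len(oL1UserTxs), "l1CoordTxs", len(oL1CoordTxs),
        "l2Txs", len(oL2Txs), "discardedL2Txs", len(discardedL2Txs))
    return nil
}

The seven return values are the same on both sides of the diff; only the arity of the call changes.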
@@ -159,7 +157,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
-    l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
+    l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
    []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
    // WIP.0: the TxSelector is not optimized and will need a redesign. The
    // current version is implemented in order to have a functional
@@ -223,11 +221,10 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
            return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
        }

-        metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-        metric.SelectedL1CoordinatorTxs.Set(0)
-        metric.SelectedL2Txs.Set(0)
-        metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
-
+        metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+        metricSelectedL1CoordinatorTxs.Set(0)
+        metricSelectedL2Txs.Set(0)
+        metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
        return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
    }

@@ -236,7 +233,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
    var validTxs, discardedL2Txs []common.PoolL2Tx
    l2TxsForgable = sortL2Txs(l2TxsForgable)
    accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
-        txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs,
+        txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
            l2TxsForgable, validTxs, discardedL2Txs)
    if err != nil {
        return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
@@ -250,8 +247,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
    var l1CoordinatorTxs2 []common.L1Tx
    accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
        txsel.processL2Txs(tp, selectionConfig,
-            len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs,
-            l2TxsNonForgable, validTxs, discardedL2Txs)
+            len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
+            validTxs, discardedL2Txs)
    if err != nil {
        return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
    }
@@ -323,18 +320,17 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
        return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
    }

-    metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
-    metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-    metric.SelectedL2Txs.Set(float64(len(validTxs)))
-    metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+    metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+    metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
+    metricSelectedL2Txs.Set(float64(len(validTxs)))
+    metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))

    return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}

func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
-    selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx,
-    l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx,
-    []common.PoolL2Tx, []common.PoolL2Tx, error) {
+    selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
+    [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
    var l1CoordinatorTxs []common.L1Tx
    positionL1 := nL1Txs
    var accAuths [][]byte
@@ -436,8 +432,7 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
        if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
            validL2Tx, l1CoordinatorTx, accAuth, err :=
                txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
-                    nL1Txs, l1UserFutureTxs, l1CoordinatorTxs,
-                    positionL1, l2Txs[i])
+                    nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
            if err != nil {
                log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
                // Discard L2Tx, and update Info parameter of
@@ -577,35 +572,18 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
// l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
// array.
func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
-    selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs,
-    l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) (
-    *common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) {
+    selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx,
+    positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx,
+    *common.AccountCreationAuth, error) {
    // if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
    // previous L2Tx in the current process already created a
    // L1CoordinatorTx of this type, in the DB there still seem that needs
    // to create a new L1CoordinatorTx, but as is already created, the tx
    // is valid
-    if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
+    if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
        return &l2Tx, nil, nil, nil
    }

-    // check if L2Tx receiver account will be created by a L1UserFutureTxs
-    // (in the next batch, the current frozen queue). In that case, the L2Tx
-    // will be discarded at the current batch, even if there is an
-    // AccountCreationAuth for the account, as there is a L1UserTx in the
-    // frozen queue that will create the receiver Account. The L2Tx is
-    // discarded to avoid the Coordinator creating a new L1CoordinatorTx to
-    // create the receiver account, which will be also created in the next
-    // batch from the L1UserFutureTx, ending with the user having 2
-    // different accounts for the same TokenID. The double account creation
-    // is supported by the Hermez zkRollup specification, but it was decided
-    // to mitigate it at the TxSelector level for the explained cases.
-    if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
-        return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" +
-            " receiver account does not exist yet, and there is a L1UserTx that" +
-            " will create that account in a future batch.")
-    }
-
    var l1CoordinatorTx *common.L1Tx
    var accAuth *common.AccountCreationAuth
    if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
@@ -708,7 +686,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
    return &l2Tx, l1CoordinatorTx, accAuth, nil
}

-func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
+func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
    addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
    for i := 0; i < len(l1CoordinatorTxs); i++ {
        if l1CoordinatorTxs[i].FromEthAddr == addr &&
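Note (illustrative, not part of this diff): only the first lines of the renamed helper are visible above. The sketch below reconstructs the kind of duplicate-pending-creation check it performs, mirroring the comparison pattern of the removed checkPendingToCreateFutureTxs in the next hunk; the helper name, package name and the exact set of compared fields are assumptions, not repository code.

package txselsketch // hypothetical package

import (
    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/iden3/go-iden3-crypto/babyjub"
)

// hasPendingAccountCreation reports whether l1Txs already queues an account
// creation for (addr, tokenID): either with the exact BJJ key, or when the
// caller did not specify any BJJ key at all.
func hasPendingAccountCreation(l1Txs []common.L1Tx, tokenID common.TokenID,
    addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
    for i := 0; i < len(l1Txs); i++ {
        // exact match on Ethereum address, token and BJJ key
        if l1Txs[i].FromEthAddr == addr && l1Txs[i].TokenID == tokenID &&
            l1Txs[i].FromBJJ == bjj {
            return true
        }
        // lookup without a BJJ key (ToEthAddr-only transfer)
        if l1Txs[i].FromEthAddr == addr && l1Txs[i].TokenID == tokenID &&
            bjj == common.EmptyBJJComp {
            return true
        }
    }
    return false
}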
@@ -720,23 +698,6 @@ func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID commo
    return false
}

-func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID,
-    addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
-    for i := 0; i < len(l1UserFutureTxs); i++ {
-        if l1UserFutureTxs[i].FromEthAddr == addr &&
-            l1UserFutureTxs[i].TokenID == tokenID &&
-            l1UserFutureTxs[i].FromBJJ == bjj {
-            return true
-        }
-        if l1UserFutureTxs[i].FromEthAddr == addr &&
-            l1UserFutureTxs[i].TokenID == tokenID &&
-            common.EmptyBJJComp == bjj {
-            return true
-        }
-    }
-    return false
-}
-
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
    // Sort by absolute fee with SliceStable, so that txs with same
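Note (illustrative, not part of this diff): only the header of sortL2Txs is visible here. The sketch below shows the two-key stable ordering its doc comment describes (AbsoluteFee first, Nonce as tie-breaker); treating the fee as the descending primary key is an assumption about the exact ordering, and the helper is not the repository implementation.

package txselsketch // hypothetical package

import (
    "sort"

    "github.com/hermeznetwork/hermez-node/common"
)

// sortByFeeThenNonce returns a copy of txs ordered by AbsoluteFee (highest
// first) and, for equal fees, by Nonce (lowest first). sort.SliceStable keeps
// the incoming relative order of otherwise-equal transactions.
func sortByFeeThenNonce(txs []common.PoolL2Tx) []common.PoolL2Tx {
    out := make([]common.PoolL2Tx, len(txs))
    copy(out, txs)
    sort.SliceStable(out, func(i, j int) bool {
        if out[i].AbsoluteFee != out[j].AbsoluteFee {
            return out[i].AbsoluteFee > out[j].AbsoluteFee
        }
        return out[i].Nonce < out[j].Nonce
    })
    return out
}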
@@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:1")
    l1UserTxs := []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:2")
    l1UserTxs = []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:3")
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 2, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:4")
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 1, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:5")
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    log.Debug("block:0 batch:6")
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 1, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
    coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
    assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
@@ -328,7 +328,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
    coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
    assert.Equal(t, 0, len(accAuths))
@@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
    assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
    coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, []common.Idx{263}, coordIdxs)
    assert.Equal(t, 0, len(accAuths))
@@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
    }
    // batch1
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    // 1st TransferToEthAddr
    expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
@@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {

    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 3, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {

    l1UserTxs = []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
    // initial PoolExit, which now is valid as B has enough Balance
    l1UserTxs = []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    assert.Equal(t, 0, len(oL1CoordTxs))
@@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) {
    // batch1 to freeze L1UserTxs that will create some accounts with
    // positive balance
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    // Transfer is ToBJJ to a BJJ-only account that doesn't exist
@@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) {

    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 4, len(oL1UserTxs))
    // We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
@@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) {

    l1UserTxs = []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    // Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
@@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) {

    l1UserTxs = []common.L1Tx{}
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 0, len(oL1UserTxs))
    // We expect the coordinator to add an L1CoordTx to create an account
@@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
    }
    // batch1 to freeze L1UserTxs
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    // 8 transfers from the same account
@@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
    // transfers from account A
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 3, len(oL1UserTxs))
    require.Equal(t, 0, len(oL1CoordTxs))
@@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
    }
    // batch1 to freeze L1UserTxs
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    batchPoolL2 := `
@@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {

    // select L1 & L2 txs
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    require.Equal(t, 3, len(oL1UserTxs))
    require.Equal(t, 0, len(oL1CoordTxs))
@@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
    // batch 3
    l1UserTxs = []common.L1Tx{}
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    require.Equal(t, 0, len(oL1UserTxs))
@@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
    // batch 4
    l1UserTxs = []common.L1Tx{}
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    require.Equal(t, 0, len(oL1UserTxs))
@@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) {
    }
    // batch1 to freeze L1UserTxs
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

-    // 3 transfers from the same account
+    // 8 transfers from the same account
    batchPoolL2 := `
    Type: PoolL2
    PoolTransfer(0) A-B: 10 (126)
@@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) {

    // add the PoolL2Txs to the l2DB
    addL2Txs(t, txsel, poolL2Txs)
-    // batch 2 to crate some accounts with positive balance, and do 3 L2Tx transfers from account A
+    // batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    assert.Equal(t, 3, len(oL1UserTxs))
    require.Equal(t, 0, len(oL1CoordTxs))
@@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
    }
    // batch1 to freeze L1UserTxs
    l1UserTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    // batch 2 to crate the accounts (from L1UserTxs)
@@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {

    // select L1 & L2 txs
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)
    require.Equal(t, 3, len(oL1UserTxs))
    require.Equal(t, 0, len(oL1CoordTxs))
@@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
    addL2Txs(t, txsel, poolL2Txs)
    l1UserTxs = []common.L1Tx{}
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    require.Equal(t, 0, len(oL1UserTxs))
@@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {

    // batch 4. In this Batch, account B has enough balance to send the txs
    _, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
+        txsel.GetL1L2TxSelection(tpc, l1UserTxs)
    require.NoError(t, err)

    require.Equal(t, 0, len(oL1UserTxs))
@@ -1038,112 +1038,3 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
    require.Equal(t, 3, len(discardedL2Txs))
    require.Equal(t, 0, len(accAuths))
}
-
-func TestL1UserFutureTxs(t *testing.T) {
-    set := `
-        Type: Blockchain
-
-        CreateAccountDeposit(0) Coord: 0
-        CreateAccountDeposit(0) A: 100
-
-        > batchL1 // freeze L1User{2}
-        CreateAccountDeposit(0) B: 18
-        > batchL1 // forge L1User{2}, freeze L1User{1}
-        > batchL1 // forge L1User{1}
-        > block
-    `
-
-    chainID := uint16(0)
-    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
-    blocks, err := tc.GenerateBlocks(set)
-    assert.NoError(t, err)
-
-    hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-    txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
-
-    // restart nonces of TilContext, as will be set by generating directly
-    // the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
-    tc.RestartNonces()
-
-    tpc := txprocessor.Config{
-        NLevels: 16,
-        MaxFeeTx: 10,
-        MaxTx: 10,
-        MaxL1Tx: 10,
-        ChainID: chainID,
-    }
-    // batch1 to freeze L1UserTxs
-    l1UserTxs := []common.L1Tx{}
-    l1UserFutureTxs := []common.L1Tx{}
-    _, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
-    require.NoError(t, err)
-
-    batchPoolL2 := `
-    Type: PoolL2
-    PoolTransferToEthAddr(0) A-B: 10 (126)
-    `
-    poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
-    require.NoError(t, err)
-    require.Equal(t, 1, len(poolL2Txs))
-
-    // add AccountCreationAuth for B
-    _ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
-    // add the PoolL2Txs to the l2DB
-    addL2Txs(t, txsel, poolL2Txs)
-    // batch 2 to crate some accounts with positive balance, and do 1 L2Tx transfer from account A
-    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
-    l1UserFutureTxs =
-        til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
-    require.Equal(t, 2, len(l1UserTxs))
-    require.Equal(t, 1, len(l1UserFutureTxs))
-    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
-    require.NoError(t, err)
-    assert.Equal(t, 2, len(oL1UserTxs))
-    require.Equal(t, 0, len(oL1CoordTxs))
-    // no L2Tx selected due the L1UserFutureTx, the L2Tx will be processed
-    // at the next batch once the L1UserTx of CreateAccount B is processed,
-    // despite that there is an AccountCreationAuth for Account B.
-    assert.Equal(t, 0, len(oL2Txs))
-    assert.Equal(t, 1, len(discardedL2Txs))
-    assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+
-        " discarded at the current batch, as the receiver account does"+
-        " not exist yet, and there is a L1UserTx that will create that"+
-        " account in a future batch.",
-        discardedL2Txs[0].Info)
-
-    err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
-        txsel.localAccountsDB.CurrentBatch())
-    require.NoError(t, err)
-
-    l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
-    l1UserFutureTxs = []common.L1Tx{}
-    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
-    require.NoError(t, err)
-    assert.Equal(t, 1, len(oL1UserTxs))
-    require.Equal(t, 0, len(oL1CoordTxs))
-    // L2Tx selected as now the L1UserTx of CreateAccount B is processed
-    assert.Equal(t, 1, len(oL2Txs))
-    assert.Equal(t, 0, len(discardedL2Txs))
-    err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
-        txsel.localAccountsDB.CurrentBatch())
-    require.NoError(t, err)
-
-    // generate a new L2Tx A-B and check that is processed
-    poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
-    require.NoError(t, err)
-    require.Equal(t, 1, len(poolL2Txs))
-    // add the PoolL2Txs to the l2DB
-    addL2Txs(t, txsel, poolL2Txs)
-    _, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-        txsel.GetL1L2TxSelection(tpc, nil, nil)
-    require.NoError(t, err)
-    assert.Equal(t, 0, len(oL1UserTxs))
-    require.Equal(t, 0, len(oL1CoordTxs))
-    assert.Equal(t, 1, len(oL2Txs))
-    assert.Equal(t, 0, len(discardedL2Txs))
-    err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
-        txsel.localAccountsDB.CurrentBatch())
-    require.NoError(t, err)
-}