Browse Source

Merge pull request #179 from hermeznetwork/feature/api-exits

Impl exit endpoint and refactor pagination
feature/sql-semaphore1
arnau 4 years ago
committed by GitHub
parent
commit
c145147e6e
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 1365 additions and 562 deletions
  1. +566
    -276
      api/api_test.go
  2. +84
    -26
      api/dbtoapistructs.go
  3. +77
    -42
      api/handlers.go
  4. +115
    -36
      api/parsers.go
  5. +22
    -28
      api/parsers_test.go
  6. +196
    -98
      api/swagger.yml
  7. +17
    -0
      common/tx.go
  8. +184
    -24
      db/historydb/historydb.go
  9. +8
    -2
      db/historydb/historydb_test.go
  10. +28
    -0
      db/historydb/views.go
  11. +32
    -25
      db/migrations/0001.sql
  12. +15
    -0
      db/utils.go
  13. +21
    -5
      test/historydb.go

+ 566
- 276
api/api_test.go
File diff suppressed because it is too large
View File


+ 84
- 26
api/dbtoapistructs.go

@ -7,36 +7,52 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/iden3/go-merkletree"
) )
// Commons of the API
type errorMsg struct {
Message string
}
type pagination struct {
TotalItems int `json:"totalItems"`
LastReturnedItem int `json:"lastReturnedItem"`
func bjjToString(bjj *babyjub.PublicKey) string {
pkComp := [32]byte(bjj.Compress())
sum := pkComp[0]
for i := 1; i < len(pkComp); i++ {
sum += pkComp[i]
}
bjjSum := append(pkComp[:], sum)
return "hez:" + base64.RawURLEncoding.EncodeToString(bjjSum)
} }
//nolint:govet this is a temp patch to avoid running the test
type paginationer interface {
GetPagination() pagination
Len() int
func ethAddrToHez(addr ethCommon.Address) string {
return "hez:" + addr.String()
} }
type errorMsg struct {
Message string
func idxToHez(idx common.Idx, tokenSymbol string) string {
return "hez:" + tokenSymbol + ":" + strconv.Itoa(int(idx))
} }
// History Tx related
// History Tx
type historyTxsAPI struct { type historyTxsAPI struct {
Txs []historyTxAPI `json:"transactions"` Txs []historyTxAPI `json:"transactions"`
Pagination pagination `json:"pagination"`
Pagination *db.Pagination `json:"pagination"`
} }
func (htx *historyTxsAPI) GetPagination() pagination { return htx.Pagination }
func (htx *historyTxsAPI) Len() int { return len(htx.Txs) }
func (htx *historyTxsAPI) GetPagination() *db.Pagination {
if htx.Txs[0].ItemID < htx.Txs[len(htx.Txs)-1].ItemID {
htx.Pagination.FirstReturnedItem = htx.Txs[0].ItemID
htx.Pagination.LastReturnedItem = htx.Txs[len(htx.Txs)-1].ItemID
} else {
htx.Pagination.LastReturnedItem = htx.Txs[0].ItemID
htx.Pagination.FirstReturnedItem = htx.Txs[len(htx.Txs)-1].ItemID
}
return htx.Pagination
}
func (htx *historyTxsAPI) Len() int { return len(htx.Txs) }
type l1Info struct { type l1Info struct {
ToForgeL1TxsNum *int64 `json:"toForgeL1TransactionsNum"` ToForgeL1TxsNum *int64 `json:"toForgeL1TransactionsNum"`
@ -57,6 +73,7 @@ type l2Info struct {
type historyTxAPI struct { type historyTxAPI struct {
IsL1 string `json:"L1orL2"` IsL1 string `json:"L1orL2"`
TxID string `json:"id"` TxID string `json:"id"`
ItemID int `json:"itemId"`
Type common.TxType `json:"type"` Type common.TxType `json:"type"`
Position int `json:"position"` Position int `json:"position"`
FromIdx *string `json:"fromAccountIndex"` FromIdx *string `json:"fromAccountIndex"`
@ -75,6 +92,7 @@ func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI {
for i := 0; i < len(dbTxs); i++ { for i := 0; i < len(dbTxs); i++ {
apiTx := historyTxAPI{ apiTx := historyTxAPI{
TxID: dbTxs[i].TxID.String(), TxID: dbTxs[i].TxID.String(),
ItemID: dbTxs[i].ItemID,
Type: dbTxs[i].Type, Type: dbTxs[i].Type,
Position: dbTxs[i].Position, Position: dbTxs[i].Position,
ToIdx: idxToHez(dbTxs[i].ToIdx, dbTxs[i].TokenSymbol), ToIdx: idxToHez(dbTxs[i].ToIdx, dbTxs[i].TokenSymbol),
@ -124,20 +142,60 @@ func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI {
return apiTxs return apiTxs
} }
func bjjToString(bjj *babyjub.PublicKey) string {
pkComp := [32]byte(bjj.Compress())
sum := pkComp[0]
for i := 1; i < len(pkComp); i++ {
sum += pkComp[i]
}
bjjSum := append(pkComp[:], sum)
return "hez:" + base64.RawURLEncoding.EncodeToString(bjjSum)
// Exit
type exitsAPI struct {
Exits []exitAPI `json:"exits"`
Pagination *db.Pagination `json:"pagination"`
} }
func ethAddrToHez(addr ethCommon.Address) string {
return "hez:" + addr.String()
func (e *exitsAPI) GetPagination() *db.Pagination {
if e.Exits[0].ItemID < e.Exits[len(e.Exits)-1].ItemID {
e.Pagination.FirstReturnedItem = e.Exits[0].ItemID
e.Pagination.LastReturnedItem = e.Exits[len(e.Exits)-1].ItemID
} else {
e.Pagination.LastReturnedItem = e.Exits[0].ItemID
e.Pagination.FirstReturnedItem = e.Exits[len(e.Exits)-1].ItemID
}
return e.Pagination
}
func (e *exitsAPI) Len() int { return len(e.Exits) }
type exitAPI struct {
ItemID int `json:"itemId"`
BatchNum common.BatchNum `json:"batchNum"`
AccountIdx string `json:"accountIndex"`
MerkleProof *merkletree.CircomVerifierProof `json:"merkleProof"`
Balance string `json:"balance"`
InstantWithdrawn *int64 `json:"instantWithdrawn"`
DelayedWithdrawRequest *int64 `json:"delayedWithdrawRequest"`
DelayedWithdrawn *int64 `json:"delayedWithdrawn"`
Token historydb.TokenRead `json:"token"`
} }
func idxToHez(idx common.Idx, tokenSymbol string) string {
return "hez:" + tokenSymbol + ":" + strconv.Itoa(int(idx))
func historyExitsToAPI(dbExits []historydb.HistoryExit) []exitAPI {
apiExits := []exitAPI{}
for i := 0; i < len(dbExits); i++ {
apiExits = append(apiExits, exitAPI{
ItemID: dbExits[i].ItemID,
BatchNum: dbExits[i].BatchNum,
AccountIdx: idxToHez(dbExits[i].AccountIdx, dbExits[i].TokenSymbol),
MerkleProof: dbExits[i].MerkleProof,
Balance: dbExits[i].Balance.String(),
InstantWithdrawn: dbExits[i].InstantWithdrawn,
DelayedWithdrawRequest: dbExits[i].DelayedWithdrawRequest,
DelayedWithdrawn: dbExits[i].DelayedWithdrawn,
Token: historydb.TokenRead{
TokenID: dbExits[i].TokenID,
EthBlockNum: dbExits[i].TokenEthBlockNum,
EthAddr: dbExits[i].TokenEthAddr,
Name: dbExits[i].TokenName,
Symbol: dbExits[i].TokenSymbol,
Decimals: dbExits[i].TokenDecimals,
USD: dbExits[i].TokenUSD,
USDUpdate: dbExits[i].TokenUSDUpdate,
},
})
}
return apiExits
} }

+ 77
- 42
api/handlers.go

@ -2,23 +2,25 @@ package api
import ( import (
"database/sql" "database/sql"
"errors"
"net/http" "net/http"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb"
) )
// maxLimit is the max permited items to be returned in paginated responses
const maxLimit uint = 2049
const (
// maxLimit is the max permited items to be returned in paginated responses
maxLimit uint = 2049
// dfltLast indicates how paginated endpoints use the query param last if not provided
const dfltLast = false
// dfltOrder indicates how paginated endpoints are ordered if not specified
dfltOrder = historydb.OrderAsc
// dfltLimit indicates the limit of returned items in paginated responses if the query param limit is not provided
const dfltLimit uint = 20
// dfltLimit indicates the limit of returned items in paginated responses if the query param limit is not provided
dfltLimit uint = 20
// 2^32 -1
const maxUint32 = 4294967295
// 2^32 -1
maxUint32 = 4294967295
)
func postAccountCreationAuth(c *gin.Context) { func postAccountCreationAuth(c *gin.Context) {
@ -45,45 +47,71 @@ func getAccount(c *gin.Context) {
} }
func getExits(c *gin.Context) { func getExits(c *gin.Context) {
}
func getExit(c *gin.Context) {
}
func getHistoryTxs(c *gin.Context) {
// Get query parameters // Get query parameters
// TokenID
tokenID, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
// Account filters
tokenID, addr, bjj, idx, err := parseAccountFilters(c)
if err != nil { if err != nil {
retBadReq(err, c) retBadReq(err, c)
return return
} }
// Hez Eth addr
addr, err := parseQueryHezEthAddr(c)
// BatchNum
batchNum, err := parseQueryUint("batchNum", nil, 0, maxUint32, c)
if err != nil { if err != nil {
retBadReq(err, c) retBadReq(err, c)
return return
} }
// BJJ
bjj, err := parseQueryBJJ(c)
// Pagination
fromItem, order, limit, err := parsePagination(c)
if err != nil { if err != nil {
retBadReq(err, c) retBadReq(err, c)
return return
} }
if addr != nil && bjj != nil {
retBadReq(errors.New("bjj and hermezEthereumAddress params are incompatible"), c)
// Fetch exits from historyDB
exits, pagination, err := h.GetExits(
addr, bjj, tokenID, idx, batchNum, fromItem, limit, order,
)
if err != nil {
retSQLErr(err, c)
return
}
// Build succesfull response
apiExits := historyExitsToAPI(exits)
c.JSON(http.StatusOK, &exitsAPI{
Exits: apiExits,
Pagination: pagination,
})
}
func getExit(c *gin.Context) {
// Get batchNum and accountIndex
batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c)
if err != nil {
retBadReq(err, c)
return return
} }
// Idx
idx, err := parseIdx(c)
idx, err := parseParamIdx(c)
if err != nil { if err != nil {
retBadReq(err, c) retBadReq(err, c)
return return
} }
if idx != nil && (addr != nil || bjj != nil || tokenID != nil) {
retBadReq(errors.New("accountIndex is incompatible with BJJ, hermezEthereumAddress and tokenId"), c)
// Fetch tx from historyDB
exit, err := h.GetExit(batchNum, idx)
if err != nil {
retSQLErr(err, c)
return
}
apiExits := historyExitsToAPI([]historydb.HistoryExit{*exit})
// Build succesfull response
c.JSON(http.StatusOK, apiExits[0])
}
func getHistoryTxs(c *gin.Context) {
// Get query parameters
tokenID, addr, bjj, idx, err := parseAccountFilters(c)
if err != nil {
retBadReq(err, c)
return return
} }
// BatchNum // BatchNum
@ -99,15 +127,15 @@ func getHistoryTxs(c *gin.Context) {
return return
} }
// Pagination // Pagination
offset, last, limit, err := parsePagination(c)
fromItem, order, limit, err := parsePagination(c)
if err != nil { if err != nil {
retBadReq(err, c) retBadReq(err, c)
return return
} }
// Fetch txs from historyDB // Fetch txs from historyDB
txs, totalItems, err := h.GetHistoryTxs(
addr, bjj, tokenID, idx, batchNum, txType, offset, limit, *last,
txs, pagination, err := h.GetHistoryTxs(
addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
) )
if err != nil { if err != nil {
retSQLErr(err, c) retSQLErr(err, c)
@ -116,21 +144,28 @@ func getHistoryTxs(c *gin.Context) {
// Build succesfull response // Build succesfull response
apiTxs := historyTxsToAPI(txs) apiTxs := historyTxsToAPI(txs)
lastRet := int(*offset) + len(apiTxs) - 1
if *last {
lastRet = totalItems - 1
}
c.JSON(http.StatusOK, &historyTxsAPI{ c.JSON(http.StatusOK, &historyTxsAPI{
Txs: apiTxs,
Pagination: pagination{
TotalItems: totalItems,
LastReturnedItem: lastRet,
},
Txs: apiTxs,
Pagination: pagination,
}) })
} }
func getHistoryTx(c *gin.Context) { func getHistoryTx(c *gin.Context) {
// Get TxID
txID, err := parseParamTxID(c)
if err != nil {
retBadReq(err, c)
return
}
// Fetch tx from historyDB
tx, err := h.GetHistoryTx(txID)
if err != nil {
retSQLErr(err, c)
return
}
apiTxs := historyTxsToAPI([]historydb.HistoryTx{*tx})
// Build succesfull response
c.JSON(http.StatusOK, apiTxs[0])
} }
func getBatches(c *gin.Context) { func getBatches(c *gin.Context) {

+ 115
- 36
api/parsers.go

@ -9,56 +9,49 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// Query parsers
type querier interface { type querier interface {
Query(string) string Query(string) string
} }
func parsePagination(c querier) (*uint, *bool, *uint, error) {
// Offset
offset := new(uint)
*offset = 0
offset, err := parseQueryUint("offset", offset, 0, maxUint32, c)
if err != nil {
return nil, nil, nil, err
}
// Last
last := new(bool)
*last = dfltLast
last, err = parseQueryBool("last", last, c)
func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err error) {
// FromItem
fromItem, err = parseQueryUint("fromItem", nil, 0, maxUint32, c)
if err != nil { if err != nil {
return nil, nil, nil, err
return nil, "", nil, err
} }
if *last && (offset != nil && *offset > 0) {
return nil, nil, nil, errors.New(
"last and offset are incompatible, provide only one of them",
// Order
order = dfltOrder
const orderName = "order"
orderStr := c.Query(orderName)
if orderStr != "" && !(orderStr == historydb.OrderAsc || historydb.OrderDesc == orderStr) {
return nil, "", nil, errors.New(
"order must have the value " + historydb.OrderAsc + " or " + historydb.OrderDesc,
) )
} }
if orderStr == historydb.OrderAsc {
order = historydb.OrderAsc
} else if orderStr == historydb.OrderDesc {
order = historydb.OrderDesc
}
// Limit // Limit
limit := new(uint)
limit = new(uint)
*limit = dfltLimit *limit = dfltLimit
limit, err = parseQueryUint("limit", limit, 1, maxLimit, c) limit, err = parseQueryUint("limit", limit, 1, maxLimit, c)
if err != nil { if err != nil {
return nil, nil, nil, err
return nil, "", nil, err
} }
return offset, last, limit, nil
return fromItem, order, limit, nil
} }
func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009 res may be not overwriten func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009 res may be not overwriten
str := c.Query(name) str := c.Query(name)
if str != "" {
resInt, err := strconv.Atoi(str)
if err != nil || resInt < 0 || resInt < int(min) || resInt > int(max) {
return nil, fmt.Errorf(
"Inavlid %s. Must be an integer within the range [%d, %d]",
name, min, max)
}
res := uint(resInt)
return &res, nil
}
return dflt, nil
return stringToUint(str, name, dflt, min, max)
} }
func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009 res may be not overwriten func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009 res may be not overwriten
@ -183,19 +176,105 @@ func parseQueryTxType(c querier) (*common.TxType, error) {
) )
} }
func parseIdx(c querier) (*uint, error) {
func parseIdx(c querier) (*common.Idx, error) {
const name = "accountIndex" const name = "accountIndex"
addrStr := c.Query(name)
if addrStr == "" {
idxStr := c.Query(name)
return stringToIdx(idxStr, name)
}
func parseAccountFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.PublicKey, *common.Idx, error) {
// TokenID
tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c)
if err != nil {
return nil, nil, nil, nil, err
}
var tokenID *common.TokenID
if tid != nil {
tokenID = new(common.TokenID)
*tokenID = common.TokenID(*tid)
}
// Hez Eth addr
addr, err := parseQueryHezEthAddr(c)
if err != nil {
return nil, nil, nil, nil, err
}
// BJJ
bjj, err := parseQueryBJJ(c)
if err != nil {
return nil, nil, nil, nil, err
}
if addr != nil && bjj != nil {
return nil, nil, nil, nil,
errors.New("bjj and hermezEthereumAddress params are incompatible")
}
// Idx
idx, err := parseIdx(c)
if err != nil {
return nil, nil, nil, nil, err
}
if idx != nil && (addr != nil || bjj != nil || tokenID != nil) {
return nil, nil, nil, nil,
errors.New("accountIndex is incompatible with BJJ, hermezEthereumAddress and tokenId")
}
return tokenID, addr, bjj, idx, nil
}
// Param parsers
type paramer interface {
Param(string) string
}
func parseParamTxID(c paramer) (common.TxID, error) {
const name = "id"
txIDStr := c.Param(name)
if txIDStr == "" {
return common.TxID{}, fmt.Errorf("%s is required", name)
}
txID, err := common.NewTxIDFromString(txIDStr)
if err != nil {
return common.TxID{}, fmt.Errorf("invalid %s", name)
}
return txID, nil
}
func parseParamIdx(c paramer) (*common.Idx, error) {
const name = "accountIndex"
idxStr := c.Param(name)
return stringToIdx(idxStr, name)
}
func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009 res may be not overwriten
str := c.Param(name)
return stringToUint(str, name, dflt, min, max)
}
func stringToIdx(idxStr, name string) (*common.Idx, error) {
if idxStr == "" {
return nil, nil return nil, nil
} }
splitted := strings.Split(addrStr, ":")
splitted := strings.Split(idxStr, ":")
const expectedLen = 3 const expectedLen = 3
if len(splitted) != expectedLen {
if len(splitted) != expectedLen || splitted[0] != "hez" {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"invalid %s, must follow this: hez:<tokenSymbol>:index", name) "invalid %s, must follow this: hez:<tokenSymbol>:index", name)
} }
// TODO: check that the tokenSymbol match the token related to the account index
idxInt, err := strconv.Atoi(splitted[2]) idxInt, err := strconv.Atoi(splitted[2])
idx := uint(idxInt)
idx := common.Idx(idxInt)
return &idx, err return &idx, err
} }
func stringToUint(uintStr, name string, dflt *uint, min, max uint) (*uint, error) {
if uintStr != "" {
resInt, err := strconv.Atoi(uintStr)
if err != nil || resInt < 0 || resInt < int(min) || resInt > int(max) {
return nil, fmt.Errorf(
"Inavlid %s. Must be an integer within the range [%d, %d]",
name, min, max)
}
res := uint(resInt)
return &res, nil
}
return dflt, nil
}

+ 22
- 28
api/parsers_test.go

@ -111,52 +111,46 @@ func TestParseQueryBool(t *testing.T) {
func TestParsePagination(t *testing.T) { func TestParsePagination(t *testing.T) {
c := &queryParser{} c := &queryParser{}
c.m = make(map[string]string) c.m = make(map[string]string)
// Offset out of range
c.m["offset"] = "-1"
// fromItem out of range
c.m["fromItem"] = "-1"
_, _, _, err := parsePagination(c) _, _, _, err := parsePagination(c)
assert.Error(t, err) assert.Error(t, err)
c.m["offset"] = strconv.Itoa(maxUint32 + 1)
c.m["fromItem"] = strconv.Itoa(maxUint32 + 1)
_, _, _, err = parsePagination(c) _, _, _, err = parsePagination(c)
assert.Error(t, err) assert.Error(t, err)
c.m["offset"] = ""
// Limit out of range
c.m["limit"] = "0"
c.m["fromItem"] = ""
// Bad order
c.m["order"] = "0"
_, _, _, err = parsePagination(c) _, _, _, err = parsePagination(c)
assert.Error(t, err) assert.Error(t, err)
c.m["limit"] = strconv.Itoa(int(maxLimit) + 1)
_, _, _, err = parsePagination(c)
assert.Error(t, err)
c.m["limit"] = ""
// Last and offset
c.m["offset"] = "1"
c.m["last"] = "true"
c.m["order"] = strconv.Itoa(int(maxLimit) + 1)
_, _, _, err = parsePagination(c) _, _, _, err = parsePagination(c)
assert.Error(t, err) assert.Error(t, err)
// Default // Default
c.m["offset"] = ""
c.m["last"] = ""
c.m["fromItem"] = ""
c.m["order"] = ""
c.m["limit"] = "" c.m["limit"] = ""
offset, last, limit, err := parsePagination(c)
fromItem, order, limit, err := parsePagination(c)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 0, int(*offset))
assert.Equal(t, dfltLast, *last)
assert.Nil(t, fromItem)
assert.Equal(t, dfltOrder, order)
assert.Equal(t, dfltLimit, *limit) assert.Equal(t, dfltLimit, *limit)
// Correct // Correct
c.m["offset"] = ""
c.m["last"] = "true"
c.m["fromItem"] = ""
c.m["order"] = "ASC"
c.m["limit"] = "25" c.m["limit"] = "25"
offset, last, limit, err = parsePagination(c)
fromItem, order, limit, err = parsePagination(c)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 0, int(*offset))
assert.True(t, *last)
assert.Nil(t, fromItem)
assert.Equal(t, "ASC", order)
assert.Equal(t, 25, int(*limit)) assert.Equal(t, 25, int(*limit))
c.m["offset"] = "25"
c.m["last"] = "false"
c.m["fromItem"] = "25"
c.m["order"] = "DESC"
c.m["limit"] = "50" c.m["limit"] = "50"
offset, last, limit, err = parsePagination(c)
fromItem, order, limit, err = parsePagination(c)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 25, int(*offset))
assert.False(t, *last)
assert.Equal(t, 25, int(*fromItem))
assert.Equal(t, "DESC", order)
assert.Equal(t, 50, int(*limit)) assert.Equal(t, 50, int(*limit))
} }

+ 196
- 98
api/swagger.yml

@ -7,31 +7,27 @@ info:
* Explorer: List transactions, slots, batches, ... * Explorer: List transactions, slots, batches, ...
* Exchange integrations * Exchange integrations
### Pagination
#### Usage
All the endpoints that return a list of undefined size use pagination. Unless the opposite is explicitly said. All the endpoints that return a list of undefined size use pagination. Unless the opposite is explicitly said.
All the returned items are ordered in ascending chronological order.
This may not be trivial to deduce as the atributes used to order are not timestamps but the protocol ensures that those atributes follow the mentioned chronological order.
Each endpoint description clarifies this in the `offset` description.
In order to use pagination, three query parameters are used:
* `fromItem`: indicates the first item to be returned. In general, this parameter shouldn't be provided in the first call to the endpoint, and use the `itemId` of the last returned item (+/-) 1, if the order is (ascending/descending).
* `order`: all paginated items are ordered chronologically. However the specific fields used to guarantee this order depend on each endpoint. For this purpose, `itemId` is used (itemId follows ascending chronological order except for unforged L1 user transactions). If the parameter is not provided, ascending order will be used by default.
* `limit`: maximum amount of items to include in each response. Default is 20, maximum 2049.
Responses for those endpoint will always include a `pagination` object. This object includes the total amount of items that the endpoint will return at a given time with the given filters. Apart from that, it also includes the `itemId` of the last and first items that will be returned (not in a single response but within the total items). These two properties can be used to know when to stop querying.
The response of the calls to these endpoints will always include a `pagination` object that includes `totalItems` and `lastReturnedItem`.
To iterate over the items the following query parameters are used:
- `offset`: Indicates the first item that will be returned. Defaul 0. Incompatible with `last`.
- `limit`: Indicates the maximum number of returned items. Default 20. Maximum 2049.
- `last`: When true the last `limit` items are returned. Default false. Incompatible with `offset`.
#### Reorgs and safetyness
Iterate items in ascending chronological order:
Since all the items are ordered chronologically, there are no safety problems when fetching items in ascending order, except for reorgs (more on this later).
On the other hand, when iterating in descending order, new items will be added at the beginning. This doesn't cause any safety problem, but to get those new items, it's necessary to start querying without the `fromItem` set to `pagination.lastItem`.
To handle reorgs, the `itemId` can be used, since it will change. This is important since other identifiers may be the same but with different content. As an example, if batch 424 gets reorged, it will be deleted, but eventually a new batch 424 will appear with potentially different content.
1. Call the endpoint with no `offset` nor `last`.
2. Call the endpoint with `offset=<lastReturnedItem + 1>` until `lastReturnedItem == totalItems - 1`.
### Signatures
Iterate items in descending chronological order:
1. Call the endpoint with `last`.
2. Call the endpoint with `offset=<min(0, lastReturnedItem - 2*limit)>`. Once the `calculated offset == 0`, it will be known that that call will return the first item and therefore no subsequent calls need to be done.
If the `totalItems` change while iterating, it means that new items have been added at the end of the list. To fetch these items, use the following: `offset=<first received lastReturnedItem + 1>`, and from there iterate as described in *Iterate items in ascending chronological order*.
**Note:** The returned list will alway be in ascending chronlogical order, so the returned arrays must be iterated from end to start in order to achieve reverse chronological order.
**Note:** Pagination safety can be affected by Ethereum reorgs. In most of the cases this means that the last page can be changed, but older items should be safe.
The POST endpoint require to be signed using BabyJubJub or Ethereum keys. TODO: add references to libraries / examples / ...
version: "0.0.1" version: "0.0.1"
title: Hermez Network API title: Hermez Network API
@ -164,20 +160,22 @@ paths:
type: string type: string
description: Comma separated list of token identifiers. description: Comma separated list of token identifiers.
example: "3,87,91" example: "3,87,91"
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: accounts will be ordered by increasing account index.
- Default first item: the first account to be returned will be the one that has the smallest account index.
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Accounts will be ordered by increasing account index.
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -258,6 +256,12 @@ paths:
description: Get exit information. This information is required to perform a withdraw. description: Get exit information. This information is required to perform a withdraw.
operationId: getExits operationId: getExits
parameters: parameters:
- name: tokenId
in: query
required: false
description: Only get exits of specific token
schema:
$ref: '#/components/schemas/TokenId'
- name: hezEthereumAddress - name: hezEthereumAddress
in: query in: query
description: Get exits associated to a Ethereum address. Incompatible with query `BJJ` and `accountIndex`. description: Get exits associated to a Ethereum address. Incompatible with query `BJJ` and `accountIndex`.
@ -272,7 +276,7 @@ paths:
$ref: '#/components/schemas/BJJ' $ref: '#/components/schemas/BJJ'
- name: accountIndex - name: accountIndex
in: query in: query
description: Get exits associated to a specific account. Incompatible with queries `hezEthereumAddress` and `BJJ`.
description: Get exits associated to a specific account. Incompatible with queries `tokenId`, `hezEthereumAddress` and `BJJ`.
required: false required: false
schema: schema:
$ref: '#/components/schemas/AccountIndex' $ref: '#/components/schemas/AccountIndex'
@ -282,20 +286,22 @@ paths:
required: false required: false
schema: schema:
$ref: '#/components/schemas/BatchNum' $ref: '#/components/schemas/BatchNum'
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: exits will be ordered by increasing (batchNum, accountIndex).
- Default first item: the first exit to be returned will be the one that has the smallest (batchNum, accountIndex).
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Exits will be ordered by increasing (batchNum, accountIndex).
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -459,7 +465,7 @@ paths:
description: >- description: >-
Get historical transactions. This endpoint will return all the different types of transactions except for: Get historical transactions. This endpoint will return all the different types of transactions except for:
- Transactions that are still in the transaction pool of any coordinator. These transactions can be fetched using `GET /transactions-pool/{id}`. - Transactions that are still in the transaction pool of any coordinator. These transactions can be fetched using `GET /transactions-pool/{id}`.
- L1 transactions that have not been forged yet. These transactions can be fetched using `GET /transactions-history/{id}`.
- L1 transactions sent by users that have not been forged yet. These transactions can be fetched using `GET /transactions-history/{id}`.
operationId: getHistoryTxs operationId: getHistoryTxs
parameters: parameters:
- name: tokenId - name: tokenId
@ -498,20 +504,22 @@ paths:
description: Only get transactions of a specific type. description: Only get transactions of a specific type.
schema: schema:
$ref: '#/components/schemas/TransactionType' $ref: '#/components/schemas/TransactionType'
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: History transactions will be ordered by increasing (batchNum, position).
- Default first item: the first transaction to be returned will be the one that has the smallest (batchNum, position).
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. History transactions will be ordered by (batchNum, position).
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -612,20 +620,22 @@ paths:
description: Include only batches forged by `forgerAddr` description: Include only batches forged by `forgerAddr`
schema: schema:
$ref: '#/components/schemas/EthereumAddress' $ref: '#/components/schemas/EthereumAddress'
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: batches will be ordered by increasing `batchNum`.
- Default first item: the first batch to be returned will be the one that has the smallest `batchNum`.
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Batches will be ordered by increasing `batchNum`.
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -773,20 +783,22 @@ paths:
description: If set to true, only include slots whose auction has finished. description: If set to true, only include slots whose auction has finished.
schema: schema:
type: boolean type: boolean
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: slots will be ordered by increasing `slotNum`.
- Default first item: the first slot to be returned will be the one that has the smallest `slotNum`.
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Slots will be ordered by increasing `slotNum`.
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -880,20 +892,22 @@ paths:
required: false required: false
schema: schema:
$ref: '#/components/schemas/EthereumAddress' $ref: '#/components/schemas/EthereumAddress'
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: bids will be ordered by increasing (slotNum, bidValue)`.
- Default first item: the first bid to be returned will be the one that has the smallest (slotNum, bidValue).
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Bids will be ordered by increasing (slotNum, bidValue)`.
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -1031,20 +1045,22 @@ paths:
description: Include token(s) by their names (or a substring of the name). description: Include token(s) by their names (or a substring of the name).
schema: schema:
type: string type: string
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: tokens will be ordered by increasing tokenID.
- Default first item: the first token to be returned will be the one that has the smallest tokenID.
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Tokens will be ordered by increasing tokenID.
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -1146,20 +1162,22 @@ paths:
description: Get information about coordinators. description: Get information about coordinators.
operationId: getCoordinators operationId: getCoordinators
parameters: parameters:
- name: offset
- name: fromItem
in: query in: query
required: false required: false
description: |
- Order: coordinators will be ordered by increasing (ethereumBlock, forgerAddr).
- Default first item: the first token to be returned will be the one that has the smallest (ethereumBlock, forgerAddr).
description: Indicates the desired first item (using the itemId property) to be included in the response.
schema: schema:
type: number type: number
- name: last
- name: order
in: query in: query
required: false required: false
description: Get the last page.
description: Order of the returned items. Coordinators will be ordered by increasing (ethereumBlock, forgerAddr).
schema: schema:
type: boolean
type: string
default: ASC
enum:
- ASC
- DESC
- name: limit - name: limit
in: query in: query
required: false required: false
@ -1228,13 +1246,16 @@ paths:
$ref: '#/components/schemas/Error500' $ref: '#/components/schemas/Error500'
components: components:
schemas: schemas:
ItemId:
type: integer
description: Position of the item in the DB. This is useful for pagination, but has nothing to do with the protocol.
PostPoolL2Transaction: PostPoolL2Transaction:
type: object type: object
properties: properties:
id: id:
$ref: '#/components/schemas/TransactionId' $ref: '#/components/schemas/TransactionId'
type: type:
$ref: '#/components/schemas/TransactionType'
$ref: '#/components/schemas/TransactionTypeL2'
tokenId: tokenId:
$ref: '#/components/schemas/TokenId' $ref: '#/components/schemas/TokenId'
fromAccountIndex: fromAccountIndex:
@ -1341,7 +1362,7 @@ components:
id: id:
$ref: '#/components/schemas/TransactionId' $ref: '#/components/schemas/TransactionId'
type: type:
$ref: '#/components/schemas/TransactionType'
$ref: '#/components/schemas/TransactionTypeL2'
fromAccountIndex: fromAccountIndex:
$ref: '#/components/schemas/AccountIndex' $ref: '#/components/schemas/AccountIndex'
toAccountIndex: toAccountIndex:
@ -1480,6 +1501,14 @@ components:
- ForceExit - ForceExit
- TransferToEthAddr - TransferToEthAddr
- TransferToBJJ - TransferToBJJ
TransactionTypeL2:
type: string
description: Type of transaction.
enum:
- Exit
- Transfer
- TransferToEthAddr
- TransferToBJJ
TokenId: TokenId:
type: integer type: integer
description: Identifier of a token registered in the network. description: Identifier of a token registered in the network.
@ -1556,6 +1585,8 @@ components:
- L2 - L2
id: id:
$ref: '#/components/schemas/TransactionId' $ref: '#/components/schemas/TransactionId'
itemId:
$ref: '#/components/schemas/ItemId'
type: type:
$ref: '#/components/schemas/TransactionType' $ref: '#/components/schemas/TransactionType'
position: position:
@ -1655,6 +1686,7 @@ components:
required: required:
- L1orL2 - L1orL2
- id - id
- itemId
- type - type
- position - position
- fromAccountIndex - fromAccountIndex
@ -1943,29 +1975,87 @@ components:
- example: 7394 - example: 7394
accountIndex: accountIndex:
$ref: '#/components/schemas/AccountIndex' $ref: '#/components/schemas/AccountIndex'
itemId:
$ref: '#/components/schemas/ItemId'
merkleProof: merkleProof:
type: string
type: object
description: Existence proof of a leaf in a given Merkle Root. Encoded as hexadecimal string. description: Existence proof of a leaf in a given Merkle Root. Encoded as hexadecimal string.
example: "0x347089321de8971320489793a823470918fffeab"
properties:
Root:
type: array
items:
type: integer
Siblings:
type: array
items:
type: integer
OldKey:
type: array
items:
type: integer
OldValue:
type: array
items:
type: integer
IsOld0:
type: boolean
Key:
type: array
items:
type: integer
Value:
type: array
items:
type: integer
Fnc:
type: integer
required:
- Root
- Siblings
- OldKey
- OldValue
- IsOld0
- Key
- Value
- Fnc
additionalProperties: false
example: {"Root":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Siblings":[0,1,2],"OldKey":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"OldValue":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"IsOld0":true,"Key":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Value":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Fnc":0}
balance: balance:
$ref: '#/components/schemas/BigInt' $ref: '#/components/schemas/BigInt'
instantWithdrawn: instantWithdrawn:
allOf:
- $ref: '#/components/schemas/EthBlockNum'
- description: Block in which the exit balance was instantly withdrawn. Null indicates that an instant withdrawn hasn't been performed.
- example: 74747363
type: integer
description: Block in which the exit balance was instantly withdrawn. Null indicates that an instant withdrawn hasn't been performed.
minimum: 0
maximum: 1.84467440737096e+19
example: 74747363
nullable: true
delayedWithdrawRequest: delayedWithdrawRequest:
allOf:
- $ref: '#/components/schemas/EthBlockNum'
- description: Block in which the exit balance was requested to delay withdraw. Null indicates that a delay withdraw hasn't been performed.
- example: null
type: integer
description: Block in which the exit balance was requested to delay withdraw. Null indicates that a delay withdraw hasn't been performed.
minimum: 0
maximum: 1.84467440737096e+19
example: null
nullable: true
delayedWithdrawn: delayedWithdrawn:
allOf:
- $ref: '#/components/schemas/EthBlockNum'
- description: Block in which the exit balance was delayed withdrawn after a delay withdraw request. Null indicates that a delay withdraw hasn't been performed.
- example: null
type: integer
description: Block in which the exit balance was delayed withdrawn after a delay withdraw request. Null indicates that a delay withdraw hasn't been performed.
minimum: 0
maximum: 1.84467440737096e+19
example: null
nullable: true
token: token:
$ref: '#/components/schemas/Token' $ref: '#/components/schemas/Token'
required:
- batchNum
- accountIndex
- itemId
- merkleProof
- balance
- instantWithdrawn
- delayedWithdrawRequest
- delayedWithdrawn
- token
additionalProperties: false
Exits: Exits:
type: object type: object
properties: properties:
@ -1975,7 +2065,11 @@ components:
items: items:
$ref: '#/components/schemas/Exit' $ref: '#/components/schemas/Exit'
pagination: pagination:
$ref: '#/components/schemas/PaginationInfo'
$ref: '#/components/schemas/PaginationInfo'
required:
- exits
- pagination
additionalProperties: false
Account: Account:
type: object type: object
description: State tree leaf. It contains balance and nonce of an account. description: State tree leaf. It contains balance and nonce of an account.
@ -2217,10 +2311,14 @@ components:
type: integer type: integer
description: Amount of items that the endpoint can return given the filters and the current state of the database. description: Amount of items that the endpoint can return given the filters and the current state of the database.
example: 2048 example: 2048
lastReturnedItem:
firstItem:
type: integer
description: The smallest itemId that the endpoint will return with the given filters.
example: 50
lastItem:
type: integer type: integer
description: Index of the last returned item. Useful to query next items.
example: 439
description: The greatest itemId that the endpoint will return with the given filters.
example: 2130
Config: Config:
type: object type: object
description: Configuration parameters of the different smart contracts that power the Hermez network. description: Configuration parameters of the different smart contracts that power the Hermez network.

+ 17
- 0
common/tx.go

@ -3,8 +3,10 @@ package common
import ( import (
"database/sql/driver" "database/sql/driver"
"encoding/hex" "encoding/hex"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"strings"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
@ -56,6 +58,21 @@ func (txid TxID) String() string {
return "0x" + hex.EncodeToString(txid[:]) return "0x" + hex.EncodeToString(txid[:])
} }
// NewTxIDFromString returns a string hexadecimal representation of the TxID
func NewTxIDFromString(idStr string) (TxID, error) {
txid := TxID{}
idStr = strings.TrimPrefix(idStr, "0x")
decoded, err := hex.DecodeString(idStr)
if err != nil {
return TxID{}, err
}
if len(decoded) != TxIDLen {
return txid, errors.New("Invalid idStr")
}
copy(txid[:], decoded)
return txid, nil
}
// TxType is a string that represents the type of a Hermez network transaction // TxType is a string that represents the type of a Hermez network transaction
type TxType string type TxType string

+ 184
- 24
db/historydb/historydb.go

@ -18,6 +18,13 @@ import (
"github.com/russross/meddler" "github.com/russross/meddler"
) )
const (
// OrderAsc indicates ascending order when using pagination
OrderAsc = "ASC"
// OrderDesc indicates descending order when using pagination
OrderDesc = "DESC"
)
// TODO(Edu): Document here how HistoryDB is kept consistent // TODO(Edu): Document here how HistoryDB is kept consistent
// HistoryDB persist the historic of the rollup // HistoryDB persist the historic of the rollup
@ -445,25 +452,47 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
// return db.SlicePtrsToSlice(txs).([]common.Tx), err // return db.SlicePtrsToSlice(txs).([]common.Tx), err
// } // }
// GetHistoryTx returns a tx from the DB given a TxID
func (hdb *HistoryDB) GetHistoryTx(txID common.TxID) (*HistoryTx, error) {
tx := &HistoryTx{}
err := meddler.QueryRow(
hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.from_eth_addr, tx.from_bjj, tx.load_amount,
tx.load_amount_usd, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE tx.id = $1;`, txID,
)
return tx, err
}
// GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct // GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct
// and pagination info
func (hdb *HistoryDB) GetHistoryTxs( func (hdb *HistoryDB) GetHistoryTxs(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKey, ethAddr *ethCommon.Address, bjj *babyjub.PublicKey,
tokenID, idx, batchNum *uint, txType *common.TxType,
offset, limit *uint, last bool,
) ([]HistoryTx, int, error) {
tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType,
fromItem, limit *uint, order string,
) ([]HistoryTx, *db.Pagination, error) {
if ethAddr != nil && bjj != nil { if ethAddr != nil && bjj != nil {
return nil, 0, errors.New("ethAddr and bjj are incompatible")
return nil, nil, errors.New("ethAddr and bjj are incompatible")
} }
var query string var query string
var args []interface{} var args []interface{}
queryStr := `SELECT tx.is_l1, tx.id, tx.type, tx.position, tx.from_idx, tx.to_idx,
tx.amount, tx.token_id, tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num,
tx.user_origin, tx.from_eth_addr, tx.from_bjj, tx.load_amount, tx.fee, tx.nonce,
queryStr := `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.amount_usd,
tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin,
tx.from_eth_addr, tx.from_bjj, tx.load_amount,
tx.load_amount_usd, tx.fee, tx.fee_usd, tx.nonce,
token.token_id, token.eth_block_num AS token_block, token.token_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, block.timestamp, count(*) OVER() AS total_items
FROM tx
INNER JOIN token ON tx.token_id = token.token_id
token.usd_update, block.timestamp, count(*) OVER() AS total_items,
MIN(tx.item_id) OVER() AS first_item, MAX(tx.item_id) OVER() AS last_item
FROM tx INNER JOIN token ON tx.token_id = token.token_id
INNER JOIN block ON tx.eth_block_num = block.eth_block_num ` INNER JOIN block ON tx.eth_block_num = block.eth_block_num `
// Apply filters // Apply filters
nextIsAnd := false nextIsAnd := false
@ -523,33 +552,164 @@ func (hdb *HistoryDB) GetHistoryTxs(
} }
queryStr += "tx.type = ? " queryStr += "tx.type = ? "
args = append(args, txType) args = append(args, txType)
// nextIsAnd = true
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
if order == OrderAsc {
queryStr += "tx.item_id >= ? "
} else {
queryStr += "tx.item_id <= ? "
}
args = append(args, fromItem)
nextIsAnd = true
} }
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "tx.batch_num IS NOT NULL "
// pagination // pagination
if last {
queryStr += "ORDER BY (batch_num, position) DESC NULLS FIRST "
queryStr += "ORDER BY tx.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else { } else {
queryStr += "ORDER BY (batch_num, position) ASC NULLS LAST "
queryStr += fmt.Sprintf("OFFSET %d ", *offset)
queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr) query = hdb.db.Rebind(queryStr)
// log.Debug(query)
log.Debug(query)
txsPtrs := []*HistoryTx{} txsPtrs := []*HistoryTx{}
if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil {
return nil, 0, err
return nil, nil, err
} }
txs := db.SlicePtrsToSlice(txsPtrs).([]HistoryTx) txs := db.SlicePtrsToSlice(txsPtrs).([]HistoryTx)
if len(txs) == 0 { if len(txs) == 0 {
return nil, 0, sql.ErrNoRows
} else if last {
tmp := []HistoryTx{}
for i := len(txs) - 1; i >= 0; i-- {
tmp = append(tmp, txs[i])
return nil, nil, sql.ErrNoRows
}
return txs, &db.Pagination{
TotalItems: txs[0].TotalItems,
FirstItem: txs[0].FirstItem,
LastItem: txs[0].LastItem,
}, nil
}
// GetExit returns a exit from the DB
func (hdb *HistoryDB) GetExit(batchNum *uint, idx *common.Idx) (*HistoryExit, error) {
exit := &HistoryExit{}
err := meddler.QueryRow(
hdb.db, exit, `SELECT exit_tree.*, token.token_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id
WHERE exit_tree.batch_num = $1 AND exit_tree.account_idx = $2;`, batchNum, idx,
)
return exit, err
}
// GetExits returns a list of exits from the DB and pagination info
func (hdb *HistoryDB) GetExits(
ethAddr *ethCommon.Address, bjj *babyjub.PublicKey,
tokenID *common.TokenID, idx *common.Idx, batchNum *uint,
fromItem, limit *uint, order string,
) ([]HistoryExit, *db.Pagination, error) {
if ethAddr != nil && bjj != nil {
return nil, nil, errors.New("ethAddr and bjj are incompatible")
}
var query string
var args []interface{}
queryStr := `SELECT exit_tree.*, token.token_id, token.eth_block_num AS token_block,
token.eth_addr, token.name, token.symbol, token.decimals, token.usd,
token.usd_update, COUNT(*) OVER() AS total_items, MIN(exit_tree.item_id) OVER() AS first_item, MAX(exit_tree.item_id) OVER() AS last_item
FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx
INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
if ethAddr != nil {
queryStr += "WHERE account.eth_addr = ? "
nextIsAnd = true
args = append(args, ethAddr)
} else if bjj != nil { // bjj filter
queryStr += "WHERE account.bjj = ? "
nextIsAnd = true
args = append(args, bjj)
}
// tokenID filter
if tokenID != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "account.token_id = ? "
args = append(args, tokenID)
nextIsAnd = true
}
// idx filter
if idx != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.account_idx = ? "
args = append(args, idx)
nextIsAnd = true
}
// batchNum filter
if batchNum != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
}
queryStr += "exit_tree.batch_num = ? "
args = append(args, batchNum)
nextIsAnd = true
}
if fromItem != nil {
if nextIsAnd {
queryStr += "AND "
} else {
queryStr += "WHERE "
} }
txs = tmp
if order == OrderAsc {
queryStr += "exit_tree.item_id >= ? "
} else {
queryStr += "exit_tree.item_id <= ? "
}
args = append(args, fromItem)
// nextIsAnd = true
}
// pagination
queryStr += "ORDER BY exit_tree.item_id "
if order == OrderAsc {
queryStr += " ASC "
} else {
queryStr += " DESC "
}
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr)
// log.Debug(query)
exits := []*HistoryExit{}
if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil {
return nil, nil, err
}
if len(exits) == 0 {
return nil, nil, sql.ErrNoRows
} }
return txs, txs[0].TotalItems, nil
return db.SlicePtrsToSlice(exits).([]HistoryExit), &db.Pagination{
TotalItems: exits[0].TotalItems,
FirstItem: exits[0].FirstItem,
LastItem: exits[0].LastItem,
}, nil
} }
// // GetTx returns a tx from the DB // // GetTx returns a tx from the DB

+ 8
- 2
db/historydb/historydb_test.go

@ -213,6 +213,7 @@ func TestTxs(t *testing.T) {
/* /*
Uncomment once the transaction generation is fixed Uncomment once the transaction generation is fixed
!! test that batches that forge user L1s !!
!! Missing tests to check that historic USD is not set if USDUpdate is too old (24h) !! !! Missing tests to check that historic USD is not set if USDUpdate is too old (24h) !!
// Generate fake L1 txs // Generate fake L1 txs
@ -333,9 +334,14 @@ func TestExitTree(t *testing.T) {
blocks := setTestBlocks(0, 10) blocks := setTestBlocks(0, 10)
batches := test.GenBatches(nBatches, blocks) batches := test.GenBatches(nBatches, blocks)
err := historyDB.AddBatches(batches) err := historyDB.AddBatches(batches)
const nTokens = 50
tokens := test.GenTokens(nTokens, blocks)
assert.NoError(t, historyDB.AddTokens(tokens))
assert.NoError(t, err) assert.NoError(t, err)
exitTree := test.GenExitTree(nBatches)
const nAccounts = 3
accs := test.GenAccounts(nAccounts, 0, tokens, nil, nil, batches)
assert.NoError(t, historyDB.AddAccounts(accs))
exitTree := test.GenExitTree(nBatches, batches, accs)
err = historyDB.AddExitTree(exitTree) err = historyDB.AddExitTree(exitTree)
assert.NoError(t, err) assert.NoError(t, err)
} }

+ 28
- 0
db/historydb/views.go

@ -7,6 +7,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/iden3/go-merkletree"
) )
// HistoryTx is a representation of a generic Tx with additional information // HistoryTx is a representation of a generic Tx with additional information
@ -15,6 +16,7 @@ type HistoryTx struct {
// Generic // Generic
IsL1 bool `meddler:"is_l1"` IsL1 bool `meddler:"is_l1"`
TxID common.TxID `meddler:"id"` TxID common.TxID `meddler:"id"`
ItemID int `meddler:"item_id"`
Type common.TxType `meddler:"type"` Type common.TxType `meddler:"type"`
Position int `meddler:"position"` Position int `meddler:"position"`
FromIdx *common.Idx `meddler:"from_idx"` FromIdx *common.Idx `meddler:"from_idx"`
@ -38,6 +40,8 @@ type HistoryTx struct {
// API extras // API extras
Timestamp time.Time `meddler:"timestamp,utctime"` Timestamp time.Time `meddler:"timestamp,utctime"`
TotalItems int `meddler:"total_items"` TotalItems int `meddler:"total_items"`
FirstItem int `meddler:"first_item"`
LastItem int `meddler:"last_item"`
TokenID common.TokenID `meddler:"token_id"` TokenID common.TokenID `meddler:"token_id"`
TokenEthBlockNum int64 `meddler:"token_block"` TokenEthBlockNum int64 `meddler:"token_block"`
TokenEthAddr ethCommon.Address `meddler:"eth_addr"` TokenEthAddr ethCommon.Address `meddler:"eth_addr"`
@ -86,3 +90,27 @@ type TokenRead struct {
USD *float64 `json:"USD" meddler:"usd"` USD *float64 `json:"USD" meddler:"usd"`
USDUpdate *time.Time `json:"fiatUpdate" meddler:"usd_update,utctime"` USDUpdate *time.Time `json:"fiatUpdate" meddler:"usd_update,utctime"`
} }
// HistoryExit is a representation of a exit with additional information
// required by the API, and extracted by joining token table
type HistoryExit struct {
ItemID int `meddler:"item_id"`
BatchNum common.BatchNum `meddler:"batch_num"`
AccountIdx common.Idx `meddler:"account_idx"`
MerkleProof *merkletree.CircomVerifierProof `meddler:"merkle_proof,json"`
Balance *big.Int `meddler:"balance,bigint"`
InstantWithdrawn *int64 `meddler:"instant_withdrawn"`
DelayedWithdrawRequest *int64 `meddler:"delayed_withdraw_request"`
DelayedWithdrawn *int64 `meddler:"delayed_withdrawn"`
TotalItems int `meddler:"total_items"`
FirstItem int `meddler:"first_item"`
LastItem int `meddler:"last_item"`
TokenID common.TokenID `meddler:"token_id"`
TokenEthBlockNum int64 `meddler:"token_block"`
TokenEthAddr ethCommon.Address `meddler:"eth_addr"`
TokenName string `meddler:"name"`
TokenSymbol string `meddler:"symbol"`
TokenDecimals uint64 `meddler:"decimals"`
TokenUSD *float64 `meddler:"usd"`
TokenUSDUpdate *time.Time `meddler:"usd_update"`
}

+ 32
- 25
db/migrations/0001.sql

@ -28,17 +28,6 @@ CREATE TABLE batch (
total_fees_usd NUMERIC total_fees_usd NUMERIC
); );
CREATE TABLE exit_tree (
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
account_idx BIGINT,
merkle_proof BYTEA NOT NULL,
balance BYTEA NOT NULL,
instant_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL,
delayed_withdraw_request BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL,
delayed_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL,
PRIMARY KEY (batch_num, account_idx)
);
CREATE TABLE bid ( CREATE TABLE bid (
slot_num BIGINT NOT NULL, slot_num BIGINT NOT NULL,
bid_value BYTEA NOT NULL, bid_value BYTEA NOT NULL,
@ -58,6 +47,25 @@ CREATE TABLE token (
usd_update TIMESTAMP WITHOUT TIME ZONE usd_update TIMESTAMP WITHOUT TIME ZONE
); );
CREATE TABLE account (
idx BIGINT PRIMARY KEY,
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
bjj BYTEA NOT NULL,
eth_addr BYTEA NOT NULL
);
CREATE TABLE exit_tree (
item_id SERIAL PRIMARY KEY,
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE,
merkle_proof BYTEA NOT NULL,
balance BYTEA NOT NULL,
instant_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL,
delayed_withdraw_request BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL,
delayed_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL
);
-- +migrate StatementBegin -- +migrate StatementBegin
CREATE FUNCTION set_token_usd_update() CREATE FUNCTION set_token_usd_update()
RETURNS TRIGGER RETURNS TRIGGER
@ -75,10 +83,13 @@ LANGUAGE plpgsql;
CREATE TRIGGER trigger_token_usd_update BEFORE UPDATE OR INSERT ON token CREATE TRIGGER trigger_token_usd_update BEFORE UPDATE OR INSERT ON token
FOR EACH ROW EXECUTE PROCEDURE set_token_usd_update(); FOR EACH ROW EXECUTE PROCEDURE set_token_usd_update();
CREATE SEQUENCE tx_item_id;
CREATE TABLE tx ( CREATE TABLE tx (
-- Generic TX -- Generic TX
item_id INTEGER PRIMARY KEY DEFAULT nextval('tx_item_id'),
is_l1 BOOLEAN NOT NULL, is_l1 BOOLEAN NOT NULL,
id BYTEA PRIMARY KEY,
id BYTEA,
type VARCHAR(40) NOT NULL, type VARCHAR(40) NOT NULL,
position INT NOT NULL, position INT NOT NULL,
from_idx BIGINT, from_idx BIGINT,
@ -103,8 +114,6 @@ CREATE TABLE tx (
nonce BIGINT nonce BIGINT
); );
CREATE INDEX tx_order ON tx (batch_num, position);
-- +migrate StatementBegin -- +migrate StatementBegin
CREATE FUNCTION fee_percentage(NUMERIC) CREATE FUNCTION fee_percentage(NUMERIC)
RETURNS NUMERIC RETURNS NUMERIC
@ -412,9 +421,10 @@ BEGIN
usd / POWER(10, decimals), usd_update, timestamp FROM token INNER JOIN block on token.eth_block_num = block.eth_block_num WHERE token_id = NEW.token_id; usd / POWER(10, decimals), usd_update, timestamp FROM token INNER JOIN block on token.eth_block_num = block.eth_block_num WHERE token_id = NEW.token_id;
IF _tx_timestamp - interval '24 hours' < _usd_update AND _tx_timestamp + interval '24 hours' > _usd_update THEN IF _tx_timestamp - interval '24 hours' < _usd_update AND _tx_timestamp + interval '24 hours' > _usd_update THEN
NEW."amount_usd" = (SELECT _value * NEW.amount_f); NEW."amount_usd" = (SELECT _value * NEW.amount_f);
NEW."load_amount_usd" = (SELECT _value * NEW.load_amount_f);
IF NOT NEW.is_l1 THEN IF NOT NEW.is_l1 THEN
NEW."fee_usd" = (SELECT NEW."amount_usd" * fee_percentage(NEW.fee::NUMERIC)); NEW."fee_usd" = (SELECT NEW."amount_usd" * fee_percentage(NEW.fee::NUMERIC));
ELSE
NEW."load_amount_usd" = (SELECT _value * NEW.load_amount_f);
END IF; END IF;
END IF; END IF;
RETURN NEW; RETURN NEW;
@ -433,8 +443,13 @@ $BODY$
BEGIN BEGIN
IF NEW.forge_l1_txs_num IS NOT NULL THEN IF NEW.forge_l1_txs_num IS NOT NULL THEN
UPDATE tx UPDATE tx
SET batch_num = NEW.batch_num
WHERE user_origin AND NEW.forge_l1_txs_num = to_forge_l1_txs_num;
SET item_id = nextval('tx_item_id'), batch_num = NEW.batch_num
WHERE id IN (
SELECT id FROM tx
WHERE user_origin AND NEW.forge_l1_txs_num = to_forge_l1_txs_num
ORDER BY position
FOR UPDATE
);
END IF; END IF;
RETURN NEW; RETURN NEW;
END; END;
@ -444,14 +459,6 @@ LANGUAGE plpgsql;
CREATE TRIGGER trigger_forge_l1_txs AFTER INSERT ON batch CREATE TRIGGER trigger_forge_l1_txs AFTER INSERT ON batch
FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs(); FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs();
CREATE TABLE account (
idx BIGINT PRIMARY KEY,
token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
bjj BYTEA NOT NULL,
eth_addr BYTEA NOT NULL
);
CREATE TABLE rollup_vars ( CREATE TABLE rollup_vars (
eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE, eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
forge_l1_timeout BYTEA NOT NULL, forge_l1_timeout BYTEA NOT NULL,

+ 15
- 0
db/utils.go

@ -177,3 +177,18 @@ func SlicePtrsToSlice(slice interface{}) interface{} {
} }
return res.Interface() return res.Interface()
} }
// Pagination give information on the items of a query
type Pagination struct {
TotalItems int `json:"totalItems"`
FirstItem int `json:"firstItem"`
LastItem int `json:"lastItem"`
FirstReturnedItem int `json:"-"`
LastReturnedItem int `json:"-"`
}
// Paginationer is an interface that allows getting pagination info on any struct
type Paginationer interface {
GetPagination() *Pagination
Len() int
}

+ 21
- 5
test/historydb.go

@ -68,7 +68,7 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
} }
if i%2 == 0 { if i%2 == 0 {
toForge := new(int64) toForge := new(int64)
*toForge = int64(i)
*toForge = int64(i + 1)
batch.ForgeL1TxsNum = toForge batch.ForgeL1TxsNum = toForge
} }
batches = append(batches, batch) batches = append(batches, batch)
@ -142,7 +142,9 @@ func GenL1Txs(
panic(err) panic(err)
} }
tx = *nTx tx = *nTx
if batches[i%len(batches)].ForgeL1TxsNum != nil {
if !tx.UserOrigin {
tx.BatchNum = &batches[i%len(batches)].BatchNum
} else if batches[i%len(batches)].ForgeL1TxsNum != nil {
// Add already forged txs // Add already forged txs
tx.BatchNum = &batches[i%len(batches)].BatchNum tx.BatchNum = &batches[i%len(batches)].BatchNum
setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs) setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs)
@ -332,15 +334,15 @@ func GenBids(nBids int, blocks []common.Block, coords []common.Coordinator) []co
// GenExitTree generates an exitTree (as an array of Exits) // GenExitTree generates an exitTree (as an array of Exits)
//nolint:gomnd //nolint:gomnd
func GenExitTree(n int) []common.ExitInfo {
func GenExitTree(n int, batches []common.Batch, accounts []common.Account) []common.ExitInfo {
exitTree := make([]common.ExitInfo, n) exitTree := make([]common.ExitInfo, n)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
exitTree[i] = common.ExitInfo{ exitTree[i] = common.ExitInfo{
BatchNum: common.BatchNum(i + 1),
BatchNum: batches[i%len(batches)].BatchNum,
InstantWithdrawn: nil, InstantWithdrawn: nil,
DelayedWithdrawRequest: nil, DelayedWithdrawRequest: nil,
DelayedWithdrawn: nil, DelayedWithdrawn: nil,
AccountIdx: common.Idx(i * 10),
AccountIdx: accounts[i%len(accounts)].Idx,
MerkleProof: &merkletree.CircomVerifierProof{ MerkleProof: &merkletree.CircomVerifierProof{
Root: &merkletree.Hash{byte(i), byte(i + 1)}, Root: &merkletree.Hash{byte(i), byte(i + 1)},
Siblings: []*big.Int{ Siblings: []*big.Int{
@ -356,6 +358,20 @@ func GenExitTree(n int) []common.ExitInfo {
}, },
Balance: big.NewInt(int64(i) * 1000), Balance: big.NewInt(int64(i) * 1000),
} }
if i%2 == 0 {
instant := new(int64)
*instant = int64(batches[(i+1)%len(batches)].BatchNum)
exitTree[i].InstantWithdrawn = instant
} else if i%3 == 0 {
delayedReq := new(int64)
*delayedReq = int64(batches[(i+1)%len(batches)].BatchNum)
exitTree[i].DelayedWithdrawRequest = delayedReq
if i%9 == 0 {
delayed := new(int64)
*delayed = int64(batches[(i+2)%len(batches)].BatchNum)
exitTree[i].DelayedWithdrawn = delayed
}
}
} }
return exitTree return exitTree
} }

Loading…
Cancel
Save