Repository: hermez-node (mirror of https://github.com/arnaucube/hermez-node.git)
Commit: Impl api get batch(es)
@@ -40,7 +40,7 @@ const apiURL = "http://localhost" + apiPort + "/"
 type testCommon struct {
 	blocks       []common.Block
 	tokens       []tokenAPI
-	batches      []common.Batch
+	batches      []testBatch
 	coordinators []coordinatorAPI
 	usrAddr      string
 	usrBjj       string
@@ -651,7 +651,7 @@ func TestMain(m *testing.M) {
 	tc = testCommon{
 		blocks:       blocks,
 		tokens:       tokensUSD,
-		batches:      batches,
+		batches:      genTestBatches(blocks, batches),
 		coordinators: apiCoordinators,
 		usrAddr:      ethAddrToHez(usrAddr),
 		usrBjj:       bjjToString(usrBjj),
@@ -1208,7 +1208,6 @@ func TestPoolTxs(t *testing.T) {
 	jsonTxBytes, err := json.Marshal(tx)
 	assert.NoError(t, err)
 	jsonTxReader := bytes.NewReader(jsonTxBytes)
-	fmt.Println(string(jsonTxBytes))
 	assert.NoError(
 		t, doGoodReq(
 			"POST",
api/batch.go (new file)
@@ -0,0 +1,116 @@
package api

import (
	"errors"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db"
	"github.com/hermeznetwork/hermez-node/db/historydb"
)

func getBatches(c *gin.Context) {
	// Get query parameters
	// minBatchNum
	minBatchNum, err := parseQueryUint("minBatchNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// maxBatchNum
	maxBatchNum, err := parseQueryUint("maxBatchNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// slotNum
	slotNum, err := parseQueryUint("slotNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// forgerAddr
	forgerAddr, err := parseQueryEthAddr("forgerAddr", c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// pagination
	fromItem, order, limit, err := parsePagination(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// Fetch batches from historyDB
	batches, pagination, err := h.GetBatchesAPI(
		minBatchNum, maxBatchNum, slotNum, forgerAddr, fromItem, limit, order,
	)
	if err != nil {
		retSQLErr(err, c)
		return
	}

	// Build successful response
	type batchesResponse struct {
		Batches    []historydb.BatchAPI `json:"batches"`
		Pagination *db.Pagination       `json:"pagination"`
	}
	c.JSON(http.StatusOK, &batchesResponse{
		Batches:    batches,
		Pagination: pagination,
	})
}

func getBatch(c *gin.Context) {
	// Get batchNum
	batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	if batchNum == nil { // batchNum is required
		retBadReq(errors.New("Invalid batchNum"), c)
		return
	}
	// Fetch batch from historyDB
	batch, err := h.GetBatchAPI(common.BatchNum(*batchNum))
	if err != nil {
		retSQLErr(err, c)
		return
	}
	// JSON response
	c.JSON(http.StatusOK, batch)
}

type fullBatch struct {
	Batch *historydb.BatchAPI
	Txs   []historyTxAPI
}

func getFullBatch(c *gin.Context) {
	// Get batchNum
	batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	if batchNum == nil {
		retBadReq(errors.New("Invalid batchNum"), c)
		return
	}
	// Fetch batch from historyDB
	batch, err := h.GetBatchAPI(common.BatchNum(*batchNum))
	if err != nil {
		retSQLErr(err, c)
		return
	}
	// Fetch txs from historyDB
	// TODO
	txs := []historyTxAPI{}
	// JSON response
	c.JSON(http.StatusOK, fullBatch{
		Batch: batch,
		Txs:   txs,
	})
}
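Note (illustrative, not part of this commit): the diff adds the three handlers but not their route registration. The tests below request apiURL+"batches" and apiURL+"batches/<batchNum>", so a minimal wiring sketch consistent with that would be:

package api

import "github.com/gin-gonic/gin"

// setBatchEndpoints is a hypothetical helper showing how the new handlers
// could be mounted on a gin engine; only the two paths exercised by
// batch_test.go are shown.
func setBatchEndpoints(r *gin.Engine) {
	r.GET("/batches", getBatches)         // list with filters + pagination
	r.GET("/batches/:batchNum", getBatch) // single batch by batchNum
	// getFullBatch would get its own route once its txs fetching (TODO above) is implemented.
}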
api/batch_test.go (new file)
@@ -0,0 +1,243 @@
package api

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db"
	"github.com/hermeznetwork/hermez-node/db/historydb"
	"github.com/mitchellh/copystructure"
	"github.com/stretchr/testify/assert"
)

type testBatch struct {
	ItemID        int                       `json:"itemId"`
	BatchNum      common.BatchNum           `json:"batchNum"`
	EthBlockNum   int64                     `json:"ethereumBlockNum"`
	EthBlockHash  ethCommon.Hash            `json:"ethereumBlockHash"`
	Timestamp     time.Time                 `json:"timestamp"`
	ForgerAddr    ethCommon.Address         `json:"forgerAddr"`
	CollectedFees map[common.TokenID]string `json:"collectedFees"`
	TotalFeesUSD  *float64                  `json:"historicTotalCollectedFeesUSD"`
	StateRoot     string                    `json:"stateRoot"`
	NumAccounts   int                       `json:"numAccounts"`
	ExitRoot      string                    `json:"exitRoot"`
	ForgeL1TxsNum *int64                    `json:"forgeL1TransactionsNum"`
	SlotNum       int64                     `json:"slotNum"`
}

type testBatchesResponse struct {
	Batches    []testBatch    `json:"batches"`
	Pagination *db.Pagination `json:"pagination"`
}

func (t testBatchesResponse) GetPagination() *db.Pagination {
	if t.Batches[0].ItemID < t.Batches[len(t.Batches)-1].ItemID {
		t.Pagination.FirstReturnedItem = t.Batches[0].ItemID
		t.Pagination.LastReturnedItem = t.Batches[len(t.Batches)-1].ItemID
	} else {
		t.Pagination.LastReturnedItem = t.Batches[0].ItemID
		t.Pagination.FirstReturnedItem = t.Batches[len(t.Batches)-1].ItemID
	}
	return t.Pagination
}

func (t testBatchesResponse) Len() int {
	return len(t.Batches)
}

func genTestBatches(blocks []common.Block, cBatches []common.Batch) []testBatch {
	tBatches := []testBatch{}
	for _, cBatch := range cBatches {
		block := common.Block{}
		found := false
		for _, b := range blocks {
			if b.EthBlockNum == cBatch.EthBlockNum {
				block = b
				found = true
				break
			}
		}
		if !found {
			panic("block not found")
		}
		collectedFees := make(map[common.TokenID]string)
		for k, v := range cBatch.CollectedFees {
			collectedFees[k] = v.String()
		}
		tBatch := testBatch{
			BatchNum:      cBatch.BatchNum,
			EthBlockNum:   cBatch.EthBlockNum,
			EthBlockHash:  block.Hash,
			Timestamp:     block.Timestamp,
			ForgerAddr:    cBatch.ForgerAddr,
			CollectedFees: collectedFees,
			TotalFeesUSD:  cBatch.TotalFeesUSD,
			StateRoot:     cBatch.StateRoot.String(),
			NumAccounts:   cBatch.NumAccounts,
			ExitRoot:      cBatch.ExitRoot.String(),
			ForgeL1TxsNum: cBatch.ForgeL1TxsNum,
			SlotNum:       cBatch.SlotNum,
		}
		tBatches = append(tBatches, tBatch)
	}
	return tBatches
}

func TestGetBatches(t *testing.T) {
	endpoint := apiURL + "batches"
	fetchedBatches := []testBatch{}
	appendIter := func(intr interface{}) {
		for i := 0; i < len(intr.(*testBatchesResponse).Batches); i++ {
			tmp, err := copystructure.Copy(intr.(*testBatchesResponse).Batches[i])
			if err != nil {
				panic(err)
			}
			fetchedBatches = append(fetchedBatches, tmp.(testBatch))
		}
	}
	// Get all (no filters)
	limit := 3
	path := fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit)
	err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	assertBatches(t, tc.batches, fetchedBatches)

	// minBatchNum
	fetchedBatches = []testBatch{}
	limit = 2
	minBatchNum := tc.batches[len(tc.batches)/2].BatchNum
	path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d&fromItem=", endpoint, minBatchNum, limit)
	err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	minBatchNumBatches := []testBatch{}
	for i := 0; i < len(tc.batches); i++ {
		if tc.batches[i].BatchNum > minBatchNum {
			minBatchNumBatches = append(minBatchNumBatches, tc.batches[i])
		}
	}
	assertBatches(t, minBatchNumBatches, fetchedBatches)

	// maxBatchNum
	fetchedBatches = []testBatch{}
	limit = 1
	maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum
	path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d&fromItem=", endpoint, maxBatchNum, limit)
	err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	maxBatchNumBatches := []testBatch{}
	for i := 0; i < len(tc.batches); i++ {
		if tc.batches[i].BatchNum < maxBatchNum {
			maxBatchNumBatches = append(maxBatchNumBatches, tc.batches[i])
		}
	}
	assertBatches(t, maxBatchNumBatches, fetchedBatches)

	// slotNum
	fetchedBatches = []testBatch{}
	limit = 5
	slotNum := tc.batches[len(tc.batches)/2].SlotNum
	path = fmt.Sprintf("%s?slotNum=%d&limit=%d&fromItem=", endpoint, slotNum, limit)
	err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	slotNumBatches := []testBatch{}
	for i := 0; i < len(tc.batches); i++ {
		if tc.batches[i].SlotNum == slotNum {
			slotNumBatches = append(slotNumBatches, tc.batches[i])
		}
	}
	assertBatches(t, slotNumBatches, fetchedBatches)

	// forgerAddr
	fetchedBatches = []testBatch{}
	limit = 10
	forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr
	path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d&fromItem=", endpoint, forgerAddr.String(), limit)
	err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	forgerAddrBatches := []testBatch{}
	for i := 0; i < len(tc.batches); i++ {
		if tc.batches[i].ForgerAddr == forgerAddr {
			forgerAddrBatches = append(forgerAddrBatches, tc.batches[i])
		}
	}
	assertBatches(t, forgerAddrBatches, fetchedBatches)

	// All, in reverse order
	fetchedBatches = []testBatch{}
	limit = 6
	path = fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit)
	err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	flippedBatches := []testBatch{}
	for i := len(tc.batches) - 1; i >= 0; i-- {
		flippedBatches = append(flippedBatches, tc.batches[i])
	}
	assertBatches(t, flippedBatches, fetchedBatches)

	// Mixed filters
	fetchedBatches = []testBatch{}
	limit = 1
	maxBatchNum = tc.batches[len(tc.batches)-len(tc.batches)/4].BatchNum
	minBatchNum = tc.batches[len(tc.batches)/4].BatchNum
	path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d&fromItem=", endpoint, minBatchNum, maxBatchNum, limit)
	err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
	assert.NoError(t, err)
	minMaxBatchNumBatches := []testBatch{}
	for i := 0; i < len(tc.batches); i++ {
		if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum {
			minMaxBatchNumBatches = append(minMaxBatchNumBatches, tc.batches[i])
		}
	}
	assertBatches(t, minMaxBatchNumBatches, fetchedBatches)

	// 400
	// Invalid minBatchNum
	path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2)
	err = doBadReq("GET", path, nil, 400)
	assert.NoError(t, err)
	// Invalid forgerAddr
	path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001")
	err = doBadReq("GET", path, nil, 400)
	assert.NoError(t, err)
	// 404
	path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25)
	err = doBadReq("GET", path, nil, 404)
	assert.NoError(t, err)
}

func TestGetBatch(t *testing.T) {
	endpoint := apiURL + "batches/"
	for _, batch := range tc.batches {
		fetchedBatch := testBatch{}
		assert.NoError(
			t, doGoodReq(
				"GET",
				endpoint+strconv.Itoa(int(batch.BatchNum)),
				nil, &fetchedBatch,
			),
		)
		assertBatch(t, batch, fetchedBatch)
	}
	// 400
	assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
	// 404
	assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
}

func assertBatches(t *testing.T, expected, actual []testBatch) {
	assert.Equal(t, len(expected), len(actual))
	for i := 0; i < len(expected); i++ {
		assertBatch(t, expected[i], actual[i])
	}
}

func assertBatch(t *testing.T, expected, actual testBatch) {
	assert.Equal(t, expected.Timestamp.Unix(), actual.Timestamp.Unix())
	expected.Timestamp = actual.Timestamp
	actual.ItemID = expected.ItemID
	assert.Equal(t, expected, actual)
}
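Note (illustrative, not part of this commit): TestGetBatches relies on the doGoodReqPaginated helper, which is not included in this diff. Conceptually it walks the endpoint page by page using limit/fromItem and the returned pagination info; a rough sketch of such a loop, using only the types defined above and assuming ascending order, is:

// fetchAllBatchesPaginated is an illustrative sketch, not the repo's helper:
// it assumes the next page starts right after the last returned item_id.
func fetchAllBatchesPaginated(get func(path string) (*testBatchesResponse, error)) ([]testBatch, error) {
	all := []testBatch{}
	path := fmt.Sprintf("%sbatches?limit=%d&fromItem=", apiURL, 10)
	for {
		resp, err := get(path)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Batches...)
		p := resp.GetPagination()
		if p.LastReturnedItem >= p.LastItem {
			return all, nil
		}
		path = fmt.Sprintf("%sbatches?limit=%d&fromItem=%d", apiURL, 10, p.LastReturnedItem+1)
	}
}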
@@ -178,49 +178,6 @@ func getExit(c *gin.Context) {
 	c.JSON(http.StatusOK, apiExits[0])
 }
 
-func getHistoryTxs(c *gin.Context) {
-	// Get query parameters
-	tokenID, addr, bjj, idx, err := parseAccountFilters(c)
-	if err != nil {
-		retBadReq(err, c)
-		return
-	}
-	// BatchNum
-	batchNum, err := parseQueryUint("batchNum", nil, 0, maxUint32, c)
-	if err != nil {
-		retBadReq(err, c)
-		return
-	}
-	// TxType
-	txType, err := parseQueryTxType(c)
-	if err != nil {
-		retBadReq(err, c)
-		return
-	}
-	// Pagination
-	fromItem, order, limit, err := parsePagination(c)
-	if err != nil {
-		retBadReq(err, c)
-		return
-	}
-
-	// Fetch txs from historyDB
-	txs, pagination, err := h.GetHistoryTxs(
-		addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
-	)
-	if err != nil {
-		retSQLErr(err, c)
-		return
-	}
-
-	// Build succesfull response
-	apiTxs := historyTxsToAPI(txs)
-	c.JSON(http.StatusOK, &historyTxsAPI{
-		Txs:        apiTxs,
-		Pagination: pagination,
-	})
-}
-
 func getHistoryTx(c *gin.Context) {
 	// Get TxID
 	txID, err := parseParamTxID(c)
@@ -239,18 +196,6 @@ func getHistoryTx(c *gin.Context) {
 	c.JSON(http.StatusOK, apiTxs[0])
 }
 
-func getBatches(c *gin.Context) {
-
-}
-
-func getBatch(c *gin.Context) {
-
-}
-
-func getFullBatch(c *gin.Context) {
-
-}
-
 func getSlots(c *gin.Context) {
 
 }
@@ -349,7 +294,7 @@ func getCoordinators(c *gin.Context) {
 func getCoordinator(c *gin.Context) {
 	// Get bidderAddr
 	const name = "bidderAddr"
-	bidderAddr, err := parseEthAddr(c, name)
+	bidderAddr, err := parseParamEthAddr(name, c)
 
 	if err != nil {
 		retBadReq(err, c)
@@ -311,13 +311,25 @@ func hezStringToBJJ(bjjStr, name string) (*babyjub.PublicKey, error) {
 	return bjj, nil
 }
 
-func parseEthAddr(c paramer, name string) (*ethCommon.Address, error) {
+func parseQueryEthAddr(name string, c querier) (*ethCommon.Address, error) {
+	addrStr := c.Query(name)
+	if addrStr == "" {
+		return nil, nil
+	}
+	return parseEthAddr(addrStr)
+}
+
+func parseParamEthAddr(name string, c paramer) (*ethCommon.Address, error) {
 	addrStr := c.Param(name)
 	if addrStr == "" {
 		return nil, nil
 	}
+	return parseEthAddr(addrStr)
+}
+
+func parseEthAddr(ethAddrStr string) (*ethCommon.Address, error) {
 	var addr ethCommon.Address
-	err := addr.UnmarshalText([]byte(addrStr))
+	err := addr.UnmarshalText([]byte(ethAddrStr))
 	return &addr, err
 }
 
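Note (illustrative, not part of this commit): the refactor splits the old parseEthAddr into a query-string variant and a path-parameter variant that share the actual string-to-address conversion. Both are already used by handlers in this diff; a combined illustration (handler name hypothetical) would be:

// exampleAddrHandler only illustrates which helper reads which part of the
// request; *gin.Context satisfies both the querier and the paramer interfaces.
func exampleAddrHandler(c *gin.Context) {
	forgerAddr, err := parseQueryEthAddr("forgerAddr", c) // from ?forgerAddr=0x...
	if err != nil {
		retBadReq(err, c)
		return
	}
	bidderAddr, err := parseParamEthAddr("bidderAddr", c) // from a /:bidderAddr path segment
	if err != nil {
		retBadReq(err, c)
		return
	}
	_, _ = forgerAddr, bidderAddr
}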
@@ -309,16 +309,16 @@ func TestParseEthAddr(t *testing.T) {
 	ethAddr := ethCommon.BigToAddress(big.NewInt(int64(123456)))
 	// Default
 	c.m[name] = ""
-	res, err := parseEthAddr(c, name)
+	res, err := parseQueryEthAddr(name, c)
 	assert.NoError(t, err)
 	assert.Nil(t, res)
 	// Incorrect
 	c.m[name] = "0x12345678"
-	_, err = parseEthAddr(c, name)
+	_, err = parseQueryEthAddr(name, c)
 	assert.Error(t, err)
 	// Correct
 	c.m[name] = ethAddr.String()
-	res, err = parseEthAddr(c, name)
+	res, err = parseQueryEthAddr(name, c)
 	assert.NoError(t, err)
 	assert.Equal(t, ethAddr, *res)
 }
@@ -605,13 +605,13 @@ paths:
         - name: minBatchNum
           in: query
           required: false
-          description: Include only `batchNum < minBatchNum` batches.
+          description: Include only `batchNum > minBatchNum` batches.
           schema:
             $ref: '#/components/schemas/BatchNum'
         - name: maxBatchNum
           in: query
           required: false
-          description: Include only `batchNum > maxBatchNum` batches.
+          description: Include only `batchNum < maxBatchNum` batches.
           schema:
             type: number
         - name: slotNum
@@ -1492,7 +1492,7 @@ components:
       type: string
       description: BigInt is an integer encoded as a string for numbers that are very large.
       example: "8708856933496328593"
-      pattern: "^[0-9]$"
+      pattern: "^\\d+$"
     FeeSelector:
       type: integer
       description: Index of the fee type to select, more info [here](https://idocs.hermez.io/#/spec/zkrollup/fee-table?id=transaction-fee-table).
@@ -1734,22 +1734,19 @@ components:
           description: Token name.
           example: "Dai"
     CollectedFees:
-      type: array
-      description: Collected fees by the forger of the batch. A maximum of 64 different tokens can be used.
-      items:
-        type: object
-        properties:
-          tokenId:
-            $ref: '#/components/schemas/TokenId'
-          amount:
-            allOf:
-            - $ref: '#/components/schemas/BigInt'
-            - description: Ammount of collected tokens
-            - example: "53"
+      type: object
+      description: Collected fees by the forger of the batch, represented by a map of tokenId => amount. A maximum of 64 different tokens can be used.
+      additionalProperties:
+        type: string
+      example:
+        1234: "425632785672345647"
+        4321: "86538967235465432654352"
     Batch:
       type: object
       description: Group of transactions forged in a coordinator and sent and validated in Ethereum.
       properties:
+        itemId:
+          $ref: '#/components/schemas/ItemId'
         batchNum:
           $ref: '#/components/schemas/BatchNum'
         ethereumBlockNum:
@@ -1784,12 +1781,27 @@ components:
         - description: Root of the exit Merkle Tree associated to this batch.
         - example: "2734657026572a8708d883"
         forgeL1TransactionsNum:
-          allOf:
-          - $ref: '#/components/schemas/ToForgeL1TransactionsNum'
-          - description: Identifier that corresponds to the group of L1 transactions forged in the current batch.
-          - example: 5
+          type: integer
+          description: Identifier that corresponds to the group of L1 transactions forged in the current batch.
+          example: 5
+          nullable: true
         slotNum:
           $ref: '#/components/schemas/SlotNum'
+      additionalProperties: false
+      required:
+        - itemId
+        - batchNum
+        - ethereumBlockNum
+        - ethereumBlockHash
+        - timestamp
+        - forgerAddr
+        - collectedFees
+        - historicTotalCollectedFeesUSD
+        - stateRoot
+        - numAccounts
+        - exitRoot
+        - forgeL1TransactionsNum
+        - slotNum
     FullBatch:
       type: object
       description: Group of transactions forged in a coordinator and sent and validated in Ethereum.
@@ -1801,6 +1813,10 @@ components:
           description: List of forged transactions in the batch
           items:
             $ref: '#/components/schemas/HistoryTransaction'
+      additionalProperties: false
+      required:
+        - batch
+        - transactions
     Hash:
       type: string
       description: hashed data
@@ -1821,6 +1837,10 @@ components:
           $ref: '#/components/schemas/Batch'
         pagination:
           $ref: '#/components/schemas/PaginationInfo'
+      additionalProperties: false
+      required:
+        - batches
+        - pagination
     Coordinator:
       type: object
       properties:
api/txshistory.go (new file)
@@ -0,0 +1,50 @@
package api

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func getHistoryTxs(c *gin.Context) {
	// Get query parameters
	tokenID, addr, bjj, idx, err := parseAccountFilters(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// BatchNum
	batchNum, err := parseQueryUint("batchNum", nil, 0, maxUint32, c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// TxType
	txType, err := parseQueryTxType(c)
	if err != nil {
		retBadReq(err, c)
		return
	}
	// Pagination
	fromItem, order, limit, err := parsePagination(c)
	if err != nil {
		retBadReq(err, c)
		return
	}

	// Fetch txs from historyDB
	txs, pagination, err := h.GetHistoryTxs(
		addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
	)
	if err != nil {
		retSQLErr(err, c)
		return
	}

	// Build successful response
	apiTxs := historyTxsToAPI(txs)
	c.JSON(http.StatusOK, &historyTxsAPI{
		Txs:        apiTxs,
		Pagination: pagination,
	})
}
@@ -71,10 +71,11 @@ func (b BigIntStr) Value() (driver.Value, error) {
 	return base64.StdEncoding.EncodeToString(bigInt.Bytes()), nil
 }
 
+// CollectedFees is used to retrieve common.batch.CollectedFee from the DB
 type CollectedFees map[common.TokenID]BigIntStr
 
+// UnmarshalJSON unmarshals a json representation of map[common.TokenID]*big.Int
 func (c *CollectedFees) UnmarshalJSON(text []byte) error {
-	fmt.Println(string(text))
 	bigIntMap := make(map[common.TokenID]*big.Int)
 	if err := json.Unmarshal(text, &bigIntMap); err != nil {
 		return err
@@ -86,19 +87,6 @@ func (c *CollectedFees) UnmarshalJSON(text []byte) error {
 	}
 	*c = CollectedFees(bStrMap)
 	return nil
-	// fmt.Println(string(text))
-	// *b = BigIntStr(string(text))
-	// return nil
-	// bigInt := &big.Int{}
-	// if err := bigInt.UnmarshalText(text); err != nil {
-	// 	return err
-	// }
-	// bigIntStr := NewBigIntStr(bigInt)
-	// if bigIntStr == nil {
-	// 	return nil
-	// }
-	// *b = *bigIntStr
-	// return nil
 }
 
 // HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez fotmat (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
@@ -7,11 +7,10 @@ import (
 	"os"
 	"testing"
 
-	"github.com/iden3/go-iden3-crypto/babyjub"
-
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	dbUtils "github.com/hermeznetwork/hermez-node/db"
-	_ "github.com/mattn/go-sqlite3" // sqlite driver
+	"github.com/iden3/go-iden3-crypto/babyjub"
+	_ "github.com/mattn/go-sqlite3" //nolint sqlite driver
 	"github.com/russross/meddler"
 	"github.com/stretchr/testify/assert"
 )
@@ -29,10 +28,10 @@ func TestMain(m *testing.M) {
 		panic(err)
 	}
 	db, err = sql.Open("sqlite3", dir+"sqlite.db")
-	defer os.RemoveAll(dir)
 	if err != nil {
 		panic(err)
 	}
+	defer os.RemoveAll(dir) //nolint
 	schema := `CREATE TABLE test (i BLOB);`
 	if _, err := db.Exec(schema); err != nil {
 		panic(err)
@@ -150,15 +150,113 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
 	return nil
 }
 
-// GetBatch return the batch with the given batchNum
-func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (HistoryBatch, error) {
-	var batch *common.Batch
+// GetBatchAPI return the batch with the given batchNum
+func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
+	batch := &BatchAPI{}
 	return batch, meddler.QueryRow(
-		hdb.db, &batch,
-		"SELECT * FROM batch WHERE batch_num == $1;", batchNum,
+		hdb.db, batch,
+		`SELECT batch.*, block.timestamp, block.hash
+		FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
+		WHERE batch_num = $1;`, batchNum,
 	)
 }
 
+// GetBatchesAPI return the batches applying the given filters
+func (hdb *HistoryDB) GetBatchesAPI(
+	minBatchNum, maxBatchNum, slotNum *uint,
+	forgerAddr *ethCommon.Address,
+	fromItem, limit *uint, order string,
+) ([]BatchAPI, *db.Pagination, error) {
+	var query string
+	var args []interface{}
+	queryStr := `SELECT batch.*, block.timestamp, block.hash,
+	count(*) OVER() AS total_items, MIN(batch.item_id) OVER() AS first_item,
+	MAX(batch.item_id) OVER() AS last_item
+	FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num `
+	// Apply filters
+	nextIsAnd := false
+	// minBatchNum filter
+	if minBatchNum != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "batch.batch_num > ? "
+		args = append(args, minBatchNum)
+		nextIsAnd = true
+	}
+	// maxBatchNum filter
+	if maxBatchNum != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "batch.batch_num < ? "
+		args = append(args, maxBatchNum)
+		nextIsAnd = true
+	}
+	// slotNum filter
+	if slotNum != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "batch.slot_num = ? "
+		args = append(args, slotNum)
+		nextIsAnd = true
+	}
+	// forgerAddr filter
+	if forgerAddr != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		queryStr += "batch.forger_addr = ? "
+		args = append(args, forgerAddr)
+		nextIsAnd = true
+	}
+	// pagination
+	if fromItem != nil {
+		if nextIsAnd {
+			queryStr += "AND "
+		} else {
+			queryStr += "WHERE "
+		}
+		if order == OrderAsc {
+			queryStr += "batch.item_id >= ? "
+		} else {
+			queryStr += "batch.item_id <= ? "
+		}
+		args = append(args, fromItem)
+	}
+	queryStr += "ORDER BY batch.item_id "
+	if order == OrderAsc {
+		queryStr += " ASC "
+	} else {
+		queryStr += " DESC "
+	}
+	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
+	query = hdb.db.Rebind(queryStr)
+	log.Debug(query)
+	batchPtrs := []*BatchAPI{}
+	if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
+		return nil, nil, err
+	}
+	batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
+	if len(batches) == 0 {
+		return nil, nil, sql.ErrNoRows
+	}
+	return batches, &db.Pagination{
+		TotalItems: batches[0].TotalItems,
+		FirstItem:  batches[0].FirstItem,
+		LastItem:   batches[0].LastItem,
+	}, nil
+}
+
 // GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to
 func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
 	var batches []*common.Batch
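Note (illustrative, not part of this commit): GetBatchesAPI builds its WHERE clause incrementally (each non-nil filter appends "WHERE"/"AND" plus its condition), bounds the page with item_id, and uses window functions so the totals needed for db.Pagination come back in the same query. A hypothetical caller passes nil for any unused filter:

package example

import "github.com/hermeznetwork/hermez-node/db/historydb"

// fetchRecentBatches is an illustrative caller sketch: nil means "no filter".
func fetchRecentBatches(hdb *historydb.HistoryDB) ([]historydb.BatchAPI, error) {
	minBatchNum := uint(10) // only batches with batch_num > 10
	limit := uint(20)       // at most 20 rows per page
	batches, _, err := hdb.GetBatchesAPI(
		&minBatchNum, nil, nil, // minBatchNum, maxBatchNum, slotNum
		nil, // forgerAddr
		nil, &limit, historydb.OrderDesc, // fromItem, limit, order
	)
	return batches, err
}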
@@ -5,8 +5,8 @@ import (
 	"time"
 
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/common"
-	"github.com/hermeznetwork/hermez-node/db"
 	"github.com/iden3/go-iden3-crypto/babyjub"
 	"github.com/iden3/go-merkletree"
 )
@@ -133,20 +133,21 @@ type HistoryCoordinator struct {
 	LastItem  int `meddler:"last_item"`
 }
 
-// HistoryBatch is a representation of a batch with additional information
+// BatchAPI is a representation of a batch with additional information
 // required by the API, and extracted by joining block table
-type HistoryBatch struct {
+type BatchAPI struct {
 	ItemID        int               `json:"itemId" meddler:"item_id"`
 	BatchNum      common.BatchNum   `json:"batchNum" meddler:"batch_num"`
 	EthBlockNum   int64             `json:"ethereumBlockNum" meddler:"eth_block_num"`
 	EthBlockHash  ethCommon.Hash    `json:"ethereumBlockHash" meddler:"hash"`
 	Timestamp     time.Time         `json:"timestamp" meddler:"timestamp,utctime"`
 	ForgerAddr    ethCommon.Address `json:"forgerAddr" meddler:"forger_addr"`
-	CollectedFees map[common.TokenID]db.BigIntStr `json:"collectedFees" meddler:"fees_collected,json"`
-	TotalFeesUSD  float64           `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
-	StateRoot     db.BigIntStr      `json:"stateRoot" meddler:"state_root"`
+	CollectedFees apitypes.CollectedFees `json:"collectedFees" meddler:"fees_collected,json"`
+	// CollectedFees map[common.TokenID]*big.Int `json:"collectedFees" meddler:"fees_collected,json"`
+	TotalFeesUSD  *float64           `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
+	StateRoot     apitypes.BigIntStr `json:"stateRoot" meddler:"state_root"`
 	NumAccounts   int               `json:"numAccounts" meddler:"num_accounts"`
-	ExitRoot      db.BigIntStr      `json:"exitRoot" meddler:"exit_root"`
+	ExitRoot      apitypes.BigIntStr `json:"exitRoot" meddler:"exit_root"`
 	ForgeL1TxsNum *int64            `json:"forgeL1TransactionsNum" meddler:"forge_l1_txs_num"`
 	SlotNum       int64             `json:"slotNum" meddler:"slot_num"`
 	TotalItems    int               `json:"-" meddler:"total_items"`
@@ -17,7 +17,7 @@ CREATE TABLE coordinator (
 
 CREATE TABLE batch (
     item_id SERIAL PRIMARY KEY,
-    batch_num BIGINT NOT NULL,
+    batch_num BIGINT UNIQUE NOT NULL,
     eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
     forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
     fees_collected BYTEA NOT NULL,
@@ -192,11 +192,3 @@ type Paginationer interface {
 	GetPagination() *Pagination
 	Len() int
 }
-
-// BigIntStr is used to Marshal *big.Int directly into strings
-type BigIntStr big.Int
-
-func (b BigIntStr) MarshalText() ([]byte, error) {
-	bigInt := big.Int(b)
-	return []byte((&bigInt).String()), nil
-}
@@ -60,10 +60,10 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
 			//nolint:gomnd
 			ForgerAddr:    ethCommon.BigToAddress(big.NewInt(6886723)),
 			CollectedFees: collectedFees,
-			StateRoot:     big.NewInt(int64(i) * 5), //nolint:gomnd
+			StateRoot:     big.NewInt(int64(i+1) * 5), //nolint:gomnd
 			//nolint:gomnd
 			NumAccounts: 30,
-			ExitRoot:    big.NewInt(int64(i) * 16), //nolint:gomnd
+			ExitRoot:    big.NewInt(int64(i+1) * 16), //nolint:gomnd
 			SlotNum:     int64(i),
 		}
 		if i%2 == 0 {