mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 03:16:45 +01:00)
Merge pull request #226 from hermeznetwork/feature/api-batches
Feature/api batches
@@ -40,7 +40,7 @@ const apiURL = "http://localhost" + apiPort + "/"
 type testCommon struct {
     blocks       []common.Block
     tokens       []tokenAPI
-    batches      []common.Batch
+    batches      []testBatch
     coordinators []coordinatorAPI
     usrAddr      string
     usrBjj       string
@@ -651,7 +651,7 @@ func TestMain(m *testing.M) {
     tc = testCommon{
         blocks:       blocks,
         tokens:       tokensUSD,
-        batches:      batches,
+        batches:      genTestBatches(blocks, batches),
         coordinators: apiCoordinators,
         usrAddr:      ethAddrToHez(usrAddr),
         usrBjj:       bjjToString(usrBjj),
@@ -1208,7 +1208,6 @@ func TestPoolTxs(t *testing.T) {
     jsonTxBytes, err := json.Marshal(tx)
     assert.NoError(t, err)
     jsonTxReader := bytes.NewReader(jsonTxBytes)
-    fmt.Println(string(jsonTxBytes))
     assert.NoError(
         t, doGoodReq(
             "POST",
api/batch.go (new file, 116 lines)
@@ -0,0 +1,116 @@
package api

import (
    "errors"
    "net/http"

    "github.com/gin-gonic/gin"
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/hermeznetwork/hermez-node/db"
    "github.com/hermeznetwork/hermez-node/db/historydb"
)

func getBatches(c *gin.Context) {
    // Get query parameters
    // minBatchNum
    minBatchNum, err := parseQueryUint("minBatchNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // maxBatchNum
    maxBatchNum, err := parseQueryUint("maxBatchNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // slotNum
    slotNum, err := parseQueryUint("slotNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // forgerAddr
    forgerAddr, err := parseQueryEthAddr("forgerAddr", c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // pagination
    fromItem, order, limit, err := parsePagination(c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // Fetch batches from historyDB
    batches, pagination, err := h.GetBatchesAPI(
        minBatchNum, maxBatchNum, slotNum, forgerAddr, fromItem, limit, order,
    )
    if err != nil {
        retSQLErr(err, c)
        return
    }

    // Build succesfull response
    type batchesResponse struct {
        Batches    []historydb.BatchAPI `json:"batches"`
        Pagination *db.Pagination       `json:"pagination"`
    }
    c.JSON(http.StatusOK, &batchesResponse{
        Batches:    batches,
        Pagination: pagination,
    })
}

func getBatch(c *gin.Context) {
    // Get batchNum
    batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    if batchNum == nil { // batchNum is required
        retBadReq(errors.New("Invalid batchNum"), c)
        return
    }
    // Fetch batch from historyDB
    batch, err := h.GetBatchAPI(common.BatchNum(*batchNum))
    if err != nil {
        retSQLErr(err, c)
        return
    }
    // JSON response
    c.JSON(http.StatusOK, batch)
}

type fullBatch struct {
    Batch *historydb.BatchAPI
    Txs   []historyTxAPI
}

func getFullBatch(c *gin.Context) {
    // Get batchNum
    batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    if batchNum == nil {
        retBadReq(errors.New("Invalid batchNum"), c)
        return
    }
    // Fetch batch from historyDB
    batch, err := h.GetBatchAPI(common.BatchNum(*batchNum))
    if err != nil {
        retSQLErr(err, c)
        return
    }
    // Fetch txs from historyDB
    // TODO
    txs := []historyTxAPI{}
    // JSON response
    c.JSON(http.StatusOK, fullBatch{
        Batch: batch,
        Txs:   txs,
    })
}
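For orientation, here is a minimal sketch (not part of this diff) of how the three new handlers could be mounted on a gin router. The route paths follow the endpoints described by the swagger changes later in this PR, but the helper name and exact paths are assumptions, not code from the repository.

package api

import "github.com/gin-gonic/gin"

// setBatchRoutes is a hypothetical helper: the real route registration lives
// elsewhere in the api package and is not shown in this diff.
func setBatchRoutes(v1 *gin.RouterGroup) {
    v1.GET("/batches", getBatches)                  // filterable, paginated list
    v1.GET("/batches/:batchNum", getBatch)          // single batch by number
    v1.GET("/full-batches/:batchNum", getFullBatch) // batch plus its forged transactions
}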
api/batch_test.go (new file, 243 lines)
@@ -0,0 +1,243 @@
package api

import (
    "fmt"
    "strconv"
    "testing"
    "time"

    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/hermeznetwork/hermez-node/db"
    "github.com/hermeznetwork/hermez-node/db/historydb"
    "github.com/mitchellh/copystructure"
    "github.com/stretchr/testify/assert"
)

type testBatch struct {
    ItemID        int                       `json:"itemId"`
    BatchNum      common.BatchNum           `json:"batchNum"`
    EthBlockNum   int64                     `json:"ethereumBlockNum"`
    EthBlockHash  ethCommon.Hash            `json:"ethereumBlockHash"`
    Timestamp     time.Time                 `json:"timestamp"`
    ForgerAddr    ethCommon.Address         `json:"forgerAddr"`
    CollectedFees map[common.TokenID]string `json:"collectedFees"`
    TotalFeesUSD  *float64                  `json:"historicTotalCollectedFeesUSD"`
    StateRoot     string                    `json:"stateRoot"`
    NumAccounts   int                       `json:"numAccounts"`
    ExitRoot      string                    `json:"exitRoot"`
    ForgeL1TxsNum *int64                    `json:"forgeL1TransactionsNum"`
    SlotNum       int64                     `json:"slotNum"`
}
type testBatchesResponse struct {
    Batches    []testBatch    `json:"batches"`
    Pagination *db.Pagination `json:"pagination"`
}

func (t testBatchesResponse) GetPagination() *db.Pagination {
    if t.Batches[0].ItemID < t.Batches[len(t.Batches)-1].ItemID {
        t.Pagination.FirstReturnedItem = t.Batches[0].ItemID
        t.Pagination.LastReturnedItem = t.Batches[len(t.Batches)-1].ItemID
    } else {
        t.Pagination.LastReturnedItem = t.Batches[0].ItemID
        t.Pagination.FirstReturnedItem = t.Batches[len(t.Batches)-1].ItemID
    }
    return t.Pagination
}

func (t testBatchesResponse) Len() int {
    return len(t.Batches)
}

func genTestBatches(blocks []common.Block, cBatches []common.Batch) []testBatch {
    tBatches := []testBatch{}
    for _, cBatch := range cBatches {
        block := common.Block{}
        found := false
        for _, b := range blocks {
            if b.EthBlockNum == cBatch.EthBlockNum {
                block = b
                found = true
                break
            }
        }
        if !found {
            panic("block not found")
        }
        collectedFees := make(map[common.TokenID]string)
        for k, v := range cBatch.CollectedFees {
            collectedFees[k] = v.String()
        }
        tBatch := testBatch{
            BatchNum:      cBatch.BatchNum,
            EthBlockNum:   cBatch.EthBlockNum,
            EthBlockHash:  block.Hash,
            Timestamp:     block.Timestamp,
            ForgerAddr:    cBatch.ForgerAddr,
            CollectedFees: collectedFees,
            TotalFeesUSD:  cBatch.TotalFeesUSD,
            StateRoot:     cBatch.StateRoot.String(),
            NumAccounts:   cBatch.NumAccounts,
            ExitRoot:      cBatch.ExitRoot.String(),
            ForgeL1TxsNum: cBatch.ForgeL1TxsNum,
            SlotNum:       cBatch.SlotNum,
        }
        tBatches = append(tBatches, tBatch)
    }
    return tBatches
}

func TestGetBatches(t *testing.T) {
    endpoint := apiURL + "batches"
    fetchedBatches := []testBatch{}
    appendIter := func(intr interface{}) {
        for i := 0; i < len(intr.(*testBatchesResponse).Batches); i++ {
            tmp, err := copystructure.Copy(intr.(*testBatchesResponse).Batches[i])
            if err != nil {
                panic(err)
            }
            fetchedBatches = append(fetchedBatches, tmp.(testBatch))
        }
    }
    // Get all (no filters)
    limit := 3
    path := fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit)
    err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    assertBatches(t, tc.batches, fetchedBatches)

    // minBatchNum
    fetchedBatches = []testBatch{}
    limit = 2
    minBatchNum := tc.batches[len(tc.batches)/2].BatchNum
    path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d&fromItem=", endpoint, minBatchNum, limit)
    err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    minBatchNumBatches := []testBatch{}
    for i := 0; i < len(tc.batches); i++ {
        if tc.batches[i].BatchNum > minBatchNum {
            minBatchNumBatches = append(minBatchNumBatches, tc.batches[i])
        }
    }
    assertBatches(t, minBatchNumBatches, fetchedBatches)

    // maxBatchNum
    fetchedBatches = []testBatch{}
    limit = 1
    maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum
    path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d&fromItem=", endpoint, maxBatchNum, limit)
    err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    maxBatchNumBatches := []testBatch{}
    for i := 0; i < len(tc.batches); i++ {
        if tc.batches[i].BatchNum < maxBatchNum {
            maxBatchNumBatches = append(maxBatchNumBatches, tc.batches[i])
        }
    }
    assertBatches(t, maxBatchNumBatches, fetchedBatches)

    // slotNum
    fetchedBatches = []testBatch{}
    limit = 5
    slotNum := tc.batches[len(tc.batches)/2].SlotNum
    path = fmt.Sprintf("%s?slotNum=%d&limit=%d&fromItem=", endpoint, slotNum, limit)
    err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    slotNumBatches := []testBatch{}
    for i := 0; i < len(tc.batches); i++ {
        if tc.batches[i].SlotNum == slotNum {
            slotNumBatches = append(slotNumBatches, tc.batches[i])
        }
    }
    assertBatches(t, slotNumBatches, fetchedBatches)

    // forgerAddr
    fetchedBatches = []testBatch{}
    limit = 10
    forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr
    path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d&fromItem=", endpoint, forgerAddr.String(), limit)
    err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    forgerAddrBatches := []testBatch{}
    for i := 0; i < len(tc.batches); i++ {
        if tc.batches[i].ForgerAddr == forgerAddr {
            forgerAddrBatches = append(forgerAddrBatches, tc.batches[i])
        }
    }
    assertBatches(t, forgerAddrBatches, fetchedBatches)

    // All, in reverse order
    fetchedBatches = []testBatch{}
    limit = 6
    path = fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit)
    err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    flippedBatches := []testBatch{}
    for i := len(tc.batches) - 1; i >= 0; i-- {
        flippedBatches = append(flippedBatches, tc.batches[i])
    }
    assertBatches(t, flippedBatches, fetchedBatches)

    // Mixed filters
    fetchedBatches = []testBatch{}
    limit = 1
    maxBatchNum = tc.batches[len(tc.batches)-len(tc.batches)/4].BatchNum
    minBatchNum = tc.batches[len(tc.batches)/4].BatchNum
    path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d&fromItem=", endpoint, minBatchNum, maxBatchNum, limit)
    err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter)
    assert.NoError(t, err)
    minMaxBatchNumBatches := []testBatch{}
    for i := 0; i < len(tc.batches); i++ {
        if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum {
            minMaxBatchNumBatches = append(minMaxBatchNumBatches, tc.batches[i])
        }
    }
    assertBatches(t, minMaxBatchNumBatches, fetchedBatches)

    // 400
    // Invalid minBatchNum
    path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2)
    err = doBadReq("GET", path, nil, 400)
    assert.NoError(t, err)
    // Invalid forgerAddr
    path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001")
    err = doBadReq("GET", path, nil, 400)
    assert.NoError(t, err)
    // 404
    path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25)
    err = doBadReq("GET", path, nil, 404)
    assert.NoError(t, err)
}

func TestGetBatch(t *testing.T) {
    endpoint := apiURL + "batches/"
    for _, batch := range tc.batches {
        fetchedBatch := testBatch{}
        assert.NoError(
            t, doGoodReq(
                "GET",
                endpoint+strconv.Itoa(int(batch.BatchNum)),
                nil, &fetchedBatch,
            ),
        )
        assertBatch(t, batch, fetchedBatch)
    }
    // 400
    assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400))
    // 404
    assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404))
}

func assertBatches(t *testing.T, expected, actual []testBatch) {
    assert.Equal(t, len(expected), len(actual))
    for i := 0; i < len(expected); i++ {
        assertBatch(t, expected[i], actual[i])
    }
}

func assertBatch(t *testing.T, expected, actual testBatch) {
    assert.Equal(t, expected.Timestamp.Unix(), actual.Timestamp.Unix())
    expected.Timestamp = actual.Timestamp
    actual.ItemID = expected.ItemID
    assert.Equal(t, expected, actual)
}
@@ -178,49 +178,6 @@ func getExit(c *gin.Context) {
     c.JSON(http.StatusOK, apiExits[0])
 }
 
-func getHistoryTxs(c *gin.Context) {
-    // Get query parameters
-    tokenID, addr, bjj, idx, err := parseAccountFilters(c)
-    if err != nil {
-        retBadReq(err, c)
-        return
-    }
-    // BatchNum
-    batchNum, err := parseQueryUint("batchNum", nil, 0, maxUint32, c)
-    if err != nil {
-        retBadReq(err, c)
-        return
-    }
-    // TxType
-    txType, err := parseQueryTxType(c)
-    if err != nil {
-        retBadReq(err, c)
-        return
-    }
-    // Pagination
-    fromItem, order, limit, err := parsePagination(c)
-    if err != nil {
-        retBadReq(err, c)
-        return
-    }
-
-    // Fetch txs from historyDB
-    txs, pagination, err := h.GetHistoryTxs(
-        addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
-    )
-    if err != nil {
-        retSQLErr(err, c)
-        return
-    }
-
-    // Build succesfull response
-    apiTxs := historyTxsToAPI(txs)
-    c.JSON(http.StatusOK, &historyTxsAPI{
-        Txs:        apiTxs,
-        Pagination: pagination,
-    })
-}
-
 func getHistoryTx(c *gin.Context) {
     // Get TxID
     txID, err := parseParamTxID(c)
@@ -239,18 +196,6 @@ func getHistoryTx(c *gin.Context) {
     c.JSON(http.StatusOK, apiTxs[0])
 }
 
-func getBatches(c *gin.Context) {
-
-}
-
-func getBatch(c *gin.Context) {
-
-}
-
-func getFullBatch(c *gin.Context) {
-
-}
-
 func getSlots(c *gin.Context) {
 
 }
@@ -349,7 +294,7 @@ func getCoordinators(c *gin.Context) {
 func getCoordinator(c *gin.Context) {
     // Get bidderAddr
     const name = "bidderAddr"
-    bidderAddr, err := parseEthAddr(c, name)
+    bidderAddr, err := parseParamEthAddr(name, c)
 
     if err != nil {
         retBadReq(err, c)
@@ -311,13 +311,25 @@ func hezStringToBJJ(bjjStr, name string) (*babyjub.PublicKey, error) {
     return bjj, nil
 }
 
-func parseEthAddr(c paramer, name string) (*ethCommon.Address, error) {
+func parseQueryEthAddr(name string, c querier) (*ethCommon.Address, error) {
+    addrStr := c.Query(name)
+    if addrStr == "" {
+        return nil, nil
+    }
+    return parseEthAddr(addrStr)
+}
+
+func parseParamEthAddr(name string, c paramer) (*ethCommon.Address, error) {
     addrStr := c.Param(name)
     if addrStr == "" {
         return nil, nil
     }
+    return parseEthAddr(addrStr)
+}
+
+func parseEthAddr(ethAddrStr string) (*ethCommon.Address, error) {
     var addr ethCommon.Address
-    err := addr.UnmarshalText([]byte(addrStr))
+    err := addr.UnmarshalText([]byte(ethAddrStr))
     return &addr, err
 }
 
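The refactor above splits address parsing so the same core can serve both the query string and path parameters. As a hedged illustration, a hypothetical caller inside the api package (this function is not part of the PR; it only shows the intended call pattern) would use the two wrappers like this:

package api

import "github.com/gin-gonic/gin"

// ethAddrFiltersExample is a hypothetical caller showing the intended use of
// the two wrappers added above: same parsing core, different gin source.
func ethAddrFiltersExample(c *gin.Context) {
    // e.g. GET /batches?forgerAddr=0x... -> value comes from the query string
    forger, err := parseQueryEthAddr("forgerAddr", c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // e.g. GET /coordinators/:bidderAddr -> value comes from the path parameter
    bidder, err := parseParamEthAddr("bidderAddr", c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    _, _ = forger, bidder // both are nil (with no error) when the parameter is absent
}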
@@ -309,16 +309,16 @@ func TestParseEthAddr(t *testing.T) {
     ethAddr := ethCommon.BigToAddress(big.NewInt(int64(123456)))
     // Default
     c.m[name] = ""
-    res, err := parseEthAddr(c, name)
+    res, err := parseQueryEthAddr(name, c)
     assert.NoError(t, err)
     assert.Nil(t, res)
     // Incorrect
     c.m[name] = "0x12345678"
-    _, err = parseEthAddr(c, name)
+    _, err = parseQueryEthAddr(name, c)
     assert.Error(t, err)
     // Correct
     c.m[name] = ethAddr.String()
-    res, err = parseEthAddr(c, name)
+    res, err = parseQueryEthAddr(name, c)
     assert.NoError(t, err)
     assert.Equal(t, ethAddr, *res)
 }
@@ -605,13 +605,13 @@ paths:
       - name: minBatchNum
         in: query
         required: false
-        description: Include only `batchNum < minBatchNum` batches.
+        description: Include only `batchNum > minBatchNum` batches.
         schema:
           $ref: '#/components/schemas/BatchNum'
       - name: maxBatchNum
         in: query
         required: false
-        description: Include only `batchNum > maxBatchNum` batches.
+        description: Include only `batchNum < maxBatchNum` batches.
         schema:
           type: number
       - name: slotNum
@@ -1492,6 +1492,7 @@ components:
       type: string
       description: BigInt is an integer encoded as a string for numbers that are very large.
       example: "8708856933496328593"
+      pattern: "^\\d+$"
     FeeSelector:
       type: integer
       description: Index of the fee type to select, more info [here](https://idocs.hermez.io/#/spec/zkrollup/fee-table?id=transaction-fee-table).
@@ -1733,22 +1734,19 @@ components:
         description: Token name.
         example: "Dai"
     CollectedFees:
-      type: array
-      description: Collected fees by the forger of the batch. A maximum of 64 different tokens can be used.
-      items:
-        type: object
-        properties:
-          tokenId:
-            $ref: '#/components/schemas/TokenId'
-          amount:
-            allOf:
-            - $ref: '#/components/schemas/BigInt'
-            - description: Ammount of collected tokens
-            - example: "53"
+      type: object
+      description: Collected fees by the forger of the batch, represented by a map of tokenId => amount. A maximum of 64 different tokens can be used.
+      additionalProperties:
+        type: string
+      example:
+        1234: "425632785672345647"
+        4321: "86538967235465432654352"
     Batch:
       type: object
       description: Group of transactions forged in a coordinator and sent and validated in Ethereum.
       properties:
+        itemId:
+          $ref: '#/components/schemas/ItemId'
         batchNum:
           $ref: '#/components/schemas/BatchNum'
         ethereumBlockNum:
@@ -1783,12 +1781,27 @@ components:
           - description: Root of the exit Merkle Tree associated to this batch.
           - example: "2734657026572a8708d883"
         forgeL1TransactionsNum:
-          allOf:
-          - $ref: '#/components/schemas/ToForgeL1TransactionsNum'
-          - description: Identifier that corresponds to the group of L1 transactions forged in the current batch.
-          - example: 5
+          type: integer
+          description: Identifier that corresponds to the group of L1 transactions forged in the current batch.
+          example: 5
+          nullable: true
         slotNum:
           $ref: '#/components/schemas/SlotNum'
+      additionalProperties: false
+      required:
+        - itemId
+        - batchNum
+        - ethereumBlockNum
+        - ethereumBlockHash
+        - timestamp
+        - forgerAddr
+        - collectedFees
+        - historicTotalCollectedFeesUSD
+        - stateRoot
+        - numAccounts
+        - exitRoot
+        - forgeL1TransactionsNum
+        - slotNum
     FullBatch:
       type: object
       description: Group of transactions forged in a coordinator and sent and validated in Ethereum.
@@ -1800,6 +1813,10 @@ components:
          description: List of forged transactions in the batch
          items:
            $ref: '#/components/schemas/HistoryTransaction'
+      additionalProperties: false
+      required:
+        - batch
+        - transactions
     Hash:
       type: string
       description: hashed data
@@ -1820,6 +1837,10 @@ components:
          $ref: '#/components/schemas/Batch'
        pagination:
          $ref: '#/components/schemas/PaginationInfo'
+      additionalProperties: false
+      required:
+        - batches
+        - pagination
     Coordinator:
       type: object
       properties:
api/txshistory.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package api

import (
    "net/http"

    "github.com/gin-gonic/gin"
)

func getHistoryTxs(c *gin.Context) {
    // Get query parameters
    tokenID, addr, bjj, idx, err := parseAccountFilters(c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // BatchNum
    batchNum, err := parseQueryUint("batchNum", nil, 0, maxUint32, c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // TxType
    txType, err := parseQueryTxType(c)
    if err != nil {
        retBadReq(err, c)
        return
    }
    // Pagination
    fromItem, order, limit, err := parsePagination(c)
    if err != nil {
        retBadReq(err, c)
        return
    }

    // Fetch txs from historyDB
    txs, pagination, err := h.GetHistoryTxs(
        addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order,
    )
    if err != nil {
        retSQLErr(err, c)
        return
    }

    // Build succesfull response
    apiTxs := historyTxsToAPI(txs)
    c.JSON(http.StatusOK, &historyTxsAPI{
        Txs:        apiTxs,
        Pagination: pagination,
    })
}
apitypes/apitypes.go (new file, 195 lines)
@@ -0,0 +1,195 @@
package apitypes

import (
    "database/sql/driver"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "math/big"
    "strings"

    ethCommon "github.com/ethereum/go-ethereum/common"
    "github.com/hermeznetwork/hermez-node/common"
    "github.com/iden3/go-iden3-crypto/babyjub"
)

// BigIntStr is used to scan/value *big.Int directly into strings from/to sql DBs.
// It assumes that *big.Int are inserted/fetched to/from the DB using the BigIntMeddler meddler
// defined at github.com/hermeznetwork/hermez-node/db
type BigIntStr string

// NewBigIntStr creates a *BigIntStr from a *big.Int.
// If the provided bigInt is nil the returned *BigIntStr will also be nil
func NewBigIntStr(bigInt *big.Int) *BigIntStr {
    if bigInt == nil {
        return nil
    }
    bigIntStr := BigIntStr(bigInt.String())
    return &bigIntStr
}

// Scan implements Scanner for database/sql
func (b *BigIntStr) Scan(src interface{}) error {
    // decode base64 src
    var decoded []byte
    var err error
    if srcStr, ok := src.(string); ok {
        // src is a string
        decoded, err = base64.StdEncoding.DecodeString(srcStr)
    } else if srcBytes, ok := src.([]byte); ok {
        // src is []byte
        decoded, err = base64.StdEncoding.DecodeString(string(srcBytes))
    } else {
        // unexpected src
        return fmt.Errorf("can't scan %T into apitypes.BigIntStr", src)
    }
    if err != nil {
        return err
    }
    // decoded bytes to *big.Int
    bigInt := &big.Int{}
    bigInt = bigInt.SetBytes(decoded)
    // *big.Int to BigIntStr
    bigIntStr := NewBigIntStr(bigInt)
    if bigIntStr == nil {
        return nil
    }
    *b = *bigIntStr
    return nil
}

// Value implements valuer for database/sql
func (b BigIntStr) Value() (driver.Value, error) {
    // string to *big.Int
    bigInt := &big.Int{}
    bigInt, ok := bigInt.SetString(string(b), 10)
    if !ok || bigInt == nil {
        return nil, errors.New("invalid representation of a *big.Int")
    }
    // *big.Int to base64
    return base64.StdEncoding.EncodeToString(bigInt.Bytes()), nil
}

// CollectedFees is used to retrieve common.batch.CollectedFee from the DB
type CollectedFees map[common.TokenID]BigIntStr

// UnmarshalJSON unmarshals a json representation of map[common.TokenID]*big.Int
func (c *CollectedFees) UnmarshalJSON(text []byte) error {
    bigIntMap := make(map[common.TokenID]*big.Int)
    if err := json.Unmarshal(text, &bigIntMap); err != nil {
        return err
    }
    bStrMap := make(map[common.TokenID]BigIntStr)
    for k, v := range bigIntMap {
        bStr := NewBigIntStr(v)
        bStrMap[k] = *bStr
    }
    *c = CollectedFees(bStrMap)
    return nil
}

// HezEthAddr is used to scan/value Ethereum Address directly into strings that follow the Ethereum address hez fotmat (^hez:0x[a-fA-F0-9]{40}$) from/to sql DBs.
// It assumes that Ethereum Address are inserted/fetched to/from the DB using the default Scan/Value interface
type HezEthAddr string

// NewHezEthAddr creates a HezEthAddr from an Ethereum addr
func NewHezEthAddr(addr ethCommon.Address) HezEthAddr {
    return HezEthAddr("hez:" + addr.String())
}

// ToEthAddr returns an Ethereum Address created from HezEthAddr
func (a HezEthAddr) ToEthAddr() (ethCommon.Address, error) {
    addrStr := strings.TrimPrefix(string(a), "hez:")
    var addr ethCommon.Address
    return addr, addr.UnmarshalText([]byte(addrStr))
}

// Scan implements Scanner for database/sql
func (a *HezEthAddr) Scan(src interface{}) error {
    ethAddr := &ethCommon.Address{}
    if err := ethAddr.Scan(src); err != nil {
        return err
    }
    if ethAddr == nil {
        return nil
    }
    *a = NewHezEthAddr(*ethAddr)
    return nil
}

// Value implements valuer for database/sql
func (a HezEthAddr) Value() (driver.Value, error) {
    ethAddr, err := a.ToEthAddr()
    if err != nil {
        return nil, err
    }
    return ethAddr.Value()
}

// HezBJJ is used to scan/value *babyjub.PublicKey directly into strings that follow the BJJ public key hez fotmat (^hez:[A-Za-z0-9_-]{44}$) from/to sql DBs.
// It assumes that *babyjub.PublicKey are inserted/fetched to/from the DB using the default Scan/Value interface
type HezBJJ string

// NewHezBJJ creates a HezBJJ from a *babyjub.PublicKey.
// Calling this method with a nil bjj causes panic
func NewHezBJJ(bjj *babyjub.PublicKey) HezBJJ {
    pkComp := [32]byte(bjj.Compress())
    sum := pkComp[0]
    for i := 1; i < len(pkComp); i++ {
        sum += pkComp[i]
    }
    bjjSum := append(pkComp[:], sum)
    return HezBJJ("hez:" + base64.RawURLEncoding.EncodeToString(bjjSum))
}

// ToBJJ returns a *babyjub.PublicKey created from HezBJJ
func (b HezBJJ) ToBJJ() (*babyjub.PublicKey, error) {
    const decodedLen = 33
    const encodedLen = 44
    formatErr := errors.New("invalid BJJ format. Must follow this regex: ^hez:[A-Za-z0-9_-]{44}$")
    encoded := strings.TrimPrefix(string(b), "hez:")
    if len(encoded) != encodedLen {
        return nil, formatErr
    }
    decoded, err := base64.RawURLEncoding.DecodeString(encoded)
    if err != nil {
        return nil, formatErr
    }
    if len(decoded) != decodedLen {
        return nil, formatErr
    }
    bjjBytes := [decodedLen - 1]byte{}
    copy(bjjBytes[:decodedLen-1], decoded[:decodedLen-1])
    sum := bjjBytes[0]
    for i := 1; i < len(bjjBytes); i++ {
        sum += bjjBytes[i]
    }
    if decoded[decodedLen-1] != sum {
        return nil, errors.New("checksum verification failed")
    }
    bjjComp := babyjub.PublicKeyComp(bjjBytes)
    return bjjComp.Decompress()
}

// Scan implements Scanner for database/sql
func (b *HezBJJ) Scan(src interface{}) error {
    bjj := &babyjub.PublicKey{}
    if err := bjj.Scan(src); err != nil {
        return err
    }
    if bjj == nil {
        return nil
    }
    *b = NewHezBJJ(bjj)
    return nil
}

// Value implements valuer for database/sql
func (b HezBJJ) Value() (driver.Value, error) {
    bjj, err := b.ToBJJ()
    if err != nil {
        return nil, err
    }
    return bjj.Value()
}
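As a quick illustration of the new API types, here is a hedged usage sketch (not part of this PR, and the numeric and address values are arbitrary): internal types such as big.Int and Ethereum addresses become plain strings at the API boundary.

package apitypes

import (
    "fmt"
    "math/big"

    ethCommon "github.com/ethereum/go-ethereum/common"
)

// apitypesUsageSketch shows the intended direction of conversion for the
// helpers defined above.
func apitypesUsageSketch() {
    fee := NewBigIntStr(big.NewInt(425632785672345647))
    fmt.Println(*fee) // 425632785672345647

    addr := NewHezEthAddr(ethCommon.HexToAddress("0x0000000000000000000000000000000000000001"))
    fmt.Println(addr) // hez:0x0000000000000000000000000000000000000001

    back, err := addr.ToEthAddr() // strips the "hez:" prefix and parses the hex address
    fmt.Println(back, err)
}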
apitypes/apitypes_test.go (new file, 285 lines)
@@ -0,0 +1,285 @@
package apitypes

import (
    "database/sql"
    "io/ioutil"
    "math/big"
    "os"
    "testing"

    ethCommon "github.com/ethereum/go-ethereum/common"
    dbUtils "github.com/hermeznetwork/hermez-node/db"
    "github.com/iden3/go-iden3-crypto/babyjub"
    _ "github.com/mattn/go-sqlite3" //nolint sqlite driver
    "github.com/russross/meddler"
    "github.com/stretchr/testify/assert"
)

var db *sql.DB

func TestMain(m *testing.M) {
    // Register meddler
    meddler.Default = meddler.SQLite
    meddler.Register("bigint", dbUtils.BigIntMeddler{})
    meddler.Register("bigintnull", dbUtils.BigIntNullMeddler{})
    // Create temporary sqlite DB
    dir, err := ioutil.TempDir("", "db")
    if err != nil {
        panic(err)
    }
    db, err = sql.Open("sqlite3", dir+"sqlite.db")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir) //nolint
    schema := `CREATE TABLE test (i BLOB);`
    if _, err := db.Exec(schema); err != nil {
        panic(err)
    }
    // Run tests
    result := m.Run()
    os.Exit(result)
}

func TestBigIntStrScannerValuer(t *testing.T) {
    // Clean DB
    _, err := db.Exec("delete from test")
    assert.NoError(t, err)
    // Example structs
    type bigInMeddlerStruct struct {
        I *big.Int `meddler:"i,bigint"` // note the bigint that instructs meddler to use BigIntMeddler
    }
    type bigIntStrStruct struct {
        I BigIntStr `meddler:"i"` // note that no meddler is specified, and Scan/Value will be used
    }
    type bigInMeddlerStructNil struct {
        I *big.Int `meddler:"i,bigintnull"` // note the bigint that instructs meddler to use BigIntNullMeddler
    }
    type bigIntStrStructNil struct {
        I *BigIntStr `meddler:"i"` // note that no meddler is specified, and Scan/Value will be used
    }

    // Not nil case
    // Insert into DB using meddler
    const x = int64(12345)
    fromMeddler := bigInMeddlerStruct{
        I: big.NewInt(x),
    }
    err = meddler.Insert(db, "test", &fromMeddler)
    assert.NoError(t, err)
    // Read from DB using BigIntStr
    toBigIntStr := bigIntStrStruct{}
    err = meddler.QueryRow(db, &toBigIntStr, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, fromMeddler.I.String(), string(toBigIntStr.I))
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using BigIntStr
    fromBigIntStr := bigIntStrStruct{
        I: "54321",
    }
    err = meddler.Insert(db, "test", &fromBigIntStr)
    assert.NoError(t, err)
    // Read from DB using meddler
    toMeddler := bigInMeddlerStruct{}
    err = meddler.QueryRow(db, &toMeddler, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, string(fromBigIntStr.I), toMeddler.I.String())

    // Nil case
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using meddler
    fromMeddlerNil := bigInMeddlerStructNil{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromMeddlerNil)
    assert.NoError(t, err)
    // Read from DB using BigIntStr
    foo := BigIntStr("foo")
    toBigIntStrNil := bigIntStrStructNil{
        I: &foo, // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toBigIntStrNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toBigIntStrNil.I)
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using BigIntStr
    fromBigIntStrNil := bigIntStrStructNil{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromBigIntStrNil)
    assert.NoError(t, err)
    // Read from DB using meddler
    toMeddlerNil := bigInMeddlerStructNil{
        I: big.NewInt(x), // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toMeddlerNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toMeddlerNil.I)
}

func TestHezEthAddr(t *testing.T) {
    // Clean DB
    _, err := db.Exec("delete from test")
    assert.NoError(t, err)
    // Example structs
    type ethAddrStruct struct {
        I ethCommon.Address `meddler:"i"`
    }
    type hezEthAddrStruct struct {
        I HezEthAddr `meddler:"i"`
    }
    type ethAddrStructNil struct {
        I *ethCommon.Address `meddler:"i"`
    }
    type hezEthAddrStructNil struct {
        I *HezEthAddr `meddler:"i"`
    }

    // Not nil case
    // Insert into DB using ethCommon.Address Scan/Value
    fromEth := ethAddrStruct{
        I: ethCommon.BigToAddress(big.NewInt(73737373)),
    }
    err = meddler.Insert(db, "test", &fromEth)
    assert.NoError(t, err)
    // Read from DB using HezEthAddr Scan/Value
    toHezEth := hezEthAddrStruct{}
    err = meddler.QueryRow(db, &toHezEth, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, NewHezEthAddr(fromEth.I), toHezEth.I)
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using HezEthAddr Scan/Value
    fromHezEth := hezEthAddrStruct{
        I: NewHezEthAddr(ethCommon.BigToAddress(big.NewInt(3786872586))),
    }
    err = meddler.Insert(db, "test", &fromHezEth)
    assert.NoError(t, err)
    // Read from DB using ethCommon.Address Scan/Value
    toEth := ethAddrStruct{}
    err = meddler.QueryRow(db, &toEth, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, fromHezEth.I, NewHezEthAddr(toEth.I))

    // Nil case
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using ethCommon.Address Scan/Value
    fromEthNil := ethAddrStructNil{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromEthNil)
    assert.NoError(t, err)
    // Read from DB using HezEthAddr Scan/Value
    foo := HezEthAddr("foo")
    toHezEthNil := hezEthAddrStructNil{
        I: &foo, // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toHezEthNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toHezEthNil.I)
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using HezEthAddr Scan/Value
    fromHezEthNil := hezEthAddrStructNil{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromHezEthNil)
    assert.NoError(t, err)
    // Read from DB using ethCommon.Address Scan/Value
    fooAddr := ethCommon.BigToAddress(big.NewInt(1))
    toEthNil := ethAddrStructNil{
        I: &fooAddr, // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toEthNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toEthNil.I)
}

func TestHezBJJ(t *testing.T) {
    // Clean DB
    _, err := db.Exec("delete from test")
    assert.NoError(t, err)
    // Example structs
    type bjjStruct struct {
        I *babyjub.PublicKey `meddler:"i"`
    }
    type hezBJJStruct struct {
        I HezBJJ `meddler:"i"`
    }
    type hezBJJStructNil struct {
        I *HezBJJ `meddler:"i"`
    }

    // Not nil case
    // Insert into DB using *babyjub.PublicKey Scan/Value
    priv := babyjub.NewRandPrivKey()
    fromBJJ := bjjStruct{
        I: priv.Public(),
    }
    err = meddler.Insert(db, "test", &fromBJJ)
    assert.NoError(t, err)
    // Read from DB using HezBJJ Scan/Value
    toHezBJJ := hezBJJStruct{}
    err = meddler.QueryRow(db, &toHezBJJ, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, NewHezBJJ(fromBJJ.I), toHezBJJ.I)
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using HezBJJ Scan/Value
    fromHezBJJ := hezBJJStruct{
        I: NewHezBJJ(priv.Public()),
    }
    err = meddler.Insert(db, "test", &fromHezBJJ)
    assert.NoError(t, err)
    // Read from DB using *babyjub.PublicKey Scan/Value
    toBJJ := bjjStruct{}
    err = meddler.QueryRow(db, &toBJJ, "select * from test")
    assert.NoError(t, err)
    assert.Equal(t, fromHezBJJ.I, NewHezBJJ(toBJJ.I))

    // Nil case
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using *babyjub.PublicKey Scan/Value
    fromBJJNil := bjjStruct{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromBJJNil)
    assert.NoError(t, err)
    // Read from DB using HezBJJ Scan/Value
    foo := HezBJJ("foo")
    toHezBJJNil := hezBJJStructNil{
        I: &foo, // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toHezBJJNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toHezBJJNil.I)
    // Clean DB
    _, err = db.Exec("delete from test")
    assert.NoError(t, err)
    // Insert into DB using HezBJJ Scan/Value
    fromHezBJJNil := hezBJJStructNil{
        I: nil,
    }
    err = meddler.Insert(db, "test", &fromHezBJJNil)
    assert.NoError(t, err)
    // Read from DB using *babyjub.PublicKey Scan/Value
    toBJJNil := bjjStruct{
        I: priv.Public(), // check that this will be set to nil, not because of not being initialized
    }
    err = meddler.QueryRow(db, &toBJJNil, "select * from test")
    assert.NoError(t, err)
    assert.Nil(t, toBJJNil.I)
}
@@ -14,7 +14,7 @@ const batchNumBytesLen = 8
 type Batch struct {
     BatchNum      BatchNum             `meddler:"batch_num"`
     EthBlockNum   int64                `meddler:"eth_block_num"` // Ethereum block in which the batch is forged
-    ForgerAddr    ethCommon.Address    `meddler:"forger_addr"` // TODO: Should this be retrieved via slot reference?
+    ForgerAddr    ethCommon.Address    `meddler:"forger_addr"`
     CollectedFees map[TokenID]*big.Int `meddler:"fees_collected,json"`
     StateRoot     *big.Int             `meddler:"state_root,bigint"`
     NumAccounts   int                  `meddler:"num_accounts"`
|||||||
@@ -150,6 +150,113 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBatchAPI return the batch with the given batchNum
|
||||||
|
func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
|
||||||
|
batch := &BatchAPI{}
|
||||||
|
return batch, meddler.QueryRow(
|
||||||
|
hdb.db, batch,
|
||||||
|
`SELECT batch.*, block.timestamp, block.hash
|
||||||
|
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
|
||||||
|
WHERE batch_num = $1;`, batchNum,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBatchesAPI return the batches applying the given filters
|
||||||
|
func (hdb *HistoryDB) GetBatchesAPI(
|
||||||
|
minBatchNum, maxBatchNum, slotNum *uint,
|
||||||
|
forgerAddr *ethCommon.Address,
|
||||||
|
fromItem, limit *uint, order string,
|
||||||
|
) ([]BatchAPI, *db.Pagination, error) {
|
||||||
|
var query string
|
||||||
|
var args []interface{}
|
||||||
|
queryStr := `SELECT batch.*, block.timestamp, block.hash,
|
||||||
|
count(*) OVER() AS total_items, MIN(batch.item_id) OVER() AS first_item,
|
||||||
|
MAX(batch.item_id) OVER() AS last_item
|
||||||
|
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num `
|
||||||
|
// Apply filters
|
||||||
|
nextIsAnd := false
|
||||||
|
// minBatchNum filter
|
||||||
|
if minBatchNum != nil {
|
||||||
|
if nextIsAnd {
|
||||||
|
queryStr += "AND "
|
||||||
|
} else {
|
||||||
|
queryStr += "WHERE "
|
||||||
|
}
|
||||||
|
queryStr += "batch.batch_num > ? "
|
||||||
|
args = append(args, minBatchNum)
|
||||||
|
nextIsAnd = true
|
||||||
|
}
|
||||||
|
// maxBatchNum filter
|
||||||
|
if maxBatchNum != nil {
|
||||||
|
if nextIsAnd {
|
||||||
|
queryStr += "AND "
|
||||||
|
} else {
|
||||||
|
queryStr += "WHERE "
|
||||||
|
}
|
||||||
|
queryStr += "batch.batch_num < ? "
|
||||||
|
args = append(args, maxBatchNum)
|
||||||
|
nextIsAnd = true
|
||||||
|
}
|
||||||
|
// slotNum filter
|
||||||
|
if slotNum != nil {
|
||||||
|
if nextIsAnd {
|
||||||
|
queryStr += "AND "
|
||||||
|
} else {
|
||||||
|
queryStr += "WHERE "
|
||||||
|
}
|
||||||
|
queryStr += "batch.slot_num = ? "
|
||||||
|
args = append(args, slotNum)
|
||||||
|
nextIsAnd = true
|
||||||
|
}
|
||||||
|
// forgerAddr filter
|
||||||
|
if forgerAddr != nil {
|
||||||
|
if nextIsAnd {
|
||||||
|
queryStr += "AND "
|
||||||
|
} else {
|
||||||
|
queryStr += "WHERE "
|
||||||
|
}
|
||||||
|
queryStr += "batch.forger_addr = ? "
|
||||||
|
args = append(args, forgerAddr)
|
||||||
|
nextIsAnd = true
|
||||||
|
}
|
||||||
|
// pagination
|
||||||
|
if fromItem != nil {
|
||||||
|
if nextIsAnd {
|
||||||
|
queryStr += "AND "
|
||||||
|
} else {
|
||||||
|
queryStr += "WHERE "
|
||||||
|
}
|
||||||
|
if order == OrderAsc {
|
||||||
|
queryStr += "batch.item_id >= ? "
|
||||||
|
} else {
|
||||||
|
queryStr += "batch.item_id <= ? "
|
||||||
|
}
|
||||||
|
args = append(args, fromItem)
|
||||||
|
}
|
||||||
|
queryStr += "ORDER BY batch.item_id "
|
||||||
|
if order == OrderAsc {
|
||||||
|
queryStr += " ASC "
|
||||||
|
} else {
|
||||||
|
queryStr += " DESC "
|
||||||
|
}
|
||||||
|
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
|
||||||
|
query = hdb.db.Rebind(queryStr)
|
||||||
|
log.Debug(query)
|
||||||
|
batchPtrs := []*BatchAPI{}
|
||||||
|
if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
|
||||||
|
if len(batches) == 0 {
|
||||||
|
return nil, nil, sql.ErrNoRows
|
||||||
|
}
|
||||||
|
return batches, &db.Pagination{
|
||||||
|
TotalItems: batches[0].TotalItems,
|
||||||
|
FirstItem: batches[0].FirstItem,
|
||||||
|
LastItem: batches[0].LastItem,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to
|
// GetBatches retrieve batches from the DB, given a range of batch numbers defined by from and to
|
||||||
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
|
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
|
||||||
var batches []*common.Batch
|
var batches []*common.Batch
|
||||||
|
|||||||
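To make the filter and pagination semantics above concrete, a hypothetical caller could look like the sketch below. It is not code from this PR; the address and numeric bounds are made-up values, and nil filters are simply skipped when the WHERE clause is built.

package historydb

import ethCommon "github.com/ethereum/go-ethereum/common"

// batchesAPIUsageSketch is illustrative only: batches with batchNum > 10,
// forged by a given address, first 20 items ordered by item_id ascending.
func batchesAPIUsageSketch(hdb *HistoryDB) ([]BatchAPI, error) {
    minBatchNum := uint(10)
    limit := uint(20)
    forger := ethCommon.HexToAddress("0x0000000000000000000000000000000000000001") // made-up address
    batches, pagination, err := hdb.GetBatchesAPI(&minBatchNum, nil, nil, &forger, nil, &limit, OrderAsc)
    if err != nil {
        return nil, err
    }
    _ = pagination // TotalItems / FirstItem / LastItem come from the window functions in the query
    return batches, nil
}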
@@ -5,6 +5,7 @@ import (
     "time"
 
     ethCommon "github.com/ethereum/go-ethereum/common"
+    "github.com/hermeznetwork/hermez-node/apitypes"
     "github.com/hermeznetwork/hermez-node/common"
     "github.com/iden3/go-iden3-crypto/babyjub"
     "github.com/iden3/go-merkletree"
@@ -131,3 +132,25 @@ type HistoryCoordinator struct {
     FirstItem int `meddler:"first_item"`
     LastItem  int `meddler:"last_item"`
 }
+
+// BatchAPI is a representation of a batch with additional information
+// required by the API, and extracted by joining block table
+type BatchAPI struct {
+    ItemID        int                    `json:"itemId" meddler:"item_id"`
+    BatchNum      common.BatchNum        `json:"batchNum" meddler:"batch_num"`
+    EthBlockNum   int64                  `json:"ethereumBlockNum" meddler:"eth_block_num"`
+    EthBlockHash  ethCommon.Hash         `json:"ethereumBlockHash" meddler:"hash"`
+    Timestamp     time.Time              `json:"timestamp" meddler:"timestamp,utctime"`
+    ForgerAddr    ethCommon.Address      `json:"forgerAddr" meddler:"forger_addr"`
+    CollectedFees apitypes.CollectedFees `json:"collectedFees" meddler:"fees_collected,json"`
+    // CollectedFees map[common.TokenID]*big.Int `json:"collectedFees" meddler:"fees_collected,json"`
+    TotalFeesUSD  *float64           `json:"historicTotalCollectedFeesUSD" meddler:"total_fees_usd"`
+    StateRoot     apitypes.BigIntStr `json:"stateRoot" meddler:"state_root"`
+    NumAccounts   int                `json:"numAccounts" meddler:"num_accounts"`
+    ExitRoot      apitypes.BigIntStr `json:"exitRoot" meddler:"exit_root"`
+    ForgeL1TxsNum *int64             `json:"forgeL1TransactionsNum" meddler:"forge_l1_txs_num"`
+    SlotNum       int64              `json:"slotNum" meddler:"slot_num"`
+    TotalItems    int                `json:"-" meddler:"total_items"`
+    FirstItem     int                `json:"-" meddler:"first_item"`
+    LastItem      int                `json:"-" meddler:"last_item"`
+}
@@ -16,7 +16,8 @@ CREATE TABLE coordinator (
 );
 
 CREATE TABLE batch (
-    batch_num BIGINT PRIMARY KEY,
+    item_id SERIAL PRIMARY KEY,
+    batch_num BIGINT UNIQUE NOT NULL,
     eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
     forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
     fees_collected BYTEA NOT NULL,
go.mod (2 changed lines)
@@ -4,6 +4,7 @@ go 1.14
 
 require (
     github.com/BurntSushi/toml v0.3.1
+    github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015
     github.com/dghubble/sling v1.3.0
     github.com/ethereum/go-ethereum v1.9.17
     github.com/getkin/kin-openapi v0.22.0
@@ -16,6 +17,7 @@ require (
     github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
     github.com/jmoiron/sqlx v1.2.0
     github.com/lib/pq v1.8.0
+    github.com/mattn/go-sqlite3 v2.0.3+incompatible
     github.com/mitchellh/copystructure v1.0.0
     github.com/rogpeppe/go-internal v1.6.1 // indirect
     github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
@@ -60,10 +60,10 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch {
             //nolint:gomnd
             ForgerAddr:    ethCommon.BigToAddress(big.NewInt(6886723)),
             CollectedFees: collectedFees,
-            StateRoot:     big.NewInt(int64(i) * 5), //nolint:gomnd
+            StateRoot:     big.NewInt(int64(i+1) * 5), //nolint:gomnd
             //nolint:gomnd
             NumAccounts: 30,
-            ExitRoot:    big.NewInt(int64(i) * 16), //nolint:gomnd
+            ExitRoot:    big.NewInt(int64(i+1) * 16), //nolint:gomnd
             SlotNum:     int64(i),
         }
         if i%2 == 0 {