Update coordinator, call all api update functions
- Common:
- Rename Block.EthBlockNum to Block.Num to avoid unneeded repetition
- API:
- Add UpdateNetworkInfoBlock to update just block information, to be
used when the node is not yet synchronized
- Node:
- Call API.UpdateMetrics and UpdateRecommendedFee in a loop, with
configurable time intervals
- Synchronizer:
- When mapping events by TxHash, use an array to support the possibility
of multiple calls of the same function happening in the same
transaction (for example, a smart contract in a single transaction
could call withdraw with delay twice, which would generate 2 withdraw
events, and 2 deposit events).
- In Stats, keep entire LastBlock instead of just the blockNum
- In Stats, add lastL1BatchBlock
- Test Stats and SCVars
- Coordinator:
- Enable writing the BatchInfo in every step of the pipeline to disk
(with JSON text files) for debugging purposes.
- Move the Pipeline functionality from the Coordinator to its own struct
(Pipeline)
- Implement shouldL1L2Batch
- In TxManager, implement logic to perform several attempts when doing
ethereum node RPC calls before considering the error. (Both for calls
to forgeBatch and transaction receipt)
- In TxManager, reorganize the flow and note the specific points in
which actions are made when err != nil
- HistoryDB:
- Implement GetLastL1BatchBlockNum: returns the blockNum of the latest
forged l1Batch, to help the coordinator decide when to forge an
L1Batch.
- EthereumClient and test.Client:
- Update EthBlockByNumber to return the last block when the passed
number is -1.
package api
import ( "fmt" "strconv" "testing" "time"
ethCommon "github.com/ethereum/go-ethereum/common" "github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/mitchellh/copystructure" "github.com/stretchr/testify/assert" )
type testBatch struct { ItemID uint64 `json:"itemId"` BatchNum common.BatchNum `json:"batchNum"` EthBlockNum int64 `json:"ethereumBlockNum"` EthBlockHash ethCommon.Hash `json:"ethereumBlockHash"` Timestamp time.Time `json:"timestamp"` ForgerAddr ethCommon.Address `json:"forgerAddr"` CollectedFees map[common.TokenID]string `json:"collectedFees"` TotalFeesUSD *float64 `json:"historicTotalCollectedFeesUSD"` StateRoot string `json:"stateRoot"` NumAccounts int `json:"numAccounts"` ExitRoot string `json:"exitRoot"` ForgeL1TxsNum *int64 `json:"forgeL1TransactionsNum"` SlotNum int64 `json:"slotNum"` ForgedTxs int `json:"forgedTransactions"` } type testBatchesResponse struct { Batches []testBatch `json:"batches"` PendingItems uint64 `json:"pendingItems"` }
func (t testBatchesResponse) GetPending() (pendingItems, lastItemID uint64) { if len(t.Batches) == 0 { return 0, 0 } pendingItems = t.PendingItems lastItemID = t.Batches[len(t.Batches)-1].ItemID return pendingItems, lastItemID }
func (t testBatchesResponse) Len() int { return len(t.Batches) }
func (t testBatchesResponse) New() Pendinger { return &testBatchesResponse{} }
type testFullBatch struct { Batch testBatch `json:"batch"` Txs []testTx `json:"transactions"` }
func genTestBatches( blocks []common.Block, cBatches []common.Batch, txs []testTx, ) ([]testBatch, []testFullBatch) { tBatches := []testBatch{} for i := 0; i < len(cBatches); i++ { block := common.Block{} found := false for _, b := range blocks { if b.Num == cBatches[i].EthBlockNum { block = b found = true break } } if !found { panic("block not found") } collectedFees := make(map[common.TokenID]string) for k, v := range cBatches[i].CollectedFees { collectedFees[k] = v.String() } forgedTxs := 0 for _, tx := range txs { if tx.BatchNum != nil && *tx.BatchNum == cBatches[i].BatchNum { forgedTxs++ } } tBatch := testBatch{ BatchNum: cBatches[i].BatchNum, EthBlockNum: cBatches[i].EthBlockNum, EthBlockHash: block.Hash, Timestamp: block.Timestamp, ForgerAddr: cBatches[i].ForgerAddr, CollectedFees: collectedFees, TotalFeesUSD: cBatches[i].TotalFeesUSD, StateRoot: cBatches[i].StateRoot.String(), NumAccounts: cBatches[i].NumAccounts, ExitRoot: cBatches[i].ExitRoot.String(), ForgeL1TxsNum: cBatches[i].ForgeL1TxsNum, SlotNum: cBatches[i].SlotNum, ForgedTxs: forgedTxs, } tBatches = append(tBatches, tBatch) } fullBatches := []testFullBatch{} for i := 0; i < len(tBatches); i++ { forgedTxs := []testTx{} for j := 0; j < len(txs); j++ { if txs[j].BatchNum != nil && *txs[j].BatchNum == tBatches[i].BatchNum { forgedTxs = append(forgedTxs, txs[j]) } } fullBatches = append(fullBatches, testFullBatch{ Batch: tBatches[i], Txs: forgedTxs, }) } return tBatches, fullBatches }
func TestGetBatches(t *testing.T) { endpoint := apiURL + "batches" fetchedBatches := []testBatch{} appendIter := func(intr interface{}) { for i := 0; i < len(intr.(*testBatchesResponse).Batches); i++ { tmp, err := copystructure.Copy(intr.(*testBatchesResponse).Batches[i]) if err != nil { panic(err) } fetchedBatches = append(fetchedBatches, tmp.(testBatch)) } } // Get all (no filters)
limit := 3 path := fmt.Sprintf("%s?limit=%d", endpoint, limit) err := doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) assertBatches(t, tc.batches, fetchedBatches)
// minBatchNum
fetchedBatches = []testBatch{} limit = 2 minBatchNum := tc.batches[len(tc.batches)/2].BatchNum path = fmt.Sprintf("%s?minBatchNum=%d&limit=%d", endpoint, minBatchNum, limit) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) minBatchNumBatches := []testBatch{} for i := 0; i < len(tc.batches); i++ { if tc.batches[i].BatchNum > minBatchNum { minBatchNumBatches = append(minBatchNumBatches, tc.batches[i]) } } assertBatches(t, minBatchNumBatches, fetchedBatches)
// maxBatchNum
fetchedBatches = []testBatch{} limit = 1 maxBatchNum := tc.batches[len(tc.batches)/2].BatchNum path = fmt.Sprintf("%s?maxBatchNum=%d&limit=%d", endpoint, maxBatchNum, limit) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) maxBatchNumBatches := []testBatch{} for i := 0; i < len(tc.batches); i++ { if tc.batches[i].BatchNum < maxBatchNum { maxBatchNumBatches = append(maxBatchNumBatches, tc.batches[i]) } } assertBatches(t, maxBatchNumBatches, fetchedBatches)
// slotNum
fetchedBatches = []testBatch{} limit = 5 slotNum := tc.batches[len(tc.batches)/2].SlotNum path = fmt.Sprintf("%s?slotNum=%d&limit=%d", endpoint, slotNum, limit) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) slotNumBatches := []testBatch{} for i := 0; i < len(tc.batches); i++ { if tc.batches[i].SlotNum == slotNum { slotNumBatches = append(slotNumBatches, tc.batches[i]) } } assertBatches(t, slotNumBatches, fetchedBatches)
// forgerAddr
fetchedBatches = []testBatch{} limit = 10 forgerAddr := tc.batches[len(tc.batches)/2].ForgerAddr path = fmt.Sprintf("%s?forgerAddr=%s&limit=%d", endpoint, forgerAddr.String(), limit) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) forgerAddrBatches := []testBatch{} for i := 0; i < len(tc.batches); i++ { if tc.batches[i].ForgerAddr == forgerAddr { forgerAddrBatches = append(forgerAddrBatches, tc.batches[i]) } } assertBatches(t, forgerAddrBatches, fetchedBatches)
// All, in reverse order
fetchedBatches = []testBatch{} limit = 6 path = fmt.Sprintf("%s?limit=%d", endpoint, limit) err = doGoodReqPaginated(path, historydb.OrderDesc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) flippedBatches := []testBatch{} for i := len(tc.batches) - 1; i >= 0; i-- { flippedBatches = append(flippedBatches, tc.batches[i]) } assertBatches(t, flippedBatches, fetchedBatches)
// Mixed filters
fetchedBatches = []testBatch{} limit = 1 maxBatchNum = tc.batches[len(tc.batches)-len(tc.batches)/4].BatchNum minBatchNum = tc.batches[len(tc.batches)/4].BatchNum path = fmt.Sprintf("%s?minBatchNum=%d&maxBatchNum=%d&limit=%d", endpoint, minBatchNum, maxBatchNum, limit) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) minMaxBatchNumBatches := []testBatch{} for i := 0; i < len(tc.batches); i++ { if tc.batches[i].BatchNum < maxBatchNum && tc.batches[i].BatchNum > minBatchNum { minMaxBatchNumBatches = append(minMaxBatchNumBatches, tc.batches[i]) } } assertBatches(t, minMaxBatchNumBatches, fetchedBatches)
// Empty array
fetchedBatches = []testBatch{} path = fmt.Sprintf("%s?slotNum=%d&minBatchNum=%d", endpoint, 1, 25) err = doGoodReqPaginated(path, historydb.OrderAsc, &testBatchesResponse{}, appendIter) assert.NoError(t, err) assertBatches(t, []testBatch{}, fetchedBatches)
// 400
// Invalid minBatchNum
path = fmt.Sprintf("%s?minBatchNum=%d", endpoint, -2) err = doBadReq("GET", path, nil, 400) assert.NoError(t, err) // Invalid forgerAddr
path = fmt.Sprintf("%s?forgerAddr=%s", endpoint, "0xG0000001") err = doBadReq("GET", path, nil, 400) assert.NoError(t, err) }
func TestGetBatch(t *testing.T) { endpoint := apiURL + "batches/" for _, batch := range tc.batches { fetchedBatch := testBatch{} assert.NoError( t, doGoodReq( "GET", endpoint+strconv.Itoa(int(batch.BatchNum)), nil, &fetchedBatch, ), ) assertBatch(t, batch, fetchedBatch) } // 400
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400)) // 404
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404)) }
func TestGetFullBatch(t *testing.T) { endpoint := apiURL + "full-batches/" for _, fullBatch := range tc.fullBatches { fetchedFullBatch := testFullBatch{} assert.NoError( t, doGoodReq( "GET", endpoint+strconv.Itoa(int(fullBatch.Batch.BatchNum)), nil, &fetchedFullBatch, ), ) assertBatch(t, fullBatch.Batch, fetchedFullBatch.Batch) assertTxs(t, fullBatch.Txs, fetchedFullBatch.Txs) } // 400
assert.NoError(t, doBadReq("GET", endpoint+"foo", nil, 400)) // 404
assert.NoError(t, doBadReq("GET", endpoint+"99999", nil, 404)) }
func assertBatches(t *testing.T, expected, actual []testBatch) { assert.Equal(t, len(expected), len(actual)) for i := 0; i < len(expected); i++ { assertBatch(t, expected[i], actual[i]) } }
func assertBatch(t *testing.T, expected, actual testBatch) { assert.Equal(t, expected.Timestamp.Unix(), actual.Timestamp.Unix()) expected.Timestamp = actual.Timestamp actual.ItemID = expected.ItemID assert.Equal(t, expected, actual) }
|