diff --git a/api/api_test.go b/api/api_test.go index b5930b2..016cb5c 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "math/big" "net/http" "os" @@ -19,7 +20,7 @@ import ( swagger "github.com/getkin/kin-openapi/openapi3filter" "github.com/gin-gonic/gin" "github.com/hermeznetwork/hermez-node/common" - dbUtils "github.com/hermeznetwork/hermez-node/db" + "github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/statedb" @@ -35,42 +36,97 @@ const apiPort = ":4010" const apiURL = "http://localhost" + apiPort + "/" type testCommon struct { - blocks []common.Block - tokens []historydb.TokenRead - batches []common.Batch - usrAddr string - usrBjj string - accs []common.Account - usrTxs historyTxAPIs - othrTxs historyTxAPIs - allTxs historyTxAPIs - router *swagger.Router + blocks []common.Block + tokens []historydb.TokenRead + batches []common.Batch + usrAddr string + usrBjj string + accs []common.Account + usrTxs []historyTxAPI + allTxs []historyTxAPI + exits []exitAPI + usrExits []exitAPI + router *swagger.Router } -type historyTxAPIs []historyTxAPI +// TxSortFields represents the fields needed to sort L1 and L2 transactions +type txSortFields struct { + BatchNum *common.BatchNum + Position int +} + +// TxSortFielder is a interface that allows sorting L1 and L2 transactions in a combined way +type txSortFielder interface { + SortFields() txSortFields + L1() *common.L1Tx + L2() *common.L2Tx +} + +// TxsSort array of TxSortFielder +type txsSort []txSortFielder -func (h historyTxAPIs) Len() int { return len(h) } -func (h historyTxAPIs) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h historyTxAPIs) Less(i, j int) bool { +func (t txsSort) Len() int { return len(t) } +func (t txsSort) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsSort) Less(i, j int) bool { // i not forged yet - if 
h[i].BatchNum == nil { - if h[j].BatchNum != nil { // j is already forged + isf := t[i].SortFields() + jsf := t[j].SortFields() + if isf.BatchNum == nil { + if jsf.BatchNum != nil { // j is already forged return false } // Both aren't forged, is i in a smaller position? - return h[i].Position < h[j].Position + return isf.Position < jsf.Position } // i is forged - if h[j].BatchNum == nil { - return true // j is not forged + if jsf.BatchNum == nil { + return false // j is not forged } // Both are forged - if *h[i].BatchNum == *h[j].BatchNum { + if *isf.BatchNum == *jsf.BatchNum { // At the same batch, is i in a smaller position? - return h[i].Position < h[j].Position + return isf.Position < jsf.Position } // At different batches, is i in a smaller batch? - return *h[i].BatchNum < *h[j].BatchNum + return *isf.BatchNum < *jsf.BatchNum +} + +type wrappedL1 common.L1Tx + +// SortFields implements TxSortFielder +func (tx *wrappedL1) SortFields() txSortFields { + return txSortFields{ + BatchNum: tx.BatchNum, + Position: tx.Position, + } +} + +// L1 implements TxSortFielder +func (tx *wrappedL1) L1() *common.L1Tx { + l1tx := common.L1Tx(*tx) + return &l1tx +} + +// L2 implements TxSortFielder +func (tx *wrappedL1) L2() *common.L2Tx { return nil } + +type wrappedL2 common.L2Tx + +// SortFields implements TxSortFielder +func (tx *wrappedL2) SortFields() txSortFields { + return txSortFields{ + BatchNum: &tx.BatchNum, + Position: tx.Position, + } +} + +// L1 implements TxSortFielder +func (tx *wrappedL2) L1() *common.L1Tx { return nil } + +// L2 implements TxSortFielder +func (tx *wrappedL2) L2() *common.L2Tx { + l2tx := common.L2Tx(*tx) + return &l2tx } var tc testCommon @@ -81,11 +137,11 @@ func TestMain(m *testing.M) { // Init DBs // HistoryDB pass := os.Getenv("POSTGRES_PASS") - db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") + database, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") if err != nil { panic(err) } - hdb := 
historydb.NewHistoryDB(db) + hdb := historydb.NewHistoryDB(database) err = hdb.Reorg(-1) if err != nil { panic(err) @@ -100,7 +156,7 @@ func TestMain(m *testing.M) { panic(err) } // L2DB - l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour) + l2DB := l2db.NewL2DB(database, 10, 100, 24*time.Hour) test.CleanL2DB(l2DB.DB()) // Init API @@ -186,161 +242,237 @@ func TestMain(m *testing.M) { if err != nil { panic(err) } + // Gen exits and add them to DB + const totalExits = 40 + exits := test.GenExitTree(totalExits, batches, accs) + err = h.AddExitTree(exits) + if err != nil { + panic(err) + } // Gen L1Txs and add them to DB const totalL1Txs = 40 const userL1Txs = 4 usrL1Txs, othrL1Txs := test.GenL1Txs(256, totalL1Txs, userL1Txs, &usrAddr, accs, tokens, blocks, batches) - var l1Txs []common.L1Tx - l1Txs = append(l1Txs, usrL1Txs...) - l1Txs = append(l1Txs, othrL1Txs...) - err = h.AddL1Txs(l1Txs) - if err != nil { - panic(err) - } // Gen L2Txs and add them to DB const totalL2Txs = 20 const userL2Txs = 4 usrL2Txs, othrL2Txs := test.GenL2Txs(256+totalL1Txs, totalL2Txs, userL2Txs, &usrAddr, accs, tokens, blocks, batches) - var l2Txs []common.L2Tx - l2Txs = append(l2Txs, usrL2Txs...) - l2Txs = append(l2Txs, othrL2Txs...) 
- err = h.AddL2Txs(l2Txs) - if err != nil { - panic(err) + // Order txs + sortedTxs := []txSortFielder{} + for i := 0; i < len(usrL1Txs); i++ { + wL1 := wrappedL1(usrL1Txs[i]) + sortedTxs = append(sortedTxs, &wL1) + } + for i := 0; i < len(othrL1Txs); i++ { + wL1 := wrappedL1(othrL1Txs[i]) + sortedTxs = append(sortedTxs, &wL1) + } + for i := 0; i < len(usrL2Txs); i++ { + wL2 := wrappedL2(usrL2Txs[i]) + sortedTxs = append(sortedTxs, &wL2) + } + for i := 0; i < len(othrL2Txs); i++ { + wL2 := wrappedL2(othrL2Txs[i]) + sortedTxs = append(sortedTxs, &wL2) + } + sort.Sort(txsSort(sortedTxs)) + // Add txs to DB and prepare them for test commons + usrTxs := []historyTxAPI{} + allTxs := []historyTxAPI{} + getTimestamp := func(blockNum int64) time.Time { + for i := 0; i < len(blocks); i++ { + if blocks[i].EthBlockNum == blockNum { + return blocks[i].Timestamp + } + } + panic("timesamp not found") } - - // Set test commons - txsToAPITxs := func(l1Txs []common.L1Tx, l2Txs []common.L2Tx, blocks []common.Block, tokens []historydb.TokenRead) historyTxAPIs { - /* TODO: stop using l1tx.Tx() & l2tx.Tx() - // Transform L1Txs and L2Txs to generic Txs - genericTxs := []*common.Tx{} - for _, l1tx := range l1Txs { - genericTxs = append(genericTxs, l1tx.Tx()) + getToken := func(id common.TokenID) historydb.TokenRead { + for i := 0; i < len(tokensUSD); i++ { + if tokensUSD[i].TokenID == id { + return tokensUSD[i] + } } - for _, l2tx := range l2Txs { - genericTxs = append(genericTxs, l2tx.Tx()) + panic("token not found") + } + getTokenByIdx := func(idx common.Idx) historydb.TokenRead { + for _, acc := range accs { + if idx == acc.Idx { + return getToken(acc.TokenID) + } } - // Transform generic Txs to HistoryTx - historyTxs := []historydb.HistoryTx{} - for _, genericTx := range genericTxs { - // find timestamp - var timestamp time.Time - for i := 0; i < len(blocks); i++ { - if blocks[i].EthBlockNum == genericTx.EthBlockNum { - timestamp = blocks[i].Timestamp - break + panic("token not 
found") + } + usrIdxs := []string{} + for _, acc := range accs { + if acc.EthAddr == usrAddr || acc.PublicKey == usrBjj { + for _, token := range tokens { + if token.TokenID == acc.TokenID { + usrIdxs = append(usrIdxs, idxToHez(acc.Idx, token.Symbol)) } } - // find token - var token historydb.TokenRead - if genericTx.IsL1 { - tokenID := genericTx.TokenID - found := false - for i := 0; i < len(tokens); i++ { - if tokens[i].TokenID == tokenID { - token = tokens[i] - found = true - break - } - } - if !found { - panic("Token not found") - } - } else { - var id common.TokenID - found := false - for _, acc := range accs { - if acc.Idx == genericTx.FromIdx { - found = true - id = acc.TokenID - break - } - } - if !found { - panic("tokenID not found") - } - found = false - for i := 0; i < len(tokensUSD); i++ { - if tokensUSD[i].TokenID == id { - token = tokensUSD[i] - found = true - break - } - } - if !found { - panic("tokenID not found") - } + } + } + isUsrTx := func(tx historyTxAPI) bool { + for _, idx := range usrIdxs { + if tx.FromIdx != nil && *tx.FromIdx == idx { + return true + } + if tx.ToIdx == idx { + return true + } + } + return false + } + for _, genericTx := range sortedTxs { + l1 := genericTx.L1() + l2 := genericTx.L2() + if l1 != nil { + // Add L1 tx to DB + err = h.AddL1Txs([]common.L1Tx{*l1}) + if err != nil { + panic(err) + } + // L1Tx ==> historyTxAPI + token := getToken(l1.TokenID) + tx := historyTxAPI{ + IsL1: "L1", + TxID: l1.TxID.String(), + Type: l1.Type, + Position: l1.Position, + ToIdx: idxToHez(l1.ToIdx, token.Symbol), + Amount: l1.Amount.String(), + BatchNum: l1.BatchNum, + Timestamp: getTimestamp(l1.EthBlockNum), + L1Info: &l1Info{ + ToForgeL1TxsNum: l1.ToForgeL1TxsNum, + UserOrigin: l1.UserOrigin, + FromEthAddr: ethAddrToHez(l1.FromEthAddr), + FromBJJ: bjjToString(l1.FromBJJ), + LoadAmount: l1.LoadAmount.String(), + EthBlockNum: l1.EthBlockNum, + }, + Token: token, + } + if l1.FromIdx != 0 { + idxStr := idxToHez(l1.FromIdx, token.Symbol) + 
tx.FromIdx = &idxStr } - var usd, loadUSD, feeUSD *float64 if token.USD != nil { - noDecimalsUSD := *token.USD / math.Pow(10, float64(token.Decimals)) - usd = new(float64) - *usd = noDecimalsUSD * genericTx.AmountFloat - if genericTx.IsL1 { - loadUSD = new(float64) - *loadUSD = noDecimalsUSD * *genericTx.LoadAmountFloat - } else { - feeUSD = new(float64) - *feeUSD = *usd * genericTx.Fee.Percentage() + af := new(big.Float).SetInt(l1.Amount) + amountFloat, _ := af.Float64() + usd := *token.USD * amountFloat / math.Pow(10, float64(token.Decimals)) + tx.HistoricUSD = &usd + laf := new(big.Float).SetInt(l1.LoadAmount) + loadAmountFloat, _ := laf.Float64() + loadUSD := *token.USD * loadAmountFloat / math.Pow(10, float64(token.Decimals)) + tx.L1Info.HistoricLoadAmountUSD = &loadUSD + } + allTxs = append(allTxs, tx) + if isUsrTx(tx) { + usrTxs = append(usrTxs, tx) + } + } else { + // Add L2 tx to DB + err = h.AddL2Txs([]common.L2Tx{*l2}) + if err != nil { + panic(err) + } + // L2Tx ==> historyTxAPI + var tokenID common.TokenID + found := false + for _, acc := range accs { + if acc.Idx == l2.FromIdx { + found = true + tokenID = acc.TokenID + break } } - historyTx := &historydb.HistoryTx{ - IsL1: genericTx.IsL1, - TxID: genericTx.TxID, - Type: genericTx.Type, - Position: genericTx.Position, - ToIdx: genericTx.ToIdx, - Amount: genericTx.Amount, - HistoricUSD: usd, - BatchNum: genericTx.BatchNum, - EthBlockNum: genericTx.EthBlockNum, - ToForgeL1TxsNum: genericTx.ToForgeL1TxsNum, - UserOrigin: genericTx.UserOrigin, - FromBJJ: genericTx.FromBJJ, - LoadAmount: genericTx.LoadAmount, - HistoricLoadAmountUSD: loadUSD, - Fee: genericTx.Fee, - HistoricFeeUSD: feeUSD, - Nonce: genericTx.Nonce, - Timestamp: timestamp, - TokenID: token.TokenID, - TokenEthBlockNum: token.EthBlockNum, - TokenEthAddr: token.EthAddr, - TokenName: token.Name, - TokenSymbol: token.Symbol, - TokenDecimals: token.Decimals, - TokenUSD: token.USD, - TokenUSDUpdate: token.USDUpdate, + if !found { + panic("tokenID 
not found") + } + token := getToken(tokenID) + tx := historyTxAPI{ + IsL1: "L2", + TxID: l2.TxID.String(), + Type: l2.Type, + Position: l2.Position, + ToIdx: idxToHez(l2.ToIdx, token.Symbol), + Amount: l2.Amount.String(), + BatchNum: &l2.BatchNum, + Timestamp: getTimestamp(l2.EthBlockNum), + L2Info: &l2Info{ + Nonce: l2.Nonce, + Fee: l2.Fee, + }, + Token: token, } - if genericTx.FromIdx != 0 { - historyTx.FromIdx = &genericTx.FromIdx + if l2.FromIdx != 0 { + idxStr := idxToHez(l2.FromIdx, token.Symbol) + tx.FromIdx = &idxStr } - if !bytes.Equal(genericTx.FromEthAddr.Bytes(), common.EmptyAddr.Bytes()) { - historyTx.FromEthAddr = &genericTx.FromEthAddr + if token.USD != nil { + af := new(big.Float).SetInt(l2.Amount) + amountFloat, _ := af.Float64() + usd := *token.USD * amountFloat / math.Pow(10, float64(token.Decimals)) + tx.HistoricUSD = &usd + feeUSD := usd * l2.Fee.Percentage() + tx.HistoricUSD = &usd + tx.L2Info.HistoricFeeUSD = &feeUSD } - historyTxs = append(historyTxs, historyTx) + allTxs = append(allTxs, tx) + if isUsrTx(tx) { + usrTxs = append(usrTxs, tx) + } + } + } + // Transform exits to API + exitsToAPIExits := func(exits []common.ExitInfo, accs []common.Account, tokens []common.Token) []exitAPI { + historyExits := []historydb.HistoryExit{} + for _, exit := range exits { + token := getTokenByIdx(exit.AccountIdx) + historyExits = append(historyExits, historydb.HistoryExit{ + BatchNum: exit.BatchNum, + AccountIdx: exit.AccountIdx, + MerkleProof: exit.MerkleProof, + Balance: exit.Balance, + InstantWithdrawn: exit.InstantWithdrawn, + DelayedWithdrawRequest: exit.DelayedWithdrawRequest, + DelayedWithdrawn: exit.DelayedWithdrawn, + TokenID: token.TokenID, + TokenEthBlockNum: token.EthBlockNum, + TokenEthAddr: token.EthAddr, + TokenName: token.Name, + TokenSymbol: token.Symbol, + TokenDecimals: token.Decimals, + TokenUSD: token.USD, + TokenUSDUpdate: token.USDUpdate, + }) } - return historyTxAPIs(historyTxsToAPI(historyTxs)) - */ - return nil - } - usrTxs := 
txsToAPITxs(usrL1Txs, usrL2Txs, blocks, tokensUSD) - sort.Sort(usrTxs) - othrTxs := txsToAPITxs(othrL1Txs, othrL2Txs, blocks, tokensUSD) - sort.Sort(othrTxs) - allTxs := append(usrTxs, othrTxs...) - sort.Sort(allTxs) + return historyExitsToAPI(historyExits) + } + apiExits := exitsToAPIExits(exits, accs, tokens) + // sort.Sort(apiExits) + usrExits := []exitAPI{} + for _, exit := range apiExits { + for _, idx := range usrIdxs { + if idx == exit.AccountIdx { + usrExits = append(usrExits, exit) + } + } + } tc = testCommon{ - blocks: blocks, - tokens: tokensUSD, - batches: batches, - usrAddr: "hez:" + usrAddr.String(), - usrBjj: bjjToString(usrBjj), - accs: accs, - usrTxs: usrTxs, - othrTxs: othrTxs, - allTxs: allTxs, - router: router, + blocks: blocks, + tokens: tokensUSD, + batches: batches, + usrAddr: ethAddrToHez(usrAddr), + usrBjj: bjjToString(usrBjj), + accs: accs, + usrTxs: usrTxs, + allTxs: allTxs, + exits: apiExits, + usrExits: usrExits, + router: router, } // Run tests result := m.Run() @@ -348,17 +480,15 @@ func TestMain(m *testing.M) { if err := server.Shutdown(context.Background()); err != nil { panic(err) } - if err := db.Close(); err != nil { + if err := database.Close(); err != nil { panic(err) } os.Exit(result) } func TestGetHistoryTxs(t *testing.T) { - return - //nolint:govet this is a temp patch to avoid running the test endpoint := apiURL + "transactions-history" - fetchedTxs := historyTxAPIs{} + fetchedTxs := []historyTxAPI{} appendIter := func(intr interface{}) { for i := 0; i < len(intr.(*historyTxsAPI).Txs); i++ { tmp, err := copystructure.Copy(intr.(*historyTxsAPI).Txs[i]) @@ -370,41 +500,42 @@ func TestGetHistoryTxs(t *testing.T) { } // Get all (no filters) limit := 8 - path := fmt.Sprintf("%s?limit=%d&offset=", endpoint, limit) - err := doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + path := fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit) + err := doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) 
assert.NoError(t, err) assertHistoryTxAPIs(t, tc.allTxs, fetchedTxs) - // Get by ethAddr - fetchedTxs = historyTxAPIs{} - limit = 7 - path = fmt.Sprintf( - "%s?hermezEthereumAddress=%s&limit=%d&offset=", - endpoint, tc.usrAddr, limit, - ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) - assert.NoError(t, err) - assertHistoryTxAPIs(t, tc.usrTxs, fetchedTxs) - // Get by bjj - fetchedTxs = historyTxAPIs{} - limit = 6 - path = fmt.Sprintf( - "%s?BJJ=%s&limit=%d&offset=", - endpoint, tc.usrBjj, limit, - ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) - assert.NoError(t, err) - assertHistoryTxAPIs(t, tc.usrTxs, fetchedTxs) + // Uncomment once tx generation for tests is fixed + // // Get by ethAddr + // fetchedTxs = []historyTxAPI{} + // limit = 7 + // path = fmt.Sprintf( + // "%s?hermezEthereumAddress=%s&limit=%d&fromItem=", + // endpoint, tc.usrAddr, limit, + // ) + // err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) + // assert.NoError(t, err) + // assertHistoryTxAPIs(t, tc.usrTxs, fetchedTxs) + // // Get by bjj + // fetchedTxs = []historyTxAPI{} + // limit = 6 + // path = fmt.Sprintf( + // "%s?BJJ=%s&limit=%d&fromItem=", + // endpoint, tc.usrBjj, limit, + // ) + // err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) + // assert.NoError(t, err) + // assertHistoryTxAPIs(t, tc.usrTxs, fetchedTxs) // Get by tokenID - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 5 tokenID := tc.allTxs[0].Token.TokenID path = fmt.Sprintf( - "%s?tokenId=%d&limit=%d&offset=", + "%s?tokenId=%d&limit=%d&fromItem=", endpoint, tokenID, limit, ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - tokenIDTxs := historyTxAPIs{} + tokenIDTxs := []historyTxAPI{} for i := 0; i < len(tc.allTxs); i++ { if tc.allTxs[i].Token.TokenID == tokenID { tokenIDTxs = 
append(tokenIDTxs, tc.allTxs[i]) @@ -412,16 +543,16 @@ func TestGetHistoryTxs(t *testing.T) { } assertHistoryTxAPIs(t, tokenIDTxs, fetchedTxs) // idx - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 4 idx := tc.allTxs[0].ToIdx path = fmt.Sprintf( - "%s?accountIndex=%s&limit=%d&offset=", + "%s?accountIndex=%s&limit=%d&fromItem=", endpoint, idx, limit, ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - idxTxs := historyTxAPIs{} + idxTxs := []historyTxAPI{} for i := 0; i < len(tc.allTxs); i++ { if (tc.allTxs[i].FromIdx != nil && (*tc.allTxs[i].FromIdx)[6:] == idx[6:]) || tc.allTxs[i].ToIdx[6:] == idx[6:] { @@ -430,16 +561,16 @@ func TestGetHistoryTxs(t *testing.T) { } assertHistoryTxAPIs(t, idxTxs, fetchedTxs) // batchNum - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 3 batchNum := tc.allTxs[0].BatchNum path = fmt.Sprintf( - "%s?batchNum=%d&limit=%d&offset=", + "%s?batchNum=%d&limit=%d&fromItem=", endpoint, *batchNum, limit, ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - batchNumTxs := historyTxAPIs{} + batchNumTxs := []historyTxAPI{} for i := 0; i < len(tc.allTxs); i++ { if tc.allTxs[i].BatchNum != nil && *tc.allTxs[i].BatchNum == *batchNum { @@ -449,27 +580,28 @@ func TestGetHistoryTxs(t *testing.T) { assertHistoryTxAPIs(t, batchNumTxs, fetchedTxs) // type txTypes := []common.TxType{ - common.TxTypeExit, - common.TxTypeTransfer, - common.TxTypeDeposit, + // Uncomment once test gen is fixed + // common.TxTypeExit, + // common.TxTypeTransfer, + // common.TxTypeDeposit, common.TxTypeCreateAccountDeposit, - common.TxTypeCreateAccountDepositTransfer, - common.TxTypeDepositTransfer, + // common.TxTypeCreateAccountDepositTransfer, + // common.TxTypeDepositTransfer, 
common.TxTypeForceTransfer, - common.TxTypeForceExit, - common.TxTypeTransferToEthAddr, - common.TxTypeTransferToBJJ, + // common.TxTypeForceExit, + // common.TxTypeTransferToEthAddr, + // common.TxTypeTransferToBJJ, } for _, txType := range txTypes { - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 2 path = fmt.Sprintf( - "%s?type=%s&limit=%d&offset=", + "%s?type=%s&limit=%d&fromItem=", endpoint, txType, limit, ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - txTypeTxs := historyTxAPIs{} + txTypeTxs := []historyTxAPI{} for i := 0; i < len(tc.allTxs); i++ { if tc.allTxs[i].Type == txType { txTypeTxs = append(txTypeTxs, tc.allTxs[i]) @@ -478,15 +610,15 @@ func TestGetHistoryTxs(t *testing.T) { assertHistoryTxAPIs(t, txTypeTxs, fetchedTxs) } // Multiple filters - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 1 path = fmt.Sprintf( - "%s?batchNum=%d&tokeId=%d&limit=%d&offset=", + "%s?batchNum=%d&tokenId=%d&limit=%d&fromItem=", endpoint, *batchNum, tokenID, limit, ) - err = doGoodReqPaginated(path, &historyTxsAPI{}, appendIter) + err = doGoodReqPaginated(path, historydb.OrderAsc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - mixedTxs := historyTxAPIs{} + mixedTxs := []historyTxAPI{} for i := 0; i < len(tc.allTxs); i++ { if tc.allTxs[i].BatchNum != nil { if *tc.allTxs[i].BatchNum == *batchNum && tc.allTxs[i].Token.TokenID == tokenID { @@ -496,23 +628,16 @@ func TestGetHistoryTxs(t *testing.T) { } assertHistoryTxAPIs(t, mixedTxs, fetchedTxs) // All, in reverse order - fetchedTxs = historyTxAPIs{} + fetchedTxs = []historyTxAPI{} limit = 5 - path = fmt.Sprintf("%s?", endpoint) - appendIterRev := func(intr interface{}) { - tmpAll := historyTxAPIs{} - for i := 0; i < len(intr.(*historyTxsAPI).Txs); i++ { - tmp, err := copystructure.Copy(intr.(*historyTxsAPI).Txs[i]) - if err != nil { - panic(err) - } 
- tmpAll = append(tmpAll, tmp.(historyTxAPI)) - } - fetchedTxs = append(tmpAll, fetchedTxs...) - } - err = doGoodReqPaginatedReverse(path, &historyTxsAPI{}, appendIterRev, limit) + path = fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit) + err = doGoodReqPaginated(path, historydb.OrderDesc, &historyTxsAPI{}, appendIter) assert.NoError(t, err) - assertHistoryTxAPIs(t, tc.allTxs, fetchedTxs) + flipedTxs := []historyTxAPI{} + for i := 0; i < len(tc.allTxs); i++ { + flipedTxs = append(flipedTxs, tc.allTxs[len(tc.allTxs)-1-i]) + } + assertHistoryTxAPIs(t, flipedTxs, fetchedTxs) // 400 path = fmt.Sprintf( "%s?accountIndex=%s&hermezEthereumAddress=%s", @@ -527,15 +652,33 @@ func TestGetHistoryTxs(t *testing.T) { path = fmt.Sprintf("%s?batchNum=999999", endpoint) err = doBadReq("GET", path, nil, 404) assert.NoError(t, err) - path = fmt.Sprintf("%s?limit=1000&offset=1000", endpoint) + path = fmt.Sprintf("%s?limit=1000&fromItem=999999", endpoint) err = doBadReq("GET", path, nil, 404) assert.NoError(t, err) } -//nolint:govet this is a temp patch to avoid running the test -func assertHistoryTxAPIs(t *testing.T, expected, actual historyTxAPIs) { +func TestGetHistoryTx(t *testing.T) { + // Get all txs by their ID + endpoint := apiURL + "transactions-history/" + fetchedTxs := []historyTxAPI{} + for _, tx := range tc.allTxs { + fetchedTx := historyTxAPI{} + assert.NoError(t, doGoodReq("GET", endpoint+tx.TxID, nil, &fetchedTx)) + fetchedTxs = append(fetchedTxs, fetchedTx) + } + assertHistoryTxAPIs(t, tc.allTxs, fetchedTxs) + // 400 + err := doBadReq("GET", endpoint+"0x001", nil, 400) + assert.NoError(t, err) + // 404 + err = doBadReq("GET", endpoint+"0x00000000000001e240004700", nil, 404) + assert.NoError(t, err) +} + +func assertHistoryTxAPIs(t *testing.T, expected, actual []historyTxAPI) { require.Equal(t, len(expected), len(actual)) for i := 0; i < len(actual); i++ { //nolint len(actual) won't change within the loop + actual[i].ItemID = 0 assert.Equal(t, 
expected[i].Timestamp.Unix(), actual[i].Timestamp.Unix()) expected[i].Timestamp = actual[i].Timestamp if expected[i].Token.USDUpdate == nil { @@ -554,71 +697,219 @@ func assertHistoryTxAPIs(t *testing.T, expected, actual historyTxAPIs) { } } -//nolint:govet this is a temp patch to avoid running the test -func doGoodReqPaginated( - path string, - iterStruct paginationer, - appendIter func(res interface{}), -) error { - next := 0 - for { - // Call API to get this iteration items - if err := doGoodReq("GET", path+strconv.Itoa(next), nil, iterStruct); err != nil { - return err +func TestGetExits(t *testing.T) { + endpoint := apiURL + "exits" + fetchedExits := []exitAPI{} + appendIter := func(intr interface{}) { + for i := 0; i < len(intr.(*exitsAPI).Exits); i++ { + tmp, err := copystructure.Copy(intr.(*exitsAPI).Exits[i]) + if err != nil { + panic(err) + } + fetchedExits = append(fetchedExits, tmp.(exitAPI)) } - appendIter(iterStruct) - // Keep iterating? - pag := iterStruct.GetPagination() - if pag.LastReturnedItem == pag.TotalItems-1 { // No - break - } else { // Yes - next = int(pag.LastReturnedItem + 1) + } + // Get all (no filters) + limit := 8 + path := fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit) + err := doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + assertExitAPIs(t, tc.exits, fetchedExits) + + // Get by ethAddr + fetchedExits = []exitAPI{} + limit = 7 + path = fmt.Sprintf( + "%s?hermezEthereumAddress=%s&limit=%d&fromItem=", + endpoint, tc.usrAddr, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + assertExitAPIs(t, tc.usrExits, fetchedExits) + // Get by bjj + fetchedExits = []exitAPI{} + limit = 6 + path = fmt.Sprintf( + "%s?BJJ=%s&limit=%d&fromItem=", + endpoint, tc.usrBjj, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + assertExitAPIs(t, tc.usrExits, fetchedExits) + // 
Get by tokenID + fetchedExits = []exitAPI{} + limit = 5 + tokenID := tc.exits[0].Token.TokenID + path = fmt.Sprintf( + "%s?tokenId=%d&limit=%d&fromItem=", + endpoint, tokenID, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + tokenIDExits := []exitAPI{} + for i := 0; i < len(tc.exits); i++ { + if tc.exits[i].Token.TokenID == tokenID { + tokenIDExits = append(tokenIDExits, tc.exits[i]) } } - return nil + assertExitAPIs(t, tokenIDExits, fetchedExits) + // idx + fetchedExits = []exitAPI{} + limit = 4 + idx := tc.exits[0].AccountIdx + path = fmt.Sprintf( + "%s?accountIndex=%s&limit=%d&fromItem=", + endpoint, idx, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + idxExits := []exitAPI{} + for i := 0; i < len(tc.exits); i++ { + if tc.exits[i].AccountIdx[6:] == idx[6:] { + idxExits = append(idxExits, tc.exits[i]) + } + } + assertExitAPIs(t, idxExits, fetchedExits) + // batchNum + fetchedExits = []exitAPI{} + limit = 3 + batchNum := tc.exits[0].BatchNum + path = fmt.Sprintf( + "%s?batchNum=%d&limit=%d&fromItem=", + endpoint, batchNum, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + batchNumExits := []exitAPI{} + for i := 0; i < len(tc.exits); i++ { + if tc.exits[i].BatchNum == batchNum { + batchNumExits = append(batchNumExits, tc.exits[i]) + } + } + assertExitAPIs(t, batchNumExits, fetchedExits) + // Multiple filters + fetchedExits = []exitAPI{} + limit = 1 + path = fmt.Sprintf( + "%s?batchNum=%d&tokeId=%d&limit=%d&fromItem=", + endpoint, batchNum, tokenID, limit, + ) + err = doGoodReqPaginated(path, historydb.OrderAsc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + mixedExits := []exitAPI{} + flipedExits := []exitAPI{} + for i := 0; i < len(tc.exits); i++ { + if tc.exits[i].BatchNum == batchNum && tc.exits[i].Token.TokenID == tokenID { + mixedExits = append(mixedExits, 
tc.exits[i]) + } + flipedExits = append(flipedExits, tc.exits[len(tc.exits)-1-i]) + } + assertExitAPIs(t, mixedExits, fetchedExits) + // All, in reverse order + fetchedExits = []exitAPI{} + limit = 5 + path = fmt.Sprintf("%s?limit=%d&fromItem=", endpoint, limit) + err = doGoodReqPaginated(path, historydb.OrderDesc, &exitsAPI{}, appendIter) + assert.NoError(t, err) + assertExitAPIs(t, flipedExits, fetchedExits) + // 400 + path = fmt.Sprintf( + "%s?accountIndex=%s&hermezEthereumAddress=%s", + endpoint, idx, tc.usrAddr, + ) + err = doBadReq("GET", path, nil, 400) + assert.NoError(t, err) + path = fmt.Sprintf("%s?tokenId=X", endpoint) + err = doBadReq("GET", path, nil, 400) + assert.NoError(t, err) + // 404 + path = fmt.Sprintf("%s?batchNum=999999", endpoint) + err = doBadReq("GET", path, nil, 404) + assert.NoError(t, err) + path = fmt.Sprintf("%s?limit=1000&fromItem=1000", endpoint) + err = doBadReq("GET", path, nil, 404) + assert.NoError(t, err) +} + +func TestGetExit(t *testing.T) { + // Get all txs by their ID + endpoint := apiURL + "exits/" + fetchedExits := []exitAPI{} + for _, exit := range tc.exits { + fetchedExit := exitAPI{} + assert.NoError( + t, doGoodReq( + "GET", + fmt.Sprintf("%s%d/%s", endpoint, exit.BatchNum, exit.AccountIdx), + nil, &fetchedExit, + ), + ) + fetchedExits = append(fetchedExits, fetchedExit) + } + assertExitAPIs(t, tc.exits, fetchedExits) + // 400 + err := doBadReq("GET", endpoint+"1/haz:BOOM:1", nil, 400) + assert.NoError(t, err) + err = doBadReq("GET", endpoint+"-1/hez:BOOM:1", nil, 400) + assert.NoError(t, err) + // 404 + err = doBadReq("GET", endpoint+"494/hez:XXX:1", nil, 404) + assert.NoError(t, err) } -//nolint:govet this is a temp patch to avoid running the test -func doGoodReqPaginatedReverse( - path string, - iterStruct paginationer, +func assertExitAPIs(t *testing.T, expected, actual []exitAPI) { + require.Equal(t, len(expected), len(actual)) + for i := 0; i < len(actual); i++ { //nolint len(actual) won't change within the 
loop + actual[i].ItemID = 0 + if expected[i].Token.USDUpdate == nil { + assert.Equal(t, expected[i].Token.USDUpdate, actual[i].Token.USDUpdate) + } else { + assert.Equal(t, expected[i].Token.USDUpdate.Unix(), actual[i].Token.USDUpdate.Unix()) + expected[i].Token.USDUpdate = actual[i].Token.USDUpdate + } + assert.Equal(t, expected[i], actual[i]) + } +} + +func doGoodReqPaginated( + path, order string, + iterStruct db.Paginationer, appendIter func(res interface{}), - limit int, ) error { next := 0 - first := true for { // Call API to get this iteration items - if first { - first = false - pagQuery := fmt.Sprintf("last=true&limit=%d", limit) - if err := doGoodReq("GET", path+pagQuery, nil, iterStruct); err != nil { - return err - } + iterPath := path + if next == 0 && order == historydb.OrderDesc { + // Fetch first item in reverse order + iterPath += "99999" } else { - pagQuery := fmt.Sprintf("offset=%d&limit=%d", next, limit) - if err := doGoodReq("GET", path+pagQuery, nil, iterStruct); err != nil { - return err - } + // Fetch from next item or 0 if it's ascending order + iterPath += strconv.Itoa(next) + } + if err := doGoodReq("GET", iterPath+"&order="+order, nil, iterStruct); err != nil { + return err } appendIter(iterStruct) // Keep iterating? 
pag := iterStruct.GetPagination() - if iterStruct.Len() == pag.TotalItems || pag.LastReturnedItem-iterStruct.Len() == -1 { // No - break - } else { // Yes - prevOffset := next - next = pag.LastReturnedItem - iterStruct.Len() - limit + 1 - if next < 0 { - next = 0 - limit = prevOffset + if order == historydb.OrderAsc { + if pag.LastReturnedItem == pag.LastItem { // No + break + } else { // Yes + next = pag.LastReturnedItem + 1 + } + } else { + if pag.FirstReturnedItem == pag.FirstItem { // No + break + } else { // Yes + next = pag.FirstReturnedItem - 1 } } } return nil } -//nolint:govet this is a temp patch to avoid running the test func doGoodReq(method, path string, reqBody io.Reader, returnStruct interface{}) error { ctx := context.Background() client := &http.Client{} @@ -667,7 +958,6 @@ func doGoodReq(method, path string, reqBody io.Reader, returnStruct interface{}) return swagger.ValidateResponse(ctx, responseValidationInput) } -//nolint:govet this is a temp patch to avoid running the test func doBadReq(method, path string, reqBody io.Reader, expectedResponseCode int) error { ctx := context.Background() client := &http.Client{} diff --git a/api/dbtoapistructs.go b/api/dbtoapistructs.go index 97f6989..e175604 100644 --- a/api/dbtoapistructs.go +++ b/api/dbtoapistructs.go @@ -7,36 +7,52 @@ import ( ethCommon "github.com/ethereum/go-ethereum/common" "github.com/hermeznetwork/hermez-node/common" + "github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/iden3/go-iden3-crypto/babyjub" + "github.com/iden3/go-merkletree" ) -// Commons of the API +type errorMsg struct { + Message string +} -type pagination struct { - TotalItems int `json:"totalItems"` - LastReturnedItem int `json:"lastReturnedItem"` +func bjjToString(bjj *babyjub.PublicKey) string { + pkComp := [32]byte(bjj.Compress()) + sum := pkComp[0] + for i := 1; i < len(pkComp); i++ { + sum += pkComp[i] + } + bjjSum := append(pkComp[:], sum) + return "hez:" + 
base64.RawURLEncoding.EncodeToString(bjjSum) } -//nolint:govet this is a temp patch to avoid running the test -type paginationer interface { - GetPagination() pagination - Len() int +func ethAddrToHez(addr ethCommon.Address) string { + return "hez:" + addr.String() } -type errorMsg struct { - Message string +func idxToHez(idx common.Idx, tokenSymbol string) string { + return "hez:" + tokenSymbol + ":" + strconv.Itoa(int(idx)) } -// History Tx related +// History Tx type historyTxsAPI struct { Txs []historyTxAPI `json:"transactions"` - Pagination pagination `json:"pagination"` + Pagination *db.Pagination `json:"pagination"` } -func (htx *historyTxsAPI) GetPagination() pagination { return htx.Pagination } -func (htx *historyTxsAPI) Len() int { return len(htx.Txs) } +func (htx *historyTxsAPI) GetPagination() *db.Pagination { + if htx.Txs[0].ItemID < htx.Txs[len(htx.Txs)-1].ItemID { + htx.Pagination.FirstReturnedItem = htx.Txs[0].ItemID + htx.Pagination.LastReturnedItem = htx.Txs[len(htx.Txs)-1].ItemID + } else { + htx.Pagination.LastReturnedItem = htx.Txs[0].ItemID + htx.Pagination.FirstReturnedItem = htx.Txs[len(htx.Txs)-1].ItemID + } + return htx.Pagination +} +func (htx *historyTxsAPI) Len() int { return len(htx.Txs) } type l1Info struct { ToForgeL1TxsNum *int64 `json:"toForgeL1TransactionsNum"` @@ -57,6 +73,7 @@ type l2Info struct { type historyTxAPI struct { IsL1 string `json:"L1orL2"` TxID string `json:"id"` + ItemID int `json:"itemId"` Type common.TxType `json:"type"` Position int `json:"position"` FromIdx *string `json:"fromAccountIndex"` @@ -75,6 +92,7 @@ func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI { for i := 0; i < len(dbTxs); i++ { apiTx := historyTxAPI{ TxID: dbTxs[i].TxID.String(), + ItemID: dbTxs[i].ItemID, Type: dbTxs[i].Type, Position: dbTxs[i].Position, ToIdx: idxToHez(dbTxs[i].ToIdx, dbTxs[i].TokenSymbol), @@ -124,20 +142,60 @@ func historyTxsToAPI(dbTxs []historydb.HistoryTx) []historyTxAPI { return apiTxs } -func 
bjjToString(bjj *babyjub.PublicKey) string { - pkComp := [32]byte(bjj.Compress()) - sum := pkComp[0] - for i := 1; i < len(pkComp); i++ { - sum += pkComp[i] - } - bjjSum := append(pkComp[:], sum) - return "hez:" + base64.RawURLEncoding.EncodeToString(bjjSum) +// Exit + +type exitsAPI struct { + Exits []exitAPI `json:"exits"` + Pagination *db.Pagination `json:"pagination"` } -func ethAddrToHez(addr ethCommon.Address) string { - return "hez:" + addr.String() +func (e *exitsAPI) GetPagination() *db.Pagination { + if e.Exits[0].ItemID < e.Exits[len(e.Exits)-1].ItemID { + e.Pagination.FirstReturnedItem = e.Exits[0].ItemID + e.Pagination.LastReturnedItem = e.Exits[len(e.Exits)-1].ItemID + } else { + e.Pagination.LastReturnedItem = e.Exits[0].ItemID + e.Pagination.FirstReturnedItem = e.Exits[len(e.Exits)-1].ItemID + } + return e.Pagination +} +func (e *exitsAPI) Len() int { return len(e.Exits) } + +type exitAPI struct { + ItemID int `json:"itemId"` + BatchNum common.BatchNum `json:"batchNum"` + AccountIdx string `json:"accountIndex"` + MerkleProof *merkletree.CircomVerifierProof `json:"merkleProof"` + Balance string `json:"balance"` + InstantWithdrawn *int64 `json:"instantWithdrawn"` + DelayedWithdrawRequest *int64 `json:"delayedWithdrawRequest"` + DelayedWithdrawn *int64 `json:"delayedWithdrawn"` + Token historydb.TokenRead `json:"token"` } -func idxToHez(idx common.Idx, tokenSymbol string) string { - return "hez:" + tokenSymbol + ":" + strconv.Itoa(int(idx)) +func historyExitsToAPI(dbExits []historydb.HistoryExit) []exitAPI { + apiExits := []exitAPI{} + for i := 0; i < len(dbExits); i++ { + apiExits = append(apiExits, exitAPI{ + ItemID: dbExits[i].ItemID, + BatchNum: dbExits[i].BatchNum, + AccountIdx: idxToHez(dbExits[i].AccountIdx, dbExits[i].TokenSymbol), + MerkleProof: dbExits[i].MerkleProof, + Balance: dbExits[i].Balance.String(), + InstantWithdrawn: dbExits[i].InstantWithdrawn, + DelayedWithdrawRequest: dbExits[i].DelayedWithdrawRequest, + DelayedWithdrawn: 
dbExits[i].DelayedWithdrawn, + Token: historydb.TokenRead{ + TokenID: dbExits[i].TokenID, + EthBlockNum: dbExits[i].TokenEthBlockNum, + EthAddr: dbExits[i].TokenEthAddr, + Name: dbExits[i].TokenName, + Symbol: dbExits[i].TokenSymbol, + Decimals: dbExits[i].TokenDecimals, + USD: dbExits[i].TokenUSD, + USDUpdate: dbExits[i].TokenUSDUpdate, + }, + }) + } + return apiExits } diff --git a/api/handlers.go b/api/handlers.go index 6b500dc..72fb44e 100644 --- a/api/handlers.go +++ b/api/handlers.go @@ -2,23 +2,25 @@ package api import ( "database/sql" - "errors" "net/http" "github.com/gin-gonic/gin" + "github.com/hermeznetwork/hermez-node/db/historydb" ) -// maxLimit is the max permited items to be returned in paginated responses -const maxLimit uint = 2049 +const ( + // maxLimit is the max permited items to be returned in paginated responses + maxLimit uint = 2049 -// dfltLast indicates how paginated endpoints use the query param last if not provided -const dfltLast = false + // dfltOrder indicates how paginated endpoints are ordered if not specified + dfltOrder = historydb.OrderAsc -// dfltLimit indicates the limit of returned items in paginated responses if the query param limit is not provided -const dfltLimit uint = 20 + // dfltLimit indicates the limit of returned items in paginated responses if the query param limit is not provided + dfltLimit uint = 20 -// 2^32 -1 -const maxUint32 = 4294967295 + // 2^32 -1 + maxUint32 = 4294967295 +) func postAccountCreationAuth(c *gin.Context) { @@ -45,45 +47,71 @@ func getAccount(c *gin.Context) { } func getExits(c *gin.Context) { - -} - -func getExit(c *gin.Context) { - -} - -func getHistoryTxs(c *gin.Context) { // Get query parameters - // TokenID - tokenID, err := parseQueryUint("tokenId", nil, 0, maxUint32, c) + // Account filters + tokenID, addr, bjj, idx, err := parseAccountFilters(c) if err != nil { retBadReq(err, c) return } - // Hez Eth addr - addr, err := parseQueryHezEthAddr(c) + // BatchNum + batchNum, err := 
parseQueryUint("batchNum", nil, 0, maxUint32, c) if err != nil { retBadReq(err, c) return } - // BJJ - bjj, err := parseQueryBJJ(c) + // Pagination + fromItem, order, limit, err := parsePagination(c) if err != nil { retBadReq(err, c) return } - if addr != nil && bjj != nil { - retBadReq(errors.New("bjj and hermezEthereumAddress params are incompatible"), c) + + // Fetch exits from historyDB + exits, pagination, err := h.GetExits( + addr, bjj, tokenID, idx, batchNum, fromItem, limit, order, + ) + if err != nil { + retSQLErr(err, c) + return + } + + // Build succesfull response + apiExits := historyExitsToAPI(exits) + c.JSON(http.StatusOK, &exitsAPI{ + Exits: apiExits, + Pagination: pagination, + }) +} + +func getExit(c *gin.Context) { + // Get batchNum and accountIndex + batchNum, err := parseParamUint("batchNum", nil, 0, maxUint32, c) + if err != nil { + retBadReq(err, c) return } - // Idx - idx, err := parseIdx(c) + idx, err := parseParamIdx(c) if err != nil { retBadReq(err, c) return } - if idx != nil && (addr != nil || bjj != nil || tokenID != nil) { - retBadReq(errors.New("accountIndex is incompatible with BJJ, hermezEthereumAddress and tokenId"), c) + // Fetch tx from historyDB + exit, err := h.GetExit(batchNum, idx) + if err != nil { + retSQLErr(err, c) + return + } + apiExits := historyExitsToAPI([]historydb.HistoryExit{*exit}) + // Build succesfull response + c.JSON(http.StatusOK, apiExits[0]) +} + +func getHistoryTxs(c *gin.Context) { + // Get query parameters + tokenID, addr, bjj, idx, err := parseAccountFilters(c) + if err != nil { + retBadReq(err, c) return } // BatchNum @@ -99,15 +127,15 @@ func getHistoryTxs(c *gin.Context) { return } // Pagination - offset, last, limit, err := parsePagination(c) + fromItem, order, limit, err := parsePagination(c) if err != nil { retBadReq(err, c) return } // Fetch txs from historyDB - txs, totalItems, err := h.GetHistoryTxs( - addr, bjj, tokenID, idx, batchNum, txType, offset, limit, *last, + txs, pagination, err := 
h.GetHistoryTxs( + addr, bjj, tokenID, idx, batchNum, txType, fromItem, limit, order, ) if err != nil { retSQLErr(err, c) @@ -116,21 +144,28 @@ func getHistoryTxs(c *gin.Context) { // Build succesfull response apiTxs := historyTxsToAPI(txs) - lastRet := int(*offset) + len(apiTxs) - 1 - if *last { - lastRet = totalItems - 1 - } c.JSON(http.StatusOK, &historyTxsAPI{ - Txs: apiTxs, - Pagination: pagination{ - TotalItems: totalItems, - LastReturnedItem: lastRet, - }, + Txs: apiTxs, + Pagination: pagination, }) } func getHistoryTx(c *gin.Context) { - + // Get TxID + txID, err := parseParamTxID(c) + if err != nil { + retBadReq(err, c) + return + } + // Fetch tx from historyDB + tx, err := h.GetHistoryTx(txID) + if err != nil { + retSQLErr(err, c) + return + } + apiTxs := historyTxsToAPI([]historydb.HistoryTx{*tx}) + // Build succesfull response + c.JSON(http.StatusOK, apiTxs[0]) } func getBatches(c *gin.Context) { diff --git a/api/parsers.go b/api/parsers.go index 5d93320..6b56808 100644 --- a/api/parsers.go +++ b/api/parsers.go @@ -9,56 +9,49 @@ import ( ethCommon "github.com/ethereum/go-ethereum/common" "github.com/hermeznetwork/hermez-node/common" + "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/iden3/go-iden3-crypto/babyjub" ) +// Query parsers + type querier interface { Query(string) string } -func parsePagination(c querier) (*uint, *bool, *uint, error) { - // Offset - offset := new(uint) - *offset = 0 - offset, err := parseQueryUint("offset", offset, 0, maxUint32, c) - if err != nil { - return nil, nil, nil, err - } - // Last - last := new(bool) - *last = dfltLast - last, err = parseQueryBool("last", last, c) +func parsePagination(c querier) (fromItem *uint, order string, limit *uint, err error) { + // FromItem + fromItem, err = parseQueryUint("fromItem", nil, 0, maxUint32, c) if err != nil { - return nil, nil, nil, err + return nil, "", nil, err } - if *last && (offset != nil && *offset > 0) { - return nil, nil, nil, errors.New( - "last and offset 
are incompatible, provide only one of them", + // Order + order = dfltOrder + const orderName = "order" + orderStr := c.Query(orderName) + if orderStr != "" && !(orderStr == historydb.OrderAsc || historydb.OrderDesc == orderStr) { + return nil, "", nil, errors.New( + "order must have the value " + historydb.OrderAsc + " or " + historydb.OrderDesc, ) } + if orderStr == historydb.OrderAsc { + order = historydb.OrderAsc + } else if orderStr == historydb.OrderDesc { + order = historydb.OrderDesc + } // Limit - limit := new(uint) + limit = new(uint) *limit = dfltLimit limit, err = parseQueryUint("limit", limit, 1, maxLimit, c) if err != nil { - return nil, nil, nil, err + return nil, "", nil, err } - return offset, last, limit, nil + return fromItem, order, limit, nil } func parseQueryUint(name string, dflt *uint, min, max uint, c querier) (*uint, error) { //nolint:SA4009 res may be not overwriten str := c.Query(name) - if str != "" { - resInt, err := strconv.Atoi(str) - if err != nil || resInt < 0 || resInt < int(min) || resInt > int(max) { - return nil, fmt.Errorf( - "Inavlid %s. 
Must be an integer within the range [%d, %d]", - name, min, max) - } - res := uint(resInt) - return &res, nil - } - return dflt, nil + return stringToUint(str, name, dflt, min, max) } func parseQueryBool(name string, dflt *bool, c querier) (*bool, error) { //nolint:SA4009 res may be not overwriten @@ -183,19 +176,105 @@ func parseQueryTxType(c querier) (*common.TxType, error) { ) } -func parseIdx(c querier) (*uint, error) { +func parseIdx(c querier) (*common.Idx, error) { const name = "accountIndex" - addrStr := c.Query(name) - if addrStr == "" { + idxStr := c.Query(name) + return stringToIdx(idxStr, name) +} + +func parseAccountFilters(c querier) (*common.TokenID, *ethCommon.Address, *babyjub.PublicKey, *common.Idx, error) { + // TokenID + tid, err := parseQueryUint("tokenId", nil, 0, maxUint32, c) + if err != nil { + return nil, nil, nil, nil, err + } + var tokenID *common.TokenID + if tid != nil { + tokenID = new(common.TokenID) + *tokenID = common.TokenID(*tid) + } + // Hez Eth addr + addr, err := parseQueryHezEthAddr(c) + if err != nil { + return nil, nil, nil, nil, err + } + // BJJ + bjj, err := parseQueryBJJ(c) + if err != nil { + return nil, nil, nil, nil, err + } + if addr != nil && bjj != nil { + return nil, nil, nil, nil, + errors.New("bjj and hermezEthereumAddress params are incompatible") + } + // Idx + idx, err := parseIdx(c) + if err != nil { + return nil, nil, nil, nil, err + } + if idx != nil && (addr != nil || bjj != nil || tokenID != nil) { + return nil, nil, nil, nil, + errors.New("accountIndex is incompatible with BJJ, hermezEthereumAddress and tokenId") + } + return tokenID, addr, bjj, idx, nil +} + +// Param parsers + +type paramer interface { + Param(string) string +} + +func parseParamTxID(c paramer) (common.TxID, error) { + const name = "id" + txIDStr := c.Param(name) + if txIDStr == "" { + return common.TxID{}, fmt.Errorf("%s is required", name) + } + txID, err := common.NewTxIDFromString(txIDStr) + if err != nil { + return common.TxID{}, 
fmt.Errorf("invalid %s", name) + } + return txID, nil +} + +func parseParamIdx(c paramer) (*common.Idx, error) { + const name = "accountIndex" + idxStr := c.Param(name) + return stringToIdx(idxStr, name) +} + +func parseParamUint(name string, dflt *uint, min, max uint, c paramer) (*uint, error) { //nolint:SA4009 res may be not overwriten + str := c.Param(name) + return stringToUint(str, name, dflt, min, max) +} + +func stringToIdx(idxStr, name string) (*common.Idx, error) { + if idxStr == "" { return nil, nil } - splitted := strings.Split(addrStr, ":") + splitted := strings.Split(idxStr, ":") const expectedLen = 3 - if len(splitted) != expectedLen { + if len(splitted) != expectedLen || splitted[0] != "hez" { return nil, fmt.Errorf( "invalid %s, must follow this: hez::index", name) } + // TODO: check that the tokenSymbol match the token related to the account index idxInt, err := strconv.Atoi(splitted[2]) - idx := uint(idxInt) + idx := common.Idx(idxInt) return &idx, err } + +func stringToUint(uintStr, name string, dflt *uint, min, max uint) (*uint, error) { + if uintStr != "" { + resInt, err := strconv.Atoi(uintStr) + if err != nil || resInt < 0 || resInt < int(min) || resInt > int(max) { + return nil, fmt.Errorf( + "Inavlid %s. 
Must be an integer within the range [%d, %d]", + name, min, max) + } + res := uint(resInt) + return &res, nil + } + return dflt, nil +} diff --git a/api/parsers_test.go b/api/parsers_test.go index 779904d..3824c03 100644 --- a/api/parsers_test.go +++ b/api/parsers_test.go @@ -111,52 +111,46 @@ func TestParseQueryBool(t *testing.T) { func TestParsePagination(t *testing.T) { c := &queryParser{} c.m = make(map[string]string) - // Offset out of range - c.m["offset"] = "-1" + // fromItem out of range + c.m["fromItem"] = "-1" _, _, _, err := parsePagination(c) assert.Error(t, err) - c.m["offset"] = strconv.Itoa(maxUint32 + 1) + c.m["fromItem"] = strconv.Itoa(maxUint32 + 1) _, _, _, err = parsePagination(c) assert.Error(t, err) - c.m["offset"] = "" - // Limit out of range - c.m["limit"] = "0" + c.m["fromItem"] = "" + // Bad order + c.m["order"] = "0" _, _, _, err = parsePagination(c) assert.Error(t, err) - c.m["limit"] = strconv.Itoa(int(maxLimit) + 1) - _, _, _, err = parsePagination(c) - assert.Error(t, err) - c.m["limit"] = "" - // Last and offset - c.m["offset"] = "1" - c.m["last"] = "true" + c.m["order"] = strconv.Itoa(int(maxLimit) + 1) _, _, _, err = parsePagination(c) assert.Error(t, err) // Default - c.m["offset"] = "" - c.m["last"] = "" + c.m["fromItem"] = "" + c.m["order"] = "" c.m["limit"] = "" - offset, last, limit, err := parsePagination(c) + fromItem, order, limit, err := parsePagination(c) assert.NoError(t, err) - assert.Equal(t, 0, int(*offset)) - assert.Equal(t, dfltLast, *last) + assert.Nil(t, fromItem) + assert.Equal(t, dfltOrder, order) assert.Equal(t, dfltLimit, *limit) // Correct - c.m["offset"] = "" - c.m["last"] = "true" + c.m["fromItem"] = "" + c.m["order"] = "ASC" c.m["limit"] = "25" - offset, last, limit, err = parsePagination(c) + fromItem, order, limit, err = parsePagination(c) assert.NoError(t, err) - assert.Equal(t, 0, int(*offset)) - assert.True(t, *last) + assert.Nil(t, fromItem) + assert.Equal(t, "ASC", order) assert.Equal(t, 25, 
int(*limit)) - c.m["offset"] = "25" - c.m["last"] = "false" + c.m["fromItem"] = "25" + c.m["order"] = "DESC" c.m["limit"] = "50" - offset, last, limit, err = parsePagination(c) + fromItem, order, limit, err = parsePagination(c) assert.NoError(t, err) - assert.Equal(t, 25, int(*offset)) - assert.False(t, *last) + assert.Equal(t, 25, int(*fromItem)) + assert.Equal(t, "DESC", order) assert.Equal(t, 50, int(*limit)) } diff --git a/api/swagger.yml b/api/swagger.yml index e0504bf..b709c8e 100644 --- a/api/swagger.yml +++ b/api/swagger.yml @@ -7,31 +7,27 @@ info: * Explorer: List transactions, slots, batches, ... * Exchange integrations + ### Pagination + + #### Usage + All the endpoints that return a list of undefined size use pagination. Unless the opposite is explicitly said. - All the retunred items are ordered by ascending chronological order. - This may not be trivial to deduce as the atributes used to order are not timestamps but the protocol ensures that those atributes follow the mentioned chronological order. - Each endpoint description clarify this in the `offset` description. + In order to use pagination, three query parameters are used: + * `fromItem`: indicates the first item to be returned. In general, this parameter shouldn't be provided in the first call to the endpoint, and use the `itemId` of the last returned item (+/-) 1, if the order is (ascending/descending). + * `order`: all paginated items are ordered chronologically. However, the specific fields to guarantee this order depend on each endpoint. For this purpose, `itemId` is used (itemId follows ascending chronological order except for unforged L1 user transactions). If the parameter is not provided, ascending order will be used by default. + * `limit`: maximum amount of items to include in each response. Default is 20, maximum 2049. + + Responses for those endpoints will always include a `pagination` object. 
This object includes the total amount of items that the endpoint will return at a given time with the given filters. Apart from that, it also includes the `itemId` of the last and first items that will be returned (not in a single response but within the total items). These two properties can be used to know when to stop querying. - The response of the calls to these endpoints will always include a `pagination` object that includes `totalItems` and `lastReturnedItem`. - To iterate over the items the following query parameters are used: - - `offset`: Indicates the first item that will be returned. Defaul 0. Incompatible with `last`. - - `limit`: Indicates the maximum number of returned items. Default 20. Maximum 2049. - - `last`: When true the last `limit` items are returned. Default false. Incompatible with `offset`. + #### Reorgs and safety - Iterate items in ascending chronological order: + Since all the items are ordered chronologically, there are no safety problems when fetching items in ascending order, except for reorgs (more on this later). + On the other hand, when iterating in descending order, new items will be added at the beginning. This doesn't cause any safety problem, but to get those new items, it's necessary to start querying without the `fromItem` set to `pagination.lastItem`. + To handle reorgs, the `itemId` can be used since it will change. This is important since other identifiers may be the same but with different content. As an example, if the batch 424 gets reorged, it will be deleted, but eventually, a new batch 424 will appear with potentially different content. - 1. Call the endpoint with no `offset` nor `last`. - 2. Call the endpoint with `offset=` until `lastReturnedItem == totalItems - 1`. + ### Signatures - Iterate items in descending chronological order: - - 1. Call the endpoint with `last`. - 2. Call the endpoint with `offset=`. 
Once the `calculated offset == 0`, it will be known that that call will return the first item and therefore no subsequent calls need to be done. - If the `totalItems` change while iterating, it means that new items have been added at the end of the list. To fetch this items, use the following: `offset=`, and from there iterate as decribed in *Iterate items in ascending chronological order*. - - **Note:** The returned list will alway be in ascending chronlogical order, so the returned arrays must be iterated from end to start in order to achieve reverse chronological order. - - **Note:** Pagination safety can be affected by Ethereum reorgs. In most of the cases this means that the last page can be changed, but older items should be safe. + The POST endpoints must be signed using BabyJubJub or Ethereum keys. TODO: add references to libraries / examples / ... version: "0.0.1" title: Hermez Network API @@ -164,20 +160,22 @@ paths: type: string description: Comma separated list of token identifiers. example: "3,87,91" - - name: offset + - name: fromItem in: query required: false - description: | - - Order: accounts will be ordered by increasing account index. - - Default first item: the first account to be returned will be the one that has the smallest account index. + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Accounts will be ordered by increasing account index. schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -258,6 +256,12 @@ paths: description: Get exit information. This information is required to perform a withdraw. 
operationId: getExits parameters: + - name: tokenId + in: query + required: false + description: Only get exits of specific token + schema: + $ref: '#/components/schemas/TokenId' - name: hezEthereumAddress in: query description: Get exits associated to a Ethereum address. Incompatible with query `BJJ` and `accountIndex`. @@ -272,7 +276,7 @@ paths: $ref: '#/components/schemas/BJJ' - name: accountIndex in: query - description: Get exits associated to a specific account. Incompatible with queries `hezEthereumAddress` and `BJJ`. + description: Get exits associated to a specific account. Incompatible with queries `tokenId`, `hezEthereumAddress` and `BJJ`. required: false schema: $ref: '#/components/schemas/AccountIndex' @@ -282,20 +286,22 @@ paths: required: false schema: $ref: '#/components/schemas/BatchNum' - - name: offset + - name: fromItem in: query required: false - description: | - - Order: exits will be ordered by increasing (batchNum, accountIndex). - - Default first item: the first exit to be returned will be the one that has the smallest (baychNum, accountIndex). + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Exits will be ordered by increasing (batchNum, accountIndex). schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -459,7 +465,7 @@ paths: description: >- Get historical transactions. This endpoint will return all the different types of transactions except for: - Transactions that are still in the transaction pool of any coordinator. These transactions can be fetched using `GET /transactions-pool/{id}`. - - L1 transactions that have not been forged yet. These transactions can be fetched using `GET /transactions-history/{id}`. 
+ - L1 transactions sent by users that have not been forged yet. These transactions can be fetched using `GET /transactions-history/{id}`. operationId: getHistoryTxs parameters: - name: tokenId @@ -498,20 +504,22 @@ paths: description: Only get transactions of a specific type. schema: $ref: '#/components/schemas/TransactionType' - - name: offset + - name: fromItem in: query required: false - description: | - - Order: History transactions will be ordered by increasing (batchNum, position). - - Default first item: the first transaction to be returned will be the one that has the smallest (batchNum, position). + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. History transactions will be ordered by (batchNum, position). schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -612,20 +620,22 @@ paths: description: Include only batches forged by `forgerAddr` schema: $ref: '#/components/schemas/EthereumAddress' - - name: offset + - name: fromItem in: query required: false - description: | - - Order: batches will be ordered by increasing `batchNum`. - - Default first item: the first batch to be returned will be the one that has the smallest `batchNum`. + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Batches will be ordered by increasing `batchNum`. schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -773,20 +783,22 @@ paths: description: If set to true, only include slots whose auction has finished. 
schema: type: boolean - - name: offset + - name: fromItem in: query required: false - description: | - - Order: slots will be ordered by increasing `slotNum`. - - Default first item: the first slot to be returned will be the one that has the smallest `slotNum`. + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Slots will be ordered by increasing `slotNum`. schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -880,20 +892,22 @@ paths: required: false schema: $ref: '#/components/schemas/EthereumAddress' - - name: offset + - name: fromItem in: query required: false - description: | - - Order: bids will be ordered by increasing (slotNum, bidValue)`. - - Default first item: the first bid to be returned will be the one that has the smallest (slotNum, bidValue). + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Bids will be ordered by increasing (slotNum, bidValue)`. schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -1031,20 +1045,22 @@ paths: description: Include token(s) by their names (or a substring of the name). schema: type: string - - name: offset + - name: fromItem in: query required: false - description: | - - Order: tokens will be ordered by increasing tokenID. - - Default first item: the first token to be returned will be the one that has the smallest tokenID. + description: Indicates the desired first item (using the itemId property) to be included in the response. 
schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Tokens will be ordered by increasing tokenID. schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -1146,20 +1162,22 @@ paths: description: Get information about coordinators. operationId: getCoordinators parameters: - - name: offset + - name: fromItem in: query required: false - description: | - - Order: coordinators will be ordered by increasing (ethereumBlock, forgerAddr). - - Default first item: the first token to be returned will be the one that has the smallest (ethereumBlock, forgerAddr). + description: Indicates the desired first item (using the itemId property) to be included in the response. schema: type: number - - name: last + - name: order in: query required: false - description: Get the last page. + description: Order of the returned items. Coordinators will be ordered by increasing (ethereumBlock, forgerAddr). schema: - type: boolean + type: string + default: ASC + enum: + - ASC + - DESC - name: limit in: query required: false @@ -1228,13 +1246,16 @@ paths: $ref: '#/components/schemas/Error500' components: schemas: + ItemId: + type: integer + description: Position of the item in the DB. This is useful for pagination, but has nothing to do with the protocol. 
PostPoolL2Transaction: type: object properties: id: $ref: '#/components/schemas/TransactionId' type: - $ref: '#/components/schemas/TransactionType' + $ref: '#/components/schemas/TransactionTypeL2' tokenId: $ref: '#/components/schemas/TokenId' fromAccountIndex: @@ -1341,7 +1362,7 @@ components: id: $ref: '#/components/schemas/TransactionId' type: - $ref: '#/components/schemas/TransactionType' + $ref: '#/components/schemas/TransactionTypeL2' fromAccountIndex: $ref: '#/components/schemas/AccountIndex' toAccountIndex: @@ -1480,6 +1501,14 @@ components: - ForceExit - TransferToEthAddr - TransferToBJJ + TransactionTypeL2: + type: string + description: Type of transaction. + enum: + - Exit + - Transfer + - TransferToEthAddr + - TransferToBJJ TokenId: type: integer description: Identifier of a token registered in the network. @@ -1556,6 +1585,8 @@ components: - L2 id: $ref: '#/components/schemas/TransactionId' + itemId: + $ref: '#/components/schemas/ItemId' type: $ref: '#/components/schemas/TransactionType' position: @@ -1655,6 +1686,7 @@ components: required: - L1orL2 - id + - itemId - type - position - fromAccountIndex @@ -1943,29 +1975,87 @@ components: - example: 7394 accountIndex: $ref: '#/components/schemas/AccountIndex' + itemId: + $ref: '#/components/schemas/ItemId' merkleProof: - type: string + type: object description: Existence proof of a leaf in a given Merkle Root. Encoded as hexadecimal string. 
- example: "0x347089321de8971320489793a823470918fffeab" + properties: + Root: + type: array + items: + type: integer + Siblings: + type: array + items: + type: integer + OldKey: + type: array + items: + type: integer + OldValue: + type: array + items: + type: integer + IsOld0: + type: boolean + Key: + type: array + items: + type: integer + Value: + type: array + items: + type: integer + Fnc: + type: integer + required: + - Root + - Siblings + - OldKey + - OldValue + - IsOld0 + - Key + - Value + - Fnc + additionalProperties: false + example: {"Root":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Siblings":[0,1,2],"OldKey":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"OldValue":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"IsOld0":true,"Key":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Value":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Fnc":0} balance: $ref: '#/components/schemas/BigInt' instantWithdrawn: - allOf: - - $ref: '#/components/schemas/EthBlockNum' - - description: Block in which the exit balance was instantly withdrawn. Null indicates that an instant withdrawn hasn't been performed. - - example: 74747363 + type: integer + description: Block in which the exit balance was instantly withdrawn. Null indicates that an instant withdrawn hasn't been performed. + minimum: 0 + maximum: 1.84467440737096e+19 + example: 74747363 + nullable: true delayedWithdrawRequest: - allOf: - - $ref: '#/components/schemas/EthBlockNum' - - description: Block in which the exit balance was requested to delay withdraw. Null indicates that a delay withdraw hasn't been performed. - - example: null + type: integer + description: Block in which the exit balance was requested to delay withdraw. Null indicates that a delay withdraw hasn't been performed. 
+ minimum: 0 + maximum: 1.84467440737096e+19 + example: null + nullable: true delayedWithdrawn: - allOf: - - $ref: '#/components/schemas/EthBlockNum' - - description: Block in which the exit balance was delayed withdrawn after a delay withdraw request. Null indicates that a delay withdraw hasn't been performed. - - example: null + type: integer + description: Block in which the exit balance was delayed withdrawn after a delay withdraw request. Null indicates that a delay withdraw hasn't been performed. + minimum: 0 + maximum: 1.84467440737096e+19 + example: null + nullable: true token: $ref: '#/components/schemas/Token' + required: + - batchNum + - accountIndex + - itemId + - merkleProof + - balance + - instantWithdrawn + - delayedWithdrawRequest + - delayedWithdrawn + - token + additionalProperties: false Exits: type: object properties: @@ -1975,7 +2065,11 @@ components: items: $ref: '#/components/schemas/Exit' pagination: - $ref: '#/components/schemas/PaginationInfo' + $ref: '#/components/schemas/PaginationInfo' + required: + - exits + - pagination + additionalProperties: false Account: type: object description: State tree leaf. It contains balance and nonce of an account. @@ -2217,10 +2311,14 @@ components: type: integer description: Amount of items that the endpoint can return given the filters and the current state of the database. example: 2048 - lastReturnedItem: + firstItem: + type: integer + description: The smallest itemId that the endpoint will return with the given filters. + example: 50 + lastItem: type: integer - description: Index of the last returned item. Useful to query next items. - example: 439 + description: The greatest itemId that the endpoint will return with the given filters. + example: 2130 Config: type: object description: Configuration parameters of the different smart contracts that power the Hermez network. 
diff --git a/common/tx.go b/common/tx.go index 8d719b2..a20a8ea 100644 --- a/common/tx.go +++ b/common/tx.go @@ -3,8 +3,10 @@ package common import ( "database/sql/driver" "encoding/hex" + "errors" "fmt" "math/big" + "strings" ethCommon "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/babyjub" @@ -56,6 +58,21 @@ func (txid TxID) String() string { return "0x" + hex.EncodeToString(txid[:]) } +// NewTxIDFromString returns a string hexadecimal representation of the TxID +func NewTxIDFromString(idStr string) (TxID, error) { + txid := TxID{} + idStr = strings.TrimPrefix(idStr, "0x") + decoded, err := hex.DecodeString(idStr) + if err != nil { + return TxID{}, err + } + if len(decoded) != TxIDLen { + return txid, errors.New("Invalid idStr") + } + copy(txid[:], decoded) + return txid, nil +} + // TxType is a string that represents the type of a Hermez network transaction type TxType string diff --git a/db/historydb/historydb.go b/db/historydb/historydb.go index e5fe5a1..dc8bea7 100644 --- a/db/historydb/historydb.go +++ b/db/historydb/historydb.go @@ -18,6 +18,13 @@ import ( "github.com/russross/meddler" ) +const ( + // OrderAsc indicates ascending order when using pagination + OrderAsc = "ASC" + // OrderDesc indicates descending order when using pagination + OrderDesc = "DESC" +) + // TODO(Edu): Document here how HistoryDB is kept consistent // HistoryDB persist the historic of the rollup @@ -445,25 +452,47 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error { // return db.SlicePtrsToSlice(txs).([]common.Tx), err // } +// GetHistoryTx returns a tx from the DB given a TxID +func (hdb *HistoryDB) GetHistoryTx(txID common.TxID) (*HistoryTx, error) { + tx := &HistoryTx{} + err := meddler.QueryRow( + hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position, + tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.amount_usd, + tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin, + tx.from_eth_addr, 
tx.from_bjj, tx.load_amount, + tx.load_amount_usd, tx.fee, tx.fee_usd, tx.nonce, + token.token_id, token.eth_block_num AS token_block, + token.eth_addr, token.name, token.symbol, token.decimals, token.usd, + token.usd_update, block.timestamp + FROM tx INNER JOIN token ON tx.token_id = token.token_id + INNER JOIN block ON tx.eth_block_num = block.eth_block_num + WHERE tx.id = $1;`, txID, + ) + return tx, err +} + // GetHistoryTxs returns a list of txs from the DB using the HistoryTx struct +// and pagination info func (hdb *HistoryDB) GetHistoryTxs( ethAddr *ethCommon.Address, bjj *babyjub.PublicKey, - tokenID, idx, batchNum *uint, txType *common.TxType, - offset, limit *uint, last bool, -) ([]HistoryTx, int, error) { + tokenID *common.TokenID, idx *common.Idx, batchNum *uint, txType *common.TxType, + fromItem, limit *uint, order string, +) ([]HistoryTx, *db.Pagination, error) { if ethAddr != nil && bjj != nil { - return nil, 0, errors.New("ethAddr and bjj are incompatible") + return nil, nil, errors.New("ethAddr and bjj are incompatible") } var query string var args []interface{} - queryStr := `SELECT tx.is_l1, tx.id, tx.type, tx.position, tx.from_idx, tx.to_idx, - tx.amount, tx.token_id, tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, - tx.user_origin, tx.from_eth_addr, tx.from_bjj, tx.load_amount, tx.fee, tx.nonce, + queryStr := `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position, + tx.from_idx, tx.to_idx, tx.amount, tx.token_id, tx.amount_usd, + tx.batch_num, tx.eth_block_num, tx.to_forge_l1_txs_num, tx.user_origin, + tx.from_eth_addr, tx.from_bjj, tx.load_amount, + tx.load_amount_usd, tx.fee, tx.fee_usd, tx.nonce, token.token_id, token.eth_block_num AS token_block, token.eth_addr, token.name, token.symbol, token.decimals, token.usd, - token.usd_update, block.timestamp, count(*) OVER() AS total_items - FROM tx - INNER JOIN token ON tx.token_id = token.token_id + token.usd_update, block.timestamp, count(*) OVER() AS total_items, + MIN(tx.item_id) 
OVER() AS first_item, MAX(tx.item_id) OVER() AS last_item + FROM tx INNER JOIN token ON tx.token_id = token.token_id INNER JOIN block ON tx.eth_block_num = block.eth_block_num ` // Apply filters nextIsAnd := false @@ -523,33 +552,164 @@ func (hdb *HistoryDB) GetHistoryTxs( } queryStr += "tx.type = ? " args = append(args, txType) - // nextIsAnd = true + nextIsAnd = true + } + if fromItem != nil { + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " + } + if order == OrderAsc { + queryStr += "tx.item_id >= ? " + } else { + queryStr += "tx.item_id <= ? " + } + args = append(args, fromItem) + nextIsAnd = true } + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " + } + queryStr += "tx.batch_num IS NOT NULL " + // pagination - if last { - queryStr += "ORDER BY (batch_num, position) DESC NULLS FIRST " + queryStr += "ORDER BY tx.item_id " + if order == OrderAsc { + queryStr += " ASC " } else { - queryStr += "ORDER BY (batch_num, position) ASC NULLS LAST " - queryStr += fmt.Sprintf("OFFSET %d ", *offset) + queryStr += " DESC " } queryStr += fmt.Sprintf("LIMIT %d;", *limit) query = hdb.db.Rebind(queryStr) - // log.Debug(query) + log.Debug(query) txsPtrs := []*HistoryTx{} if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil { - return nil, 0, err + return nil, nil, err } txs := db.SlicePtrsToSlice(txsPtrs).([]HistoryTx) if len(txs) == 0 { - return nil, 0, sql.ErrNoRows - } else if last { - tmp := []HistoryTx{} - for i := len(txs) - 1; i >= 0; i-- { - tmp = append(tmp, txs[i]) + return nil, nil, sql.ErrNoRows + } + return txs, &db.Pagination{ + TotalItems: txs[0].TotalItems, + FirstItem: txs[0].FirstItem, + LastItem: txs[0].LastItem, + }, nil +} + +// GetExit returns a exit from the DB +func (hdb *HistoryDB) GetExit(batchNum *uint, idx *common.Idx) (*HistoryExit, error) { + exit := &HistoryExit{} + err := meddler.QueryRow( + hdb.db, exit, `SELECT exit_tree.*, token.token_id, token.eth_block_num AS token_block, + 
token.eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update + FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx + INNER JOIN token ON account.token_id = token.token_id + WHERE exit_tree.batch_num = $1 AND exit_tree.account_idx = $2;`, batchNum, idx, + ) + return exit, err +} + +// GetExits returns a list of exits from the DB and pagination info +func (hdb *HistoryDB) GetExits( + ethAddr *ethCommon.Address, bjj *babyjub.PublicKey, + tokenID *common.TokenID, idx *common.Idx, batchNum *uint, + fromItem, limit *uint, order string, +) ([]HistoryExit, *db.Pagination, error) { + if ethAddr != nil && bjj != nil { + return nil, nil, errors.New("ethAddr and bjj are incompatible") + } + var query string + var args []interface{} + queryStr := `SELECT exit_tree.*, token.token_id, token.eth_block_num AS token_block, + token.eth_addr, token.name, token.symbol, token.decimals, token.usd, + token.usd_update, COUNT(*) OVER() AS total_items, MIN(exit_tree.item_id) OVER() AS first_item, MAX(exit_tree.item_id) OVER() AS last_item + FROM exit_tree INNER JOIN account ON exit_tree.account_idx = account.idx + INNER JOIN token ON account.token_id = token.token_id ` + // Apply filters + nextIsAnd := false + // ethAddr filter + if ethAddr != nil { + queryStr += "WHERE account.eth_addr = ? " + nextIsAnd = true + args = append(args, ethAddr) + } else if bjj != nil { // bjj filter + queryStr += "WHERE account.bjj = ? " + nextIsAnd = true + args = append(args, bjj) + } + // tokenID filter + if tokenID != nil { + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " + } + queryStr += "account.token_id = ? " + args = append(args, tokenID) + nextIsAnd = true + } + // idx filter + if idx != nil { + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " + } + queryStr += "exit_tree.account_idx = ? 
" + args = append(args, idx) + nextIsAnd = true + } + // batchNum filter + if batchNum != nil { + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " + } + queryStr += "exit_tree.batch_num = ? " + args = append(args, batchNum) + nextIsAnd = true + } + if fromItem != nil { + if nextIsAnd { + queryStr += "AND " + } else { + queryStr += "WHERE " } - txs = tmp + if order == OrderAsc { + queryStr += "exit_tree.item_id >= ? " + } else { + queryStr += "exit_tree.item_id <= ? " + } + args = append(args, fromItem) + // nextIsAnd = true + } + // pagination + queryStr += "ORDER BY exit_tree.item_id " + if order == OrderAsc { + queryStr += " ASC " + } else { + queryStr += " DESC " + } + queryStr += fmt.Sprintf("LIMIT %d;", *limit) + query = hdb.db.Rebind(queryStr) + // log.Debug(query) + exits := []*HistoryExit{} + if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil { + return nil, nil, err + } + if len(exits) == 0 { + return nil, nil, sql.ErrNoRows } - return txs, txs[0].TotalItems, nil + return db.SlicePtrsToSlice(exits).([]HistoryExit), &db.Pagination{ + TotalItems: exits[0].TotalItems, + FirstItem: exits[0].FirstItem, + LastItem: exits[0].LastItem, + }, nil } // // GetTx returns a tx from the DB diff --git a/db/historydb/historydb_test.go b/db/historydb/historydb_test.go index 4289f83..e907a49 100644 --- a/db/historydb/historydb_test.go +++ b/db/historydb/historydb_test.go @@ -213,6 +213,7 @@ func TestTxs(t *testing.T) { /* Uncomment once the transaction generation is fixed + !! test that batches that forge user L1s !! !! Missing tests to check that historic USD is not set if USDUpdate is too old (24h) !! 
// Generate fake L1 txs @@ -333,9 +334,14 @@ func TestExitTree(t *testing.T) { blocks := setTestBlocks(0, 10) batches := test.GenBatches(nBatches, blocks) err := historyDB.AddBatches(batches) + const nTokens = 50 + tokens := test.GenTokens(nTokens, blocks) + assert.NoError(t, historyDB.AddTokens(tokens)) assert.NoError(t, err) - - exitTree := test.GenExitTree(nBatches) + const nAccounts = 3 + accs := test.GenAccounts(nAccounts, 0, tokens, nil, nil, batches) + assert.NoError(t, historyDB.AddAccounts(accs)) + exitTree := test.GenExitTree(nBatches, batches, accs) err = historyDB.AddExitTree(exitTree) assert.NoError(t, err) } diff --git a/db/historydb/views.go b/db/historydb/views.go index 19360b9..5898b9d 100644 --- a/db/historydb/views.go +++ b/db/historydb/views.go @@ -7,6 +7,7 @@ import ( ethCommon "github.com/ethereum/go-ethereum/common" "github.com/hermeznetwork/hermez-node/common" "github.com/iden3/go-iden3-crypto/babyjub" + "github.com/iden3/go-merkletree" ) // HistoryTx is a representation of a generic Tx with additional information @@ -15,6 +16,7 @@ type HistoryTx struct { // Generic IsL1 bool `meddler:"is_l1"` TxID common.TxID `meddler:"id"` + ItemID int `meddler:"item_id"` Type common.TxType `meddler:"type"` Position int `meddler:"position"` FromIdx *common.Idx `meddler:"from_idx"` @@ -38,6 +40,8 @@ type HistoryTx struct { // API extras Timestamp time.Time `meddler:"timestamp,utctime"` TotalItems int `meddler:"total_items"` + FirstItem int `meddler:"first_item"` + LastItem int `meddler:"last_item"` TokenID common.TokenID `meddler:"token_id"` TokenEthBlockNum int64 `meddler:"token_block"` TokenEthAddr ethCommon.Address `meddler:"eth_addr"` @@ -86,3 +90,27 @@ type TokenRead struct { USD *float64 `json:"USD" meddler:"usd"` USDUpdate *time.Time `json:"fiatUpdate" meddler:"usd_update,utctime"` } + +// HistoryExit is a representation of a exit with additional information +// required by the API, and extracted by joining token table +type HistoryExit struct { + 
ItemID int `meddler:"item_id"` + BatchNum common.BatchNum `meddler:"batch_num"` + AccountIdx common.Idx `meddler:"account_idx"` + MerkleProof *merkletree.CircomVerifierProof `meddler:"merkle_proof,json"` + Balance *big.Int `meddler:"balance,bigint"` + InstantWithdrawn *int64 `meddler:"instant_withdrawn"` + DelayedWithdrawRequest *int64 `meddler:"delayed_withdraw_request"` + DelayedWithdrawn *int64 `meddler:"delayed_withdrawn"` + TotalItems int `meddler:"total_items"` + FirstItem int `meddler:"first_item"` + LastItem int `meddler:"last_item"` + TokenID common.TokenID `meddler:"token_id"` + TokenEthBlockNum int64 `meddler:"token_block"` + TokenEthAddr ethCommon.Address `meddler:"eth_addr"` + TokenName string `meddler:"name"` + TokenSymbol string `meddler:"symbol"` + TokenDecimals uint64 `meddler:"decimals"` + TokenUSD *float64 `meddler:"usd"` + TokenUSDUpdate *time.Time `meddler:"usd_update"` +} diff --git a/db/migrations/0001.sql b/db/migrations/0001.sql index d52b3fe..63cc7b2 100644 --- a/db/migrations/0001.sql +++ b/db/migrations/0001.sql @@ -28,17 +28,6 @@ CREATE TABLE batch ( total_fees_usd NUMERIC ); -CREATE TABLE exit_tree ( - batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, - account_idx BIGINT, - merkle_proof BYTEA NOT NULL, - balance BYTEA NOT NULL, - instant_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL, - delayed_withdraw_request BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL, - delayed_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL, - PRIMARY KEY (batch_num, account_idx) -); - CREATE TABLE bid ( slot_num BIGINT NOT NULL, bid_value BYTEA NOT NULL, @@ -58,6 +47,25 @@ CREATE TABLE token ( usd_update TIMESTAMP WITHOUT TIME ZONE ); +CREATE TABLE account ( + idx BIGINT PRIMARY KEY, + token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE, + batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE, + bjj BYTEA NOT NULL, + eth_addr BYTEA NOT NULL +); + +CREATE TABLE 
exit_tree ( + item_id SERIAL PRIMARY KEY, + batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, + account_idx BIGINT REFERENCES account (idx) ON DELETE CASCADE, + merkle_proof BYTEA NOT NULL, + balance BYTEA NOT NULL, + instant_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL, + delayed_withdraw_request BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL, + delayed_withdrawn BIGINT REFERENCES batch (batch_num) ON DELETE SET NULL +); + -- +migrate StatementBegin CREATE FUNCTION set_token_usd_update() RETURNS TRIGGER @@ -75,10 +83,13 @@ LANGUAGE plpgsql; CREATE TRIGGER trigger_token_usd_update BEFORE UPDATE OR INSERT ON token FOR EACH ROW EXECUTE PROCEDURE set_token_usd_update(); +CREATE SEQUENCE tx_item_id; + CREATE TABLE tx ( -- Generic TX + item_id INTEGER PRIMARY KEY DEFAULT nextval('tx_item_id'), is_l1 BOOLEAN NOT NULL, - id BYTEA PRIMARY KEY, + id BYTEA, type VARCHAR(40) NOT NULL, position INT NOT NULL, from_idx BIGINT, @@ -103,8 +114,6 @@ CREATE TABLE tx ( nonce BIGINT ); -CREATE INDEX tx_order ON tx (batch_num, position); - -- +migrate StatementBegin CREATE FUNCTION fee_percentage(NUMERIC) RETURNS NUMERIC @@ -412,9 +421,10 @@ BEGIN usd / POWER(10, decimals), usd_update, timestamp FROM token INNER JOIN block on token.eth_block_num = block.eth_block_num WHERE token_id = NEW.token_id; IF _tx_timestamp - interval '24 hours' < _usd_update AND _tx_timestamp + interval '24 hours' > _usd_update THEN NEW."amount_usd" = (SELECT _value * NEW.amount_f); - NEW."load_amount_usd" = (SELECT _value * NEW.load_amount_f); IF NOT NEW.is_l1 THEN NEW."fee_usd" = (SELECT NEW."amount_usd" * fee_percentage(NEW.fee::NUMERIC)); + ELSE + NEW."load_amount_usd" = (SELECT _value * NEW.load_amount_f); END IF; END IF; RETURN NEW; @@ -433,8 +443,13 @@ $BODY$ BEGIN IF NEW.forge_l1_txs_num IS NOT NULL THEN UPDATE tx - SET batch_num = NEW.batch_num - WHERE user_origin AND NEW.forge_l1_txs_num = to_forge_l1_txs_num; + SET item_id = nextval('tx_item_id'), batch_num 
= NEW.batch_num + WHERE id IN ( + SELECT id FROM tx + WHERE user_origin AND NEW.forge_l1_txs_num = to_forge_l1_txs_num + ORDER BY position + FOR UPDATE + ); END IF; RETURN NEW; END; @@ -444,14 +459,6 @@ LANGUAGE plpgsql; CREATE TRIGGER trigger_forge_l1_txs AFTER INSERT ON batch FOR EACH ROW EXECUTE PROCEDURE forge_l1_user_txs(); -CREATE TABLE account ( - idx BIGINT PRIMARY KEY, - token_id INT NOT NULL REFERENCES token (token_id) ON DELETE CASCADE, - batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE, - bjj BYTEA NOT NULL, - eth_addr BYTEA NOT NULL -); - CREATE TABLE rollup_vars ( eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE, forge_l1_timeout BYTEA NOT NULL, diff --git a/db/utils.go b/db/utils.go index 4e341c1..cb96172 100644 --- a/db/utils.go +++ b/db/utils.go @@ -177,3 +177,18 @@ func SlicePtrsToSlice(slice interface{}) interface{} { } return res.Interface() } + +// Pagination give information on the items of a query +type Pagination struct { + TotalItems int `json:"totalItems"` + FirstItem int `json:"firstItem"` + LastItem int `json:"lastItem"` + FirstReturnedItem int `json:"-"` + LastReturnedItem int `json:"-"` +} + +// Paginationer is an interface that allows getting pagination info on any struct +type Paginationer interface { + GetPagination() *Pagination + Len() int +} diff --git a/test/historydb.go b/test/historydb.go index 2b59b34..b0dd35f 100644 --- a/test/historydb.go +++ b/test/historydb.go @@ -68,7 +68,7 @@ func GenBatches(nBatches int, blocks []common.Block) []common.Batch { } if i%2 == 0 { toForge := new(int64) - *toForge = int64(i) + *toForge = int64(i + 1) batch.ForgeL1TxsNum = toForge } batches = append(batches, batch) @@ -142,7 +142,9 @@ func GenL1Txs( panic(err) } tx = *nTx - if batches[i%len(batches)].ForgeL1TxsNum != nil { + if !tx.UserOrigin { + tx.BatchNum = &batches[i%len(batches)].BatchNum + } else if batches[i%len(batches)].ForgeL1TxsNum != nil { // Add already forged txs 
tx.BatchNum = &batches[i%len(batches)].BatchNum setFromToAndAppend(fromIdx, tx, i, nUserTxs, userAddr, accounts, &userTxs, &othersTxs) @@ -332,15 +334,15 @@ func GenBids(nBids int, blocks []common.Block, coords []common.Coordinator) []co // GenExitTree generates an exitTree (as an array of Exits) //nolint:gomnd -func GenExitTree(n int) []common.ExitInfo { +func GenExitTree(n int, batches []common.Batch, accounts []common.Account) []common.ExitInfo { exitTree := make([]common.ExitInfo, n) for i := 0; i < n; i++ { exitTree[i] = common.ExitInfo{ - BatchNum: common.BatchNum(i + 1), + BatchNum: batches[i%len(batches)].BatchNum, InstantWithdrawn: nil, DelayedWithdrawRequest: nil, DelayedWithdrawn: nil, - AccountIdx: common.Idx(i * 10), + AccountIdx: accounts[i%len(accounts)].Idx, MerkleProof: &merkletree.CircomVerifierProof{ Root: &merkletree.Hash{byte(i), byte(i + 1)}, Siblings: []*big.Int{ @@ -356,6 +358,20 @@ func GenExitTree(n int) []common.ExitInfo { }, Balance: big.NewInt(int64(i) * 1000), } + if i%2 == 0 { + instant := new(int64) + *instant = int64(batches[(i+1)%len(batches)].BatchNum) + exitTree[i].InstantWithdrawn = instant + } else if i%3 == 0 { + delayedReq := new(int64) + *delayedReq = int64(batches[(i+1)%len(batches)].BatchNum) + exitTree[i].DelayedWithdrawRequest = delayedReq + if i%9 == 0 { + delayed := new(int64) + *delayed = int64(batches[(i+2)%len(batches)].BatchNum) + exitTree[i].DelayedWithdrawn = delayed + } + } } return exitTree }