Compare commits

..

95 Commits

Author SHA1 Message Date
Eduard S
b330889570 WIP 2021-03-09 14:32:23 +01:00
Eduard S
a5ef822c64 WIP3 2021-03-08 18:17:15 +01:00
Eduard S
5501f30062 WIP2 2021-03-04 14:05:35 +01:00
Eduard S
d4f6926311 WIP 2021-03-03 14:37:41 +01:00
Eduard S
bfba1ba2d2 Clean 2021-03-03 12:50:21 +01:00
arnaubennassar
eed635539f pull 2021-03-02 18:49:34 +01:00
arnaubennassar
87610f6188 wip 2021-03-02 18:46:56 +01:00
arnaubennassar
4b596072d2 Add table to decouple API from node 2021-03-02 15:22:02 +01:00
Eduard S
95c4019cb2 WIP 2021-03-01 10:51:30 +01:00
Eduard S
c4d5e8a7ab WIP 2021-03-01 10:51:30 +01:00
Eduard S
c1375d9c5f Serve API only via cli 2021-03-01 10:51:30 +01:00
Eduard S
39b7882ef2 Merge pull request #594 from hermeznetwork/fix/post-tx-no-usd
Avoid SQL error when checking value of token without usd on tx insert
2021-03-01 10:34:35 +01:00
Eduard S
c7d0422c16 Merge pull request #596 from hermeznetwork/fix/l1CoordinatorFromBytes-parser
Update L1CoordinatorTxFromBytes parser to EIP712
2021-03-01 10:19:21 +01:00
arnaucube
56ffea2190 Update L1CoordinatorTxFromBytes to EIP712 2021-02-28 10:56:51 +01:00
arnaubennassar
cf70111de5 Avoid SQL error when checking value of token without usd on tx insert 2021-02-26 18:22:07 +01:00
a_bennassar
f664a3a382 Merge pull request #588 from hermeznetwork/feature/check-forge-balance
check the forge address balance before starting the node
2021-02-26 17:32:53 +01:00
Danilo Pantani
cd1df6ea8c check the forge address balance before starting the node 2021-02-26 12:51:31 -03:00
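A hedged sketch of the startup check this commit describes, using go-ethereum's ethclient (variable names are illustrative, not the commit's actual code):

```go
// Fail fast at node startup if the configured forger address holds no ETH.
// client is an assumed *ethclient.Client, forgerAddr a common.Address.
balance, err := client.BalanceAt(ctx, forgerAddr, nil) // nil: latest block
if err != nil {
	return tracerr.Wrap(err)
}
if balance.Sign() == 0 {
	return fmt.Errorf("forger address %v has no balance", forgerAddr)
}
```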
Eduard S
26e2bbc262 WIP 2021-02-26 16:17:06 +01:00
Eduard S
6da827c751 Merge pull request #591 from hermeznetwork/feature/accCreationAuth-EIP712
Update AccCreationAuth signature hash to EIP-712
2021-02-26 13:50:52 +01:00
arnaucube
60023e4574 Update AccCreationAuth signature hash to EIP-712
Compatible with `hermeznetwork/contracts` commit: `67726208723a40f2251953aaabf4d2b6221f8b13`
Compatible with `hermeznetwork/commonjs` commit: `dee266fb036a64bebc65756ebd5f0361929c110d`
2021-02-26 13:46:35 +01:00
arnau
9de3a4ec6a Merge pull request #590 from hermeznetwork/fix/zki-imExitRoot
Set the intermediary signal when exit TX is not inserted
2021-02-26 13:39:06 +01:00
Eduard S
bb4c464200 WIP 2021-02-26 13:09:24 +01:00
Eduard S
982899efed Serve API only via cli 2021-02-26 13:09:24 +01:00
Alberto Elias
91c96eb429 Merge pull request #589 from hermeznetwork/feature/api-batch-timeout
Add forge delay to api get state
2021-02-26 11:44:20 +00:00
Jordi Baylina
70f874aaf1 Set the intermediary signal when exit TX is not inserted 2021-02-26 12:19:29 +01:00
arnaubennassar
54508b0ba6 Add forge delay to api get state 2021-02-26 12:15:39 +01:00
Eduard S
0adcf1a2bc Merge pull request #587 from hermeznetwork/feature/legacy-sync2
Update synchronizer.Sync2 to Sync from legacy impl
2021-02-25 17:02:15 +01:00
a_bennassar
527cd9a2cc Merge pull request #584 from hermeznetwork/fix/fgingAfterReboot
Reorg l2db before starting pipeline
2021-02-25 16:59:57 +01:00
arnau
b269117a32 Merge pull request #586 from hermeznetwork/feature/gindebugviaconfig
Set gin debug mode via config
2021-02-25 16:57:55 +01:00
arnau
e14705c13b Merge pull request #585 from hermeznetwork/feature/gascalc
Calculate ForgeBatch gasLimit with parametrized formula
2021-02-25 16:56:59 +01:00
arnau
5ff0350f51 Merge pull request #583 from hermeznetwork/fix/checkinterval
Fix missing timer reset in TxManager
2021-02-25 16:53:48 +01:00
arnaucube
15fd3945dd Update synchronizer.Sync2 to Sync from legacy impl 2021-02-25 16:52:09 +01:00
Eduard S
83c256deda Merge pull request #581 from hermeznetwork/feature/api-L1-forge-time
Add API metric for time to forge L1 tx
2021-02-25 16:37:05 +01:00
Eduard S
d50ae71710 Merge pull request #580 from hermeznetwork/fix/zki-multiple-exits-balance2
Fix ZKI Exit Balance2 accumulate Amounts
2021-02-25 16:22:52 +01:00
Eduard S
ba108b1146 Set gin debug mode via config
Add new config setting `Debug.GinDebugMode`.  When set to true, gin will run in
debug mode.  If not set, gin will run in release mode.  Before this change, gin
always ran in debug mode, so to keep the same behaviour as before, set this
parameter to true.
2021-02-25 15:51:59 +01:00
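A hedged sketch of how such a flag maps onto gin's global mode (the `cfg` struct here is assumed, not the node's actual config type; `gin.SetMode` and the mode constants are gin's real API):

```go
// Illustrative wiring of Debug.GinDebugMode.
if cfg.Debug.GinDebugMode {
	gin.SetMode(gin.DebugMode)
} else {
	gin.SetMode(gin.ReleaseMode)
}
```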
Eduard S
c771bdf94e Calculate ForgeBatch gasLimit with parametrized formula
Add the following config parameters at
`Coordinator.EthClient.ForgeBatchGasCost`:
- `Fixed`
- `L1UserTx`
- `L1CoordTx`
- `L2Tx`
These are the costs associated with a ForgeBatch transaction, split
into different parts, to be used in the formula that computes the gasLimit.
2021-02-25 15:26:04 +01:00
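The commit message implies a linear formula; a hedged sketch (struct and function names are illustrative):

```go
// ForgeBatchGasCost mirrors the config fields listed above.
type ForgeBatchGasCost struct {
	Fixed, L1UserTx, L1CoordTx, L2Tx uint64
}

// forgeBatchGasLimit computes the gasLimit of a ForgeBatch transaction from
// the per-part costs and the number of txs of each kind in the batch.
func forgeBatchGasLimit(c ForgeBatchGasCost, nL1User, nL1Coord, nL2 int) uint64 {
	return c.Fixed +
		uint64(nL1User)*c.L1UserTx +
		uint64(nL1Coord)*c.L1CoordTx +
		uint64(nL2)*c.L2Tx
}
```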
Eduard S
3c68aa5014 Reorg l2db before starting pipeline 2021-02-25 14:12:15 +01:00
Eduard S
4856251f01 Fix missing timer reset in TxManager
Also, replace usage of time.Duration with time.NewTimer, because the latter allows
replacing timers by stopping them first, so that we never leak resources.
2021-02-25 14:04:22 +01:00
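The standard Go pattern this fix converges on, as a sketch (`checkInterval` and `checkPendingTxs` are illustrative names):

```go
// A single reusable timer: stopped on exit and explicitly reset after each
// tick, so no timer is leaked and no tick is silently dropped.
timer := time.NewTimer(checkInterval)
defer timer.Stop()
for {
	select {
	case <-ctx.Done():
		return
	case <-timer.C:
		checkPendingTxs()          // illustrative work
		timer.Reset(checkInterval) // the reset this commit adds
	}
}
```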
arnaubennassar
4acfeb0ad9 Add API metric for time to forge L1 tx 2021-02-25 13:06:26 +01:00
arnaucube
400ab14f04 Fix ZKI Exit Balance2 accumulated Amounts 2021-02-25 11:37:34 +01:00
arnau
706e4c7a3d Merge pull request #575 from hermeznetwork/fix/httpserve
In http servers, first listen, then serve
2021-02-24 16:40:16 +01:00
arnau
bf4acfad9f Merge pull request #578 from hermeznetwork/feature/waitproofserverbeforebatch
Wait for serverProof before starting batch
2021-02-24 16:39:46 +01:00
a_bennassar
ba88b25425 Merge pull request #570 from hermeznetwork/feature/api-TxPerBatchAmend
Change how TX per Batch is calculated in state API endpoint
2021-02-24 15:49:16 +01:00
Eduard S
df729b3b71 Wait for serverProof before starting batch 2021-02-24 15:43:55 +01:00
Toni Ramírez
f8d01b5998 Change how TX per Batch is calculated in state API endpoint 2021-02-24 15:39:54 +01:00
Eduard S
155e06484b Merge pull request #571 from hermeznetwork/feature/sql-read-write
Differentiate SQL connections by read/write
2021-02-24 15:26:43 +01:00
Eduard S
5cdcae47b8 Merge pull request #577 from hermeznetwork/feature/zki-forceexit-circuit-effectiveamount-behaviour
Update ZKI generation to match circuit behaviour for Exit Amount>Balance
2021-02-24 15:25:32 +01:00
arnaubennassar
ed9bfdffa9 Differentiate SQL connections by read/write 2021-02-24 15:12:43 +01:00
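The constructor change is visible in the test diffs further down in this compare: the DB wrappers now take two handles, so reads can be pointed at a replica. A sketch:

```go
// readDB and writeDB may be the same handle (the tests below pass `database`
// twice) or two different connections to separate read/write endpoints.
hdb := historydb.NewHistoryDB(readDB, writeDB, apiConnCon)
```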
arnaucube
504e36ac47 Update ZKI gen to match circuit behaviour for Exit
Currently the circuit does not use an Exit Leaf in the Exit MerkleTree
when Amount=0 & EffectiveAmount=0, but it does use an Exit Leaf when
Amount>0 and EffectiveAmount=0 (for example, when tx.Amount >
sender.Balance). This is a particularity of the circuit's approach; the
idea is to update the circuit in the future so that an Exit with
Amount>0 but EffectiveAmount=0 is not added to the Exits MerkleTree. For
the moment the Go code is adapted to the circuit, so an Exit with
Amount>0 & EffectiveAmount=0 is still added as a Leaf of the Exit
MerkleTree.
2021-02-24 15:00:30 +01:00
Eduard S
ca53dc9b52 In http servers, first listen, then serve
- In tests, doing the `net.Listen` outside of the goroutine guarantees that we
  are listening, so we avoid a possible data race of making an http
  request before we are listening.
- In packages that run an http server, doing the listen first allows:
    - Checking for errors when opening the address for listening before
      starting the goroutine, so that if there's an error on listen we can
      terminate gracefully
    - Logging that we are listening when we are really listening, and not
      before.
2021-02-24 12:11:12 +01:00
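A minimal sketch of the pattern (it mirrors the test changes shown further down in this compare, where `ListenAndServe` is replaced by `net.Listen` plus `Serve`; the log calls are illustrative):

```go
listener, err := net.Listen("tcp", addr)
if err != nil {
	return tracerr.Wrap(err) // fail before spawning the serving goroutine
}
log.Infow("HTTP server listening", "addr", listener.Addr()) // now truthful
server := &http.Server{Handler: handler}
go func() {
	if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
		log.Error(err)
	}
}()
```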
Eduard S
5b4cfac309 Merge pull request #563 from hermeznetwork/feature/metrics0
Add Prometheus initial setup
2021-02-23 18:53:57 +01:00
arnau
11a0707746 Merge pull request #574 from hermeznetwork/feature/apirecommendedfeeuseminfee
In API recommended fee, use minFeeUSD as min value
2021-02-23 17:58:38 +01:00
Eduard S
d16fba72a7 In API recommended fee, use minFeeUSD as min value 2021-02-23 17:20:16 +01:00
arnaucube
971b2c1d40 Add Prometheus initial setup 2021-02-23 16:27:29 +01:00
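An initial Prometheus setup typically just mounts the exposition handler; a hedged sketch (the commit's actual metrics and mount point are not shown in this compare; `gin.WrapH` and `promhttp.Handler` are the libraries' real APIs):

```go
import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Expose the default Prometheus registry on the gin engine; collectors are
// registered elsewhere.
engine.GET("/metrics", gin.WrapH(promhttp.Handler()))
```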
arnau
9d08ec6978 Merge pull request #566 from hermeznetwork/fix/process-tx-runtime-error
fix possible runtime error
2021-02-23 16:13:31 +01:00
arnau
ffda9fa1ef Merge pull request #569 from hermeznetwork/fix/node-redundant-condition
node: remove redundant error check
2021-02-23 16:11:34 +01:00
arnau
5a11aa5c27 Merge pull request #567 from hermeznetwork/fix/tx-manager-redundant-condition
tx manager: remove redundant error check
2021-02-23 16:10:38 +01:00
Eduard S
3e5e9bd633 Merge pull request #564 from hermeznetwork/feature/api-without-statedb
Stop using stateDB in API
2021-02-23 15:22:53 +01:00
arnaubennassar
c83047f527 Stop using stateDB in API 2021-02-23 11:04:16 +01:00
Danilo Pantani
bcd576480c remove redundant error check condition for node creation 2021-02-22 13:59:56 -03:00
Danilo Pantani
35ea597ac4 remove redundant error check condition for tx manager 2021-02-22 13:54:10 -03:00
arnau
8259aee884 Merge pull request #565 from hermeznetwork/feature/minprice1
Add minPriceUSD in L2DB, check maxTxs atomically
2021-02-22 17:54:04 +01:00
Danilo Pantani
72862147f3 fix possible runtime error 2021-02-22 13:44:36 -03:00
Eduard S
3706ddb2fb Add minPriceUSD in L2DB, check maxTxs atomically
- Add config parameter `Coordinator.L2DB.MinPriceUSD`, which allows rejecting
  txs into the pool that have a fee lower than the minimum.
- In pool tx insertion, check the number of pending txs atomically with the
  insertion, to avoid data races that could leave more than MaxTxs pending txs
  in the pool.
2021-02-22 16:45:15 +01:00
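A hedged sketch of an atomic count-and-insert; the real l2db schema, column names, and statement differ:

```go
// Illustrative only: the count and the insert happen in one SQL statement,
// so two concurrent inserts cannot both slip under the MaxTxs limit.
res, err := db.Exec(`
	INSERT INTO tx_pool (tx_id, fee, state)
	SELECT $1, $2, 'pend'
	WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = 'pend') < $3`,
	txID, fee, maxTxs)
if err != nil {
	return tracerr.Wrap(err)
}
if n, _ := res.RowsAffected(); n == 0 {
	return errPoolFull // illustrative error: the pool already holds MaxTxs
}
```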
arnau
df0cc32eed Merge pull request #557 from hermeznetwork/feature/poolexternaldelete
Delete pending txs by external mark, store tx IP
2021-02-19 16:58:56 +01:00
Eduard S
67b2b7da4b Delete pending txs by external mark, store tx IP
- In tx_pool, add a column called `external_delete` that can be set to true
  externally.  Regularly, the coordinator will delete all pending txs with this
  column set to true.  The interval for this action is set via the new config
  parameter `Coordinator.PurgeByExtDelInterval`.
- In tx_pool, add a column for the client IP that sent the transaction.  The
  API fills this value using the ClientIP method from gin.Context, which should
  work even behind a reverse proxy.
2021-02-19 16:00:45 +01:00
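The gin side of the IP column, sketched (the field name on the pool tx is illustrative; `ClientIP` is gin's real API):

```go
// ClientIP inspects X-Forwarded-For / X-Real-IP when behind a trusted
// reverse proxy, and falls back to the socket address otherwise.
poolTx.ClientIP = c.ClientIP()
```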
arnau
e23063380c Merge pull request #555 from hermeznetwork/feature/accountupdatetable
Feature/accountupdatetable
2021-02-19 13:23:03 +01:00
Eduard S
ed4d39fcd1 Add account_update SQL table with balances and nonces 2021-02-19 13:16:14 +01:00
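The record stored by the new table, as it appears in the test diff further down in this compare (`common.AccountUpdate`):

```go
accUpdates = append(accUpdates, common.AccountUpdate{
	EthBlockNum: 0,
	BatchNum:    1,
	Idx:         *idx,
	Nonce:       accounts[i].Nonce,
	Balance:     balance,
})
```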
Eduard S
d6ec1910da Simplify historyDB test and make it faster 2021-02-19 11:12:56 +01:00
Eduard S
c829eb99dc Remove unnecessary slice indexing 2021-02-19 11:00:36 +01:00
Eduard S
6ecb8118bd Merge pull request #553 from hermeznetwork/fix/exit-amount-0
Txs w/ exit Amount=0, to not create new Exit leafs
2021-02-19 10:56:08 +01:00
arnaucube
4500820a03 Txs w/ exit Amount=0, to not create new Exit leafs
There are 2 ways to interpret an Exit transaction with Amount=0:
`A`. Not adding a new Leaf to the ExitTree, so not modifying the Exit
MerkleRoot
`B`. Adding a new Leaf to the ExitTree, with Balance=0, which modifies
the Exit MerkleRoot

Currently the [Circuits](https://github.com/hermeznetwork/circuits) are
doing approach `A`, and the
[hermez-node](https://github.com/hermeznetwork/hermez-node) is doing the
approach `B`. The idea of this commit is to use approach `A` in the
hermez-node as well.
2021-02-19 10:50:59 +01:00
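A sketch of the resulting rule, combining this commit with #577 above (the function name is illustrative):

```go
// Only Amount=0 skips the leaf (approach A); Amount>0 with EffectiveAmount=0
// (e.g. tx.Amount > sender.Balance) still creates a leaf, mirroring the
// current circuit behaviour.
func createsExitLeaf(amount *big.Int) bool {
	return amount.Sign() > 0
}
```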
Eduard S
b4e6104fd3 Merge pull request #551 from hermeznetwork/fix/api-err-dupkey-meddler
Duplicated error when caused by meddler
2021-02-18 17:18:20 +01:00
arnaubennassar
28f026f628 Duplicated error when caused by meddler 2021-02-18 17:06:49 +01:00
arnau
688d376ce0 Merge pull request #550 from hermeznetwork/fix/txselectornoncessorting2
Fix TxSel sorting, TxManager geth err checking
2021-02-18 16:59:10 +01:00
Eduard S
2547d5dce7 Fix TxSel sorting, TxManager geth err checking 2021-02-18 15:18:39 +01:00
arnau
bb8d81c3aa Merge pull request #549 from hermeznetwork/fix/txselectornoncessorting
Fix sorting of l2txs nonces in TxSelector
2021-02-17 13:11:42 +01:00
Eduard S
af6f114667 Fix sorting of l2txs nonces in TxSelector 2021-02-17 12:36:22 +01:00
arnau
e2376980f8 Merge pull request #547 from hermeznetwork/fix/coordtimers
Fix timers resetting in coordinator loops
2021-02-16 16:53:21 +01:00
Eduard S
264f01b572 Fix timers resetting in coordinator loops 2021-02-16 16:43:27 +01:00
arnau
a21793b2b0 Merge pull request #541 from hermeznetwork/feature/update-txman5
Feature/update txman5
2021-02-16 16:21:24 +01:00
Eduard S
7b01f6a288 Add discard cli to discard synced blocks
The command is useful in case the synchronizer reaches an invalid state and you
want to roll back a few blocks and try again (maybe with some fixes in the
code).
2021-02-16 16:10:28 +01:00
Eduard S
f2e5800ebd Delay forging of batches via config parameters
coordinator:
    - Add config `ForgeDelay`: ForgeDelay is the delay after which a batch is
      forged if the slot is already committed.  If set to 0s, the coordinator
      will continuously forge at the maximum rate.
    - Add config `ForgeNoTxsDelay`: ForgeNoTxsDelay is the delay after which a
      batch is forged even if there are no txs to forge, if the slot is already
      committed.  If set to 0s, the coordinator will continuously forge even if
      the batches are empty.
    - Add config `GasPriceIncPerc`: GasPriceIncPerc is the percentage increase
      of gas price set in an ethereum transaction over the gas price suggested
      by the ethereum node
    - Remove unused configuration parameters `CallGasLimit` and `GasPriceDiv`
    - Forge always, regardless of the configured forge delay, when the current
      slot is not yet committed and we are the winner of the slot
synchronizer:
    - Don't log with error (use warning) level when there's a reorg and the
      queried events by block using the block hash returns "unknown block".
2021-02-16 16:10:28 +01:00
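A hypothetical shape for these settings (the field names follow the commit text; placement and tags in the node's config package may differ):

```go
type CoordinatorConfig struct {
	ForgeDelay      time.Duration // 0s: forge continuously at the maximum rate
	ForgeNoTxsDelay time.Duration // 0s: forge even empty batches continuously
	GasPriceIncPerc int64         // % added to the eth node's suggested gas price
}
```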
Eduard S
f0e79f3d55 Update coordinator to work better under real net
- cli / node
    - Update handler of SIGINT so that after 3 SIGINTs, the process terminates
      unconditionally
- coordinator
    - Store stats without pointer
    - In all functions that send a variable via channel, check for context done
      to avoid deadlock (due to no process reading from the channel, which has
      no queue) when the node is stopped.
    - Abstract `canForge` so that it can be used outside of the `Coordinator`
    - In `canForge` check the blockNumber in current and next slot.
    - Update tests due to smart contract changes in slot handling, and minimum
      bid defaults
    - TxManager
        - Add consts, vars and stats to allow evaluating `canForge`
        - Add `canForge` method (not used yet)
        - Store batch and nonces status (last success and last pending)
        - Track nonces internally instead of relying on the ethereum node (this
          is required to work with ganache when there are pending txs)
        - Handle the (common) case of the receipt not being found after the tx
          is sent.
        - Don't start the main loop until we get an initial message for the
          stats and vars (so that in the loop the stats and vars are set to
          synchronizer values)
        - When a tx fails, check and discard all the failed transactions before
          sending the message to stop the pipeline.  This avoids sending
          consecutive stop-the-pipeline messages when multiple txs are detected
          to have failed in a row.  Also, txs of the same pipeline that come
          after a discarded tx are discarded, and their nonces reused.
        - Robust handling of nonces (see the sketch after this list):
            - If geth returns nonce is too low, increase it
            - If geth returns nonce too high, decrease it
            - If geth returns underpriced, increase gas price
            - If geth returns replace underpriced, increase gas price
        - Add support for resending transactions after a timeout
        - Store `BatchInfos` in a queue
    - Pipeline
        - When an error is found, stop forging batches and send a message to the
          coordinator to stop the pipeline with information of the failed batch
          number, so that on a restart, non-failed batches are not repeated.
        - When doing a reset of the stateDB, if possible reset from the local
          checkpoint instead of resetting from the synchronizer.  This allows
          resetting from a batch that is valid but not yet sent / synced.
    - Every time a pipeline is started, assign it a number from a counter.  This
      allows the TxManager to ignore batches from stopped pipelines, via a
      message sent by the coordinator.
    - Avoid forging when we haven't reached the rollup genesis block number.
    - Add config parameter `StartSlotBlocksDelay`: StartSlotBlocksDelay is the
      number of blocks of delay to wait before starting the pipeline when we
      reach a slot in which we can forge.
    - When detecting a reorg, only reset the pipeline if the batch from which
      the pipeline started changed and wasn't sent by us.
    - Add config parameter `ScheduleBatchBlocksAheadCheck`:
      ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which the
      forger address is checked to be allowed to forge (apart from checking the
      next block), used to decide when to stop scheduling new batches (by
      stopping the pipeline).  For example, if we are at block 10 and
      ScheduleBatchBlocksAheadCheck is 5, even though at block 11 we canForge,
      the pipeline will be stopped if we can't forge at block 15.  This value
      should be the expected number of blocks it takes between scheduling a
      batch and having it mined.
    - Add config parameter `SendBatchBlocksMarginCheck`:
      SendBatchBlocksMarginCheck is the number of margin blocks ahead in which
      the coordinator is also checked to be allowed to forge, apart from the
      next block; used to decide when to stop sending batches to the smart
      contract.  For example, if we are at block 10 and
      SendBatchBlocksMarginCheck is 5, even though at block 11 we canForge, the
      batch will be discarded if we can't forge at block 15.
    - Add config parameter `TxResendTimeout`: TxResendTimeout is the timeout
      after which a non-mined ethereum transaction will be resent (reusing the
      nonce) with a newly calculated gas price
    - Add config parameter `MaxGasPrice`: MaxGasPrice is the maximum gas price
      allowed for ethereum transactions
    - Add config parameter `NoReuseNonce`: NoReuseNonce disables reusing nonces
      of pending transactions for new replacement transactions.  This is useful
      for testing with Ganache.
    - Extend BatchInfo with more useful information for debugging

- eth / ethereum client
    - Add necessary methods to create the auth object for transactions manually
      so that we can set the nonce, gas price, gas limit, etc manually
    - Update `RollupForgeBatch` to take an auth object as input (so that the
      coordinator can set parameters manually)
- synchronizer
    - In stats, add `NextSlot`
    - In stats, store full last batch instead of just last batch number
    - Instead of calculating a nextSlot from scratch every time, update the
      current struct (only updating the forger info if we are Synced)
    - After every processed batch, check that the calculated StateDB MTRoot
      matches the StateRoot found in the forgeBatch event.
2021-02-16 16:10:23 +01:00
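The nonce and gas-price rules from the TxManager list above, sketched against geth's error strings (hedged; the node's actual matching logic may differ, and `gasPriceIncPerc` is illustrative):

```go
msg := err.Error()
switch {
case strings.Contains(msg, "nonce too low"):
	nonce++
case strings.Contains(msg, "nonce too high"):
	nonce--
case strings.Contains(msg, "replacement transaction underpriced"),
	strings.Contains(msg, "transaction underpriced"):
	// bump the gas price, e.g. by GasPriceIncPerc percent
	gasPrice.Mul(gasPrice, big.NewInt(100+gasPriceIncPerc))
	gasPrice.Div(gasPrice, big.NewInt(100))
}
```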
arnau
26fbeb5c68 Merge pull request #544 from hermeznetwork/feature/document2
Document how to build node
2021-02-15 15:49:56 +01:00
Eduard S
05104b0565 Document how to build node 2021-02-15 15:41:32 +01:00
arnau
53507edabb Merge pull request #543 from hermeznetwork/feature/semaphore2
Replace github.com/marusama/semaphore by golang.org/x/sync
2021-02-15 15:35:43 +01:00
Eduard S
729966f854 Replace github.com/marusama/semaphore by golang.org/x/sync 2021-02-15 13:08:43 +01:00
Eduard S
1c10a01cf7 Merge pull request #540 from hermeznetwork/feature/float40upgrade
Feature/float40upgrade
2021-02-15 12:45:55 +01:00
arnaucube
1d0abe438f Remove Float16 related files & minor typos fixes 2021-02-15 12:09:08 +01:00
arnaucube
6a01c0ac14 Update ZKInputs test vectors with float40 & other
- Add AmountF new parameter to ZKInputs
- Update ZKInputs test vectors with float40 checked with circom circuits
- Small fix at eth/rollup.go: update lenL1L2TxsBytes to the new length of
Float40
2021-02-12 16:40:36 +01:00
arnaucube
63151a285c Migrate all packages to use Float40
Migrate all packages to use Float40 & Add more test vectors at common
2021-02-12 14:27:07 +01:00
arnaucube
52d4197330 Add Float40 methods
This commit adds the Float40 related methods, and keeps the Float16 version,
which will be deleted in the near future once the Float40 migration is
ready.
2021-02-12 14:27:07 +01:00
arnau
ea63cba62a Merge pull request #538 from hermeznetwork/fix/txselTransferToBJJ
Fix txselector TransferToBJJ behaviour
2021-02-12 12:02:13 +01:00
Eduard S
c7e6267189 Fix txselector TransferToBJJ behaviour 2021-02-11 18:19:29 +01:00
75 changed files with 5641 additions and 2945 deletions

View File

@@ -4,6 +4,10 @@ Go implementation of the Hermez node.
 ## Developing
+### Go version
+
+The `hermez-node` has been tested with go version 1.14
+
 ### Unit testing
 Running the unit tests requires a connection to a PostgreSQL database. You can
@@ -11,7 +15,7 @@ start PostgreSQL with docker easily this way (where `yourpasswordhere` should
 be your password):
 ```
-POSTGRES_PASS=yourpasswordhere sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
+POSTGRES_PASS=yourpasswordhere; sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=hermez -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD="$POSTGRES_PASS" -d postgres
 ```
 Afterwards, run the tests with the password as env var:

View File

@@ -4,10 +4,7 @@ import (
 	"net/http"
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
-	"github.com/hermeznetwork/tracerr"
 )
 func (a *API) getAccount(c *gin.Context) {
@@ -23,16 +20,6 @@ func (a *API) getAccount(c *gin.Context) {
 		return
 	}
-	// Get balance from stateDB
-	account, err := a.s.LastGetAccount(*idx)
-	if err != nil {
-		retSQLErr(err, c)
-		return
-	}
-	apiAccount.Balance = apitypes.NewBigIntStr(account.Balance)
-	apiAccount.Nonce = account.Nonce
 	c.JSON(http.StatusOK, apiAccount)
 }
@@ -57,26 +44,6 @@ func (a *API) getAccounts(c *gin.Context) {
 		return
 	}
-	// Get balances from stateDB
-	if err := a.s.LastRead(func(sdb *statedb.Last) error {
-		for x, apiAccount := range apiAccounts {
-			idx, err := stringToIdx(string(apiAccount.Idx), "Account Idx")
-			if err != nil {
-				return tracerr.Wrap(err)
-			}
-			account, err := sdb.GetAccount(*idx)
-			if err != nil {
-				return tracerr.Wrap(err)
-			}
-			apiAccounts[x].Balance = apitypes.NewBigIntStr(account.Balance)
-			apiAccounts[x].Nonce = account.Nonce
-		}
-		return nil
-	}); err != nil {
-		retSQLErr(err, c)
-		return
-	}
 	// Build succesfull response
 	type accountResponse struct {
 		Accounts []historydb.AccountAPI `json:"accounts"`

View File

@@ -2,41 +2,19 @@ package api
 import (
 	"errors"
-	"sync"
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/tracerr"
 )
-// TODO: Add correct values to constants
-const (
-	createAccountExtraFeePercentage         float64 = 2
-	createAccountInternalExtraFeePercentage float64 = 2.5
-)
-// Status define status of the network
-type Status struct {
-	sync.RWMutex
-	Network           Network                       `json:"network"`
-	Metrics           historydb.Metrics             `json:"metrics"`
-	Rollup            historydb.RollupVariablesAPI  `json:"rollup"`
-	Auction           historydb.AuctionVariablesAPI `json:"auction"`
-	WithdrawalDelayer common.WDelayerVariables      `json:"withdrawalDelayer"`
-	RecommendedFee    common.RecommendedFee         `json:"recommendedFee"`
-}
 // API serves HTTP requests to allow external interaction with the Hermez node
 type API struct {
 	h  *historydb.HistoryDB
 	cg *configAPI
-	s  *statedb.StateDB
 	l2 *l2db.L2DB
-	status        Status
 	chainID       uint16
 	hermezAddress ethCommon.Address
 }
@@ -46,9 +24,7 @@ func NewAPI(
 	coordinatorEndpoints, explorerEndpoints bool,
 	server *gin.Engine,
 	hdb *historydb.HistoryDB,
-	sdb *statedb.StateDB,
 	l2db *l2db.L2DB,
-	config *Config,
 ) (*API, error) {
 	// Check input
 	// TODO: is stateDB only needed for explorer endpoints or for both?
@@ -58,19 +34,20 @@ func NewAPI(
 	if explorerEndpoints && hdb == nil {
 		return nil, tracerr.Wrap(errors.New("cannot serve Explorer endpoints without HistoryDB"))
 	}
+	consts, err := hdb.GetConstants()
+	if err != nil {
+		return nil, err
+	}
 	a := &API{
 		h: hdb,
 		cg: &configAPI{
-			RollupConstants:   *newRollupConstants(config.RollupConstants),
-			AuctionConstants:  config.AuctionConstants,
-			WDelayerConstants: config.WDelayerConstants,
+			RollupConstants:   *newRollupConstants(consts.Rollup),
+			AuctionConstants:  consts.Auction,
+			WDelayerConstants: consts.WDelayer,
 		},
-		s:  sdb,
 		l2: l2db,
-		status:        Status{},
-		chainID:       config.ChainID,
-		hermezAddress: config.HermezAddress,
+		chainID:       consts.ChainID,
+		hermezAddress: consts.HermezAddress,
 	}
 	// Add coordinator endpoints

View File

@@ -8,6 +8,7 @@ import (
 	"io"
 	"io/ioutil"
 	"math/big"
+	"net"
 	"net/http"
 	"os"
 	"strconv"
@@ -22,7 +23,6 @@ import (
 	"github.com/hermeznetwork/hermez-node/db"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
-	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/hermez-node/test"
 	"github.com/hermeznetwork/hermez-node/test/til"
@@ -39,8 +39,8 @@ type Pendinger interface {
 	New() Pendinger
 }
-const apiPort = ":4010"
-const apiURL = "http://localhost" + apiPort + "/"
+const apiAddr = ":4010"
+const apiURL = "http://localhost" + apiAddr + "/"
 var SetBlockchain = `
 Type: Blockchain
@@ -180,12 +180,13 @@ type testCommon struct {
 	auctionVars  common.AuctionVariables
 	rollupVars   common.RollupVariables
 	wdelayerVars common.WDelayerVariables
-	nextForgers  []NextForger
+	nextForgers  []historydb.NextForgerAPI
 }
 var tc testCommon
 var config configAPI
 var api *API
+var stateAPIUpdater *StateAPIUpdater
 // TestMain initializes the API server, and fill HistoryDB and StateDB with fake data,
 // emulating the task of the synchronizer in order to have data to be returned
@@ -202,26 +203,12 @@ func TestMain(m *testing.M) {
 		panic(err)
 	}
 	apiConnCon := db.NewAPICnnectionController(1, time.Second)
-	hdb := historydb.NewHistoryDB(database, apiConnCon)
+	hdb := historydb.NewHistoryDB(database, database, apiConnCon)
-	if err != nil {
-		panic(err)
-	}
-	// StateDB
-	dir, err := ioutil.TempDir("", "tmpdb")
-	if err != nil {
-		panic(err)
-	}
-	defer func() {
-		if err := os.RemoveAll(dir); err != nil {
-			panic(err)
-		}
-	}()
-	sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, Type: statedb.TypeTxSelector, NLevels: 0})
 	if err != nil {
 		panic(err)
 	}
 	// L2DB
-	l2DB := l2db.NewL2DB(database, 10, 1000, 24*time.Hour, apiConnCon)
+	l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
 	test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
 	// Config (smart contract constants)
 	chainID := uint16(0)
@@ -234,29 +221,53 @@ func TestMain(m *testing.M) {
 	// API
 	apiGin := gin.Default()
+	// Reset DB
+	test.WipeDB(hdb.DB())
+	constants := &historydb.Constants{
+		SCConsts: common.SCConsts{
+			Rollup:   _config.RollupConstants,
+			Auction:  _config.AuctionConstants,
+			WDelayer: _config.WDelayerConstants,
+		},
+		ChainID:       chainID,
+		HermezAddress: _config.HermezAddress,
+	}
+	if err := hdb.SetConstants(constants); err != nil {
+		panic(err)
+	}
+	nodeConfig := &historydb.NodeConfig{
+		MaxPoolTxs: 10,
+		MinFeeUSD:  0,
+	}
+	if err := hdb.SetNodeConfig(nodeConfig); err != nil {
+		panic(err)
+	}
 	api, err = NewAPI(
 		true,
 		true,
 		apiGin,
 		hdb,
-		sdb,
 		l2DB,
-		&_config,
 	)
 	if err != nil {
-		log.Error(err)
 		panic(err)
 	}
 	// Start server
-	server := &http.Server{Addr: apiPort, Handler: apiGin}
+	listener, err := net.Listen("tcp", apiAddr) //nolint:gosec
+	if err != nil {
+		panic(err)
+	}
+	server := &http.Server{Handler: apiGin}
 	go func() {
-		if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+		if err := server.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			panic(err)
 		}
 	}()
-	// Reset DB
-	test.WipeDB(api.h.DB())
 	// Genratre blockchain data with til
 	tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
 	tilCfgExtra := til.ConfigExtra{
@@ -350,19 +361,6 @@ func TestMain(m *testing.M) {
 		}
 	}
-	// lastBlockNum2 := blocksData[len(blocksData)-1].Block.EthBlockNum
-	// Add accounts to StateDB
-	for i := 0; i < len(commonAccounts); i++ {
-		if _, err := api.s.CreateAccount(commonAccounts[i].Idx, &commonAccounts[i]); err != nil {
-			panic(err)
-		}
-	}
-	// Make a checkpoint to make the accounts available in Last
-	if err := api.s.MakeCheckpoint(); err != nil {
-		panic(err)
-	}
 	// Generate Coordinators and add them to HistoryDB
 	const nCoords = 10
 	commonCoords := test.GenCoordinators(nCoords, commonBlocks)
@@ -470,19 +468,19 @@ func TestMain(m *testing.M) {
 	if err = api.h.AddBids(bids); err != nil {
 		panic(err)
 	}
-	bootForger := NextForger{
+	bootForger := historydb.NextForgerAPI{
 		Coordinator: historydb.CoordinatorAPI{
 			Forger: auctionVars.BootCoordinator,
 			URL:    auctionVars.BootCoordinatorURL,
 		},
 	}
 	// Set next forgers: set all as boot coordinator then replace the non boot coordinators
-	nextForgers := []NextForger{}
+	nextForgers := []historydb.NextForgerAPI{}
 	var initBlock int64 = 140
 	var deltaBlocks int64 = 40
 	for i := 1; i < int(auctionVars.ClosedAuctionSlots)+2; i++ {
 		fromBlock := initBlock + deltaBlocks*int64(i-1)
-		bootForger.Period = Period{
+		bootForger.Period = historydb.Period{
 			SlotNum:   int64(i),
 			FromBlock: fromBlock,
 			ToBlock:   fromBlock + deltaBlocks - 1,
@@ -522,6 +520,12 @@ func TestMain(m *testing.M) {
 		WithdrawalDelay: uint64(3000),
 	}
+	stateAPIUpdater = NewStateAPIUpdater(hdb, nodeConfig, &common.SCVariables{
+		Rollup:   rollupVars,
+		Auction:  auctionVars,
+		WDelayer: wdelayerVars,
+	}, constants)
 	// Generate test data, as expected to be received/sended from/to the API
 	testCoords := genTestCoordinators(commonCoords)
 	testBids := genTestBids(commonBlocks, testCoords, bids)
@@ -529,13 +533,41 @@ func TestMain(m *testing.M) {
 	testTxs := genTestTxs(commonL1Txs, commonL2Txs, commonAccounts, testTokens, commonBlocks)
 	testBatches, testFullBatches := genTestBatches(commonBlocks, commonBatches, testTxs)
 	poolTxsToSend, poolTxsToReceive := genTestPoolTxs(commonPoolTxs, testTokens, commonAccounts)
+	// Add balance and nonce to historyDB
+	accounts := genTestAccounts(commonAccounts, testTokens)
+	accUpdates := []common.AccountUpdate{}
+	for i := 0; i < len(accounts); i++ {
+		balance := new(big.Int)
+		balance.SetString(string(*accounts[i].Balance), 10)
+		idx, err := stringToIdx(string(accounts[i].Idx), "foo")
+		if err != nil {
+			panic(err)
+		}
+		accUpdates = append(accUpdates, common.AccountUpdate{
+			EthBlockNum: 0,
+			BatchNum:    1,
+			Idx:         *idx,
+			Nonce:       0,
+			Balance:     balance,
+		})
+		accUpdates = append(accUpdates, common.AccountUpdate{
+			EthBlockNum: 0,
+			BatchNum:    1,
+			Idx:         *idx,
+			Nonce:       accounts[i].Nonce,
+			Balance:     balance,
+		})
+	}
+	if err := api.h.AddAccountUpdates(accUpdates); err != nil {
+		panic(err)
+	}
 	tc = testCommon{
 		blocks:        commonBlocks,
 		tokens:        testTokens,
 		batches:       testBatches,
 		fullBatches:   testFullBatches,
 		coordinators:  testCoords,
-		accounts:      genTestAccounts(commonAccounts, testTokens),
+		accounts:      accounts,
 		txs:           testTxs,
 		exits:         testExits,
 		poolTxsToSend: poolTxsToSend,
@@ -571,21 +603,18 @@ func TestMain(m *testing.M) {
 	if err := database.Close(); err != nil {
 		panic(err)
 	}
-	if err := os.RemoveAll(dir); err != nil {
-		panic(err)
-	}
 	os.Exit(result)
 }
 func TestTimeout(t *testing.T) {
 	pass := os.Getenv("POSTGRES_PASS")
-	databaseTO, err := db.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
+	databaseTO, err := db.ConnectSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)
 	apiConnConTO := db.NewAPICnnectionController(1, 100*time.Millisecond)
-	hdbTO := historydb.NewHistoryDB(databaseTO, apiConnConTO)
+	hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
 	require.NoError(t, err)
 	// L2DB
-	l2DBTO := l2db.NewL2DB(databaseTO, 10, 1000, 24*time.Hour, apiConnConTO)
+	l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO)
 	// API
 	apiGinTO := gin.Default()
@@ -600,21 +629,21 @@ func TestTimeout(t *testing.T) {
 		<-finishWait
 	})
 	// Start server
-	serverTO := &http.Server{Addr: ":4444", Handler: apiGinTO}
+	serverTO := &http.Server{Handler: apiGinTO}
+	listener, err := net.Listen("tcp", ":4444") //nolint:gosec
+	require.NoError(t, err)
 	go func() {
-		if err := serverTO.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed {
+		if err := serverTO.Serve(listener); err != nil &&
+			tracerr.Unwrap(err) != http.ErrServerClosed {
 			require.NoError(t, err)
 		}
 	}()
-	_config := getConfigTest(0)
 	_, err = NewAPI(
 		true,
 		true,
 		apiGinTO,
 		hdbTO,
-		nil,
 		l2DBTO,
-		&_config,
 	)
 	require.NoError(t, err)

View File

@@ -10,6 +10,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/lib/pq"
+	"github.com/russross/meddler"
 )
 const (
@@ -46,24 +47,33 @@ var (
 func retSQLErr(err error, c *gin.Context) {
 	log.Warnw("HTTP API SQL request error", "err", err)
 	errMsg := tracerr.Unwrap(err).Error()
+	retDupKey := func(errCode pq.ErrorCode) {
+		// https://www.postgresql.org/docs/current/errcodes-appendix.html
+		if errCode == "23505" {
+			c.JSON(http.StatusInternalServerError, errorMsg{
+				Message: errDuplicatedKey,
+			})
+		} else {
+			c.JSON(http.StatusInternalServerError, errorMsg{
+				Message: errMsg,
+			})
+		}
+	}
 	if errMsg == errCtxTimeout {
 		c.JSON(http.StatusServiceUnavailable, errorMsg{
 			Message: errSQLTimeout,
 		})
 	} else if sqlErr, ok := tracerr.Unwrap(err).(*pq.Error); ok {
-		// https://www.postgresql.org/docs/current/errcodes-appendix.html
-		if sqlErr.Code == "23505" {
-			c.JSON(http.StatusInternalServerError, errorMsg{
-				Message: errDuplicatedKey,
-			})
-		}
+		retDupKey(sqlErr.Code)
+	} else if sqlErr, ok := meddler.DriverErr(tracerr.Unwrap(err)); ok {
+		retDupKey(sqlErr.(*pq.Error).Code)
 	} else if tracerr.Unwrap(err) == sql.ErrNoRows {
 		c.JSON(http.StatusNotFound, errorMsg{
-			Message: err.Error(),
+			Message: errMsg,
		})
 	} else {
 		c.JSON(http.StatusInternalServerError, errorMsg{
-			Message: err.Error(),
+			Message: errMsg,
 		})
 	}
 }

View File

@@ -99,7 +99,9 @@ func TestGetSlot(t *testing.T) {
 			nil, &fetchedSlot,
 		),
 	)
-	emptySlot := api.getEmptyTestSlot(slotNum, api.status.Network.LastSyncBlock, tc.auctionVars)
+	// ni, err := api.h.GetNodeInfoAPI()
+	// assert.NoError(t, err)
+	emptySlot := api.getEmptyTestSlot(slotNum, 0, tc.auctionVars)
 	assertSlot(t, emptySlot, fetchedSlot)
 	// Invalid slotNum
@@ -127,8 +129,10 @@ func TestGetSlots(t *testing.T) {
 	err := doGoodReqPaginated(path, historydb.OrderAsc, &testSlotsResponse{}, appendIter)
 	assert.NoError(t, err)
 	allSlots := tc.slots
+	// ni, err := api.h.GetNodeInfoAPI()
+	// assert.NoError(t, err)
 	for i := tc.slots[len(tc.slots)-1].SlotNum; i < maxSlotNum; i++ {
-		emptySlot := api.getEmptyTestSlot(i+1, api.status.Network.LastSyncBlock, tc.auctionVars)
+		emptySlot := api.getEmptyTestSlot(i+1, 0, tc.auctionVars)
 		allSlots = append(allSlots, emptySlot)
 	}
 	assertSlots(t, allSlots, fetchedSlots)

View File

@@ -2,305 +2,160 @@ package api
 import (
 	"database/sql"
-	"fmt"
-	"math/big"
 	"net/http"
-	"time"
+	"sync"
 	"github.com/gin-gonic/gin"
-	"github.com/hermeznetwork/hermez-node/apitypes"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/tracerr"
 )
-// Network define status of the network
-type Network struct {
-	LastEthBlock  int64               `json:"lastEthereumBlock"`
-	LastSyncBlock int64               `json:"lastSynchedBlock"`
-	LastBatch     *historydb.BatchAPI `json:"lastBatch"`
-	CurrentSlot   int64               `json:"currentSlot"`
-	NextForgers   []NextForger        `json:"nextForgers"`
-}
-// NextForger is a representation of the information of a coordinator and the period will forge
-type NextForger struct {
-	Coordinator historydb.CoordinatorAPI `json:"coordinator"`
-	Period      Period                   `json:"period"`
-}
-// Period is a representation of a period
-type Period struct {
-	SlotNum       int64     `json:"slotNum"`
-	FromBlock     int64     `json:"fromBlock"`
-	ToBlock       int64     `json:"toBlock"`
-	FromTimestamp time.Time `json:"fromTimestamp"`
-	ToTimestamp   time.Time `json:"toTimestamp"`
-}
 func (a *API) getState(c *gin.Context) {
-	// TODO: There are no events for the buckets information, so now this information will be 0
-	a.status.RLock()
-	status := a.status //nolint
-	a.status.RUnlock()
-	c.JSON(http.StatusOK, status) //nolint
+	stateAPI, err := a.h.GetStateAPI()
+	if err != nil {
+		retBadReq(err, c)
+		return
+	}
+	c.JSON(http.StatusOK, stateAPI)
 }
-// SC Vars
-// SetRollupVariables set Status.Rollup variables
-func (a *API) SetRollupVariables(rollupVariables common.RollupVariables) {
-	a.status.Lock()
-	var rollupVAPI historydb.RollupVariablesAPI
-	rollupVAPI.EthBlockNum = rollupVariables.EthBlockNum
-	rollupVAPI.FeeAddToken = apitypes.NewBigIntStr(rollupVariables.FeeAddToken)
-	rollupVAPI.ForgeL1L2BatchTimeout = rollupVariables.ForgeL1L2BatchTimeout
-	rollupVAPI.WithdrawalDelay = rollupVariables.WithdrawalDelay
-	for i, bucket := range rollupVariables.Buckets {
-		var apiBucket historydb.BucketParamsAPI
-		apiBucket.CeilUSD = apitypes.NewBigIntStr(bucket.CeilUSD)
-		apiBucket.Withdrawals = apitypes.NewBigIntStr(bucket.Withdrawals)
-		apiBucket.BlockWithdrawalRate = apitypes.NewBigIntStr(bucket.BlockWithdrawalRate)
-		apiBucket.MaxWithdrawals = apitypes.NewBigIntStr(bucket.MaxWithdrawals)
-		rollupVAPI.Buckets[i] = apiBucket
-	}
-	rollupVAPI.SafeMode = rollupVariables.SafeMode
-	a.status.Rollup = rollupVAPI
-	a.status.Unlock()
-}
+// StateAPIUpdater is an utility object to facilitate updating the StateAPI
+type StateAPIUpdater struct {
+	hdb    *historydb.HistoryDB
+	state  historydb.StateAPI
+	config historydb.NodeConfig
+	vars   common.SCVariablesPtr
+	consts historydb.Constants
+	rw     sync.RWMutex
+}
-// SetWDelayerVariables set Status.WithdrawalDelayer variables
-func (a *API) SetWDelayerVariables(wDelayerVariables common.WDelayerVariables) {
-	a.status.Lock()
-	a.status.WithdrawalDelayer = wDelayerVariables
-	a.status.Unlock()
-}
+// NewStateAPIUpdater creates a new StateAPIUpdater
+func NewStateAPIUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
+	consts *historydb.Constants) *StateAPIUpdater {
+	u := StateAPIUpdater{
+		hdb:    hdb,
+		config: *config,
+		consts: *consts,
+	}
+	u.SetSCVars(vars.AsPtr())
+	return &u
+}
-// SetAuctionVariables set Status.Auction variables
-func (a *API) SetAuctionVariables(auctionVariables common.AuctionVariables) {
-	a.status.Lock()
-	var auctionAPI historydb.AuctionVariablesAPI
-	auctionAPI.EthBlockNum = auctionVariables.EthBlockNum
-	auctionAPI.DonationAddress = auctionVariables.DonationAddress
-	auctionAPI.BootCoordinator = auctionVariables.BootCoordinator
-	auctionAPI.BootCoordinatorURL = auctionVariables.BootCoordinatorURL
-	auctionAPI.DefaultSlotSetBidSlotNum = auctionVariables.DefaultSlotSetBidSlotNum
-	auctionAPI.ClosedAuctionSlots = auctionVariables.ClosedAuctionSlots
-	auctionAPI.OpenAuctionSlots = auctionVariables.OpenAuctionSlots
-	auctionAPI.Outbidding = auctionVariables.Outbidding
-	auctionAPI.SlotDeadline = auctionVariables.SlotDeadline
-	for i, slot := range auctionVariables.DefaultSlotSetBid {
-		auctionAPI.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
-	}
-	for i, ratio := range auctionVariables.AllocationRatio {
-		auctionAPI.AllocationRatio[i] = ratio
-	}
-	a.status.Auction = auctionAPI
-	a.status.Unlock()
-}
+// Store the State in the HistoryDB
+func (u *StateAPIUpdater) Store() error {
+	u.rw.RLock()
+	defer u.rw.RUnlock()
+	return tracerr.Wrap(u.hdb.SetStateInternalAPI(&u.state))
+}
+// SetSCVars sets the smart contract vars (ony updates those that are not nil)
+func (u *StateAPIUpdater) SetSCVars(vars *common.SCVariablesPtr) {
+	u.rw.Lock()
+	defer u.rw.Unlock()
+	if vars.Rollup != nil {
+		u.vars.Rollup = vars.Rollup
+		rollupVars := historydb.NewRollupVariablesAPI(u.vars.Rollup)
+		u.state.Rollup = *rollupVars
+	}
+	if vars.Auction != nil {
+		u.vars.Auction = vars.Auction
+		auctionVars := historydb.NewAuctionVariablesAPI(u.vars.Auction)
+		u.state.Auction = *auctionVars
+	}
+	if vars.WDelayer != nil {
+		u.vars.WDelayer = vars.WDelayer
+		u.state.WithdrawalDelayer = *u.vars.WDelayer
+	}
+}
+// UpdateRecommendedFee update Status.RecommendedFee information
+func (u *StateAPIUpdater) UpdateRecommendedFee() error {
+	recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	u.rw.Lock()
+	u.state.RecommendedFee = *recommendedFee
+	u.rw.Unlock()
+	return nil
+}
+// UpdateMetrics update Status.Metrics information
+func (u *StateAPIUpdater) UpdateMetrics() error {
+	u.rw.RLock()
+	lastBatch := u.state.Network.LastBatch
+	u.rw.RUnlock()
+	if lastBatch == nil {
+		return nil
+	}
+	lastBatchNum := lastBatch.BatchNum
+	metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
+	if err != nil {
+		return tracerr.Wrap(err)
+	}
+	u.rw.Lock()
+	u.state.Metrics = *metrics
+	u.rw.Unlock()
+	return nil
+}
 // UpdateNetworkInfoBlock update Status.Network block related information
-func (a *API) UpdateNetworkInfoBlock(
-	lastEthBlock, lastSyncBlock common.Block,
-) {
-	a.status.Network.LastSyncBlock = lastSyncBlock.Num
-	a.status.Network.LastEthBlock = lastEthBlock.Num
-}
+func (u *StateAPIUpdater) UpdateNetworkInfoBlock(lastEthBlock, lastSyncBlock common.Block) {
+	u.rw.Lock()
+	u.state.Network.LastSyncBlock = lastSyncBlock.Num
+	u.state.Network.LastEthBlock = lastEthBlock.Num
+	u.rw.Unlock()
+}
 // UpdateNetworkInfo update Status.Network information
-func (a *API) UpdateNetworkInfo(
+func (u *StateAPIUpdater) UpdateNetworkInfo(
 	lastEthBlock, lastSyncBlock common.Block,
 	lastBatchNum common.BatchNum, currentSlot int64,
 ) error {
-	lastBatch, err := a.h.GetBatchAPI(lastBatchNum)
+	// Get last batch in API format
+	lastBatch, err := u.hdb.GetBatchInternalAPI(lastBatchNum)
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		lastBatch = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
-	lastClosedSlot := currentSlot + int64(a.status.Auction.ClosedAuctionSlots)
-	nextForgers, err := a.getNextForgers(lastSyncBlock, currentSlot, lastClosedSlot)
+	u.rw.RLock()
+	auctionVars := u.vars.Auction
+	u.rw.RUnlock()
+	// Get next forgers
+	lastClosedSlot := currentSlot + int64(auctionVars.ClosedAuctionSlots)
+	nextForgers, err := u.hdb.GetNextForgersInternalAPI(auctionVars, &u.consts.Auction,
+		lastSyncBlock, currentSlot, lastClosedSlot)
 	if tracerr.Unwrap(err) == sql.ErrNoRows {
 		nextForgers = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
-	a.status.Lock()
-	a.status.Network.LastSyncBlock = lastSyncBlock.Num
-	a.status.Network.LastEthBlock = lastEthBlock.Num
-	a.status.Network.LastBatch = lastBatch
-	a.status.Network.CurrentSlot = currentSlot
-	a.status.Network.NextForgers = nextForgers
-	// Update buckets withdrawals
-	bucketsUpdate, err := a.h.GetBucketUpdatesAPI()
-	if tracerr.Unwrap(err) == sql.ErrNoRows {
-		bucketsUpdate = nil
+	bucketUpdates, err := u.hdb.GetBucketUpdatesInternalAPI()
+	if err == sql.ErrNoRows {
+		bucketUpdates = nil
 	} else if err != nil {
 		return tracerr.Wrap(err)
 	}
-	for i, bucketParams := range a.status.Rollup.Buckets {
-		for _, bucketUpdate := range bucketsUpdate {
+	u.rw.Lock()
+	// Update NodeInfo struct
+	for i, bucketParams := range u.state.Rollup.Buckets {
+		for _, bucketUpdate := range bucketUpdates {
 			if bucketUpdate.NumBucket == i {
 				bucketParams.Withdrawals = bucketUpdate.Withdrawals
-				a.status.Rollup.Buckets[i] = bucketParams
+				u.state.Rollup.Buckets[i] = bucketParams
 				break
 			}
 		}
 	}
-	a.status.Unlock()
-	return nil
-}
-// apiSlotToBigInts converts from [6]*apitypes.BigIntStr to [6]*big.Int
-func apiSlotToBigInts(defaultSlotSetBid [6]*apitypes.BigIntStr) ([6]*big.Int, error) {
-	var slots [6]*big.Int
-	for i, slot := range defaultSlotSetBid {
-		bigInt, ok := new(big.Int).SetString(string(*slot), 10)
-		if !ok {
-			return slots, tracerr.Wrap(fmt.Errorf("can't convert %T into big.Int", slot))
-		}
-		slots[i] = bigInt
-	}
-	return slots, nil
-}
-// getNextForgers returns next forgers
-func (a *API) getNextForgers(lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForger, error) {
-	secondsPerBlock := int64(15) //nolint:gomnd
-	// currentSlot and lastClosedSlot included
-	limit := uint(lastClosedSlot - currentSlot + 1)
-	bids, _, err := a.h.GetBestBidsAPI(&currentSlot, &lastClosedSlot, nil, &limit, "ASC")
-	if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
-		return nil, tracerr.Wrap(err)
-	}
-	nextForgers := []NextForger{}
-	// Get min bid info
-	var minBidInfo []historydb.MinBidInfo
-	if currentSlot >= a.status.Auction.DefaultSlotSetBidSlotNum {
-		// All min bids can be calculated with the last update of AuctionVariables
-		bigIntSlots, err := apiSlotToBigInts(a.status.Auction.DefaultSlotSetBid)
-		if err != nil {
-			return nil, tracerr.Wrap(err)
-		}
-		minBidInfo = []historydb.MinBidInfo{{
-			DefaultSlotSetBid:        bigIntSlots,
-			DefaultSlotSetBidSlotNum: a.status.Auction.DefaultSlotSetBidSlotNum,
-		}}
-	} else {
-		// Get all the relevant updates from the DB
-		minBidInfo, err = a.h.GetAuctionVarsUntilSetSlotNumAPI(lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
-		if err != nil {
-			return nil, tracerr.Wrap(err)
-		}
-	}
-	// Create nextForger for each slot
-	for i := currentSlot; i <= lastClosedSlot; i++ {
-		fromBlock := i*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum
-		toBlock := (i+1)*int64(a.cg.AuctionConstants.BlocksPerSlot) + a.cg.AuctionConstants.GenesisBlockNum - 1
-		nextForger := NextForger{
-			Period: Period{
-				SlotNum:       i,
-				FromBlock:     fromBlock,
-				ToBlock:       toBlock,
-				FromTimestamp: lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
-				ToTimestamp:   lastBlock.Timestamp.Add(time.Second * time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
-			},
-		}
-		foundForger := false
-		// If there is a bid for a slot, get forger (coordinator)
-		for j := range bids {
-			slotNum := bids[j].SlotNum
-			if slotNum == i {
-				// There's a bid for the slot
-				// Check if the bid is greater than the minimum required
-				for i := 0; i < len(minBidInfo); i++ {
-					// Find the most recent update
-					if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
-						// Get min bid
-						minBidSelector := slotNum % int64(len(a.status.Auction.DefaultSlotSetBid))
-						minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
-						// Check if the bid has beaten the minimum
-						bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
-						if !ok {
-							return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
-						}
-						if minBid.Cmp(bid) == 1 {
-							// Min bid is greater than bid, the slot will be forged by boot coordinator
-							break
-						}
-						foundForger = true
-						break
-					}
-				}
-				if !foundForger { // There is no bid or it's smaller than the minimum
-					break
-				}
-				coordinator, err := a.h.GetCoordinatorAPI(bids[j].Bidder)
-				if err != nil {
-					return nil, tracerr.Wrap(err)
-				}
-				nextForger.Coordinator = *coordinator
-				break
-			}
-		}
-		// If there is no bid, the coordinator that will forge is boot coordinator
-		if !foundForger {
-			nextForger.Coordinator = historydb.CoordinatorAPI{
-				Forger: a.status.Auction.BootCoordinator,
-				URL:    a.status.Auction.BootCoordinatorURL,
-			}
-		}
-		nextForgers = append(nextForgers, nextForger)
-	}
-	return nextForgers, nil
-}
-// Metrics
-// UpdateMetrics update Status.Metrics information
-func (a *API) UpdateMetrics() error {
-	a.status.RLock()
-	if a.status.Network.LastBatch == nil {
-		a.status.RUnlock()
-		return nil
-	}
-	batchNum := a.status.Network.LastBatch.BatchNum
-	a.status.RUnlock()
-	metrics, err := a.h.GetMetricsAPI(batchNum)
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	a.status.Lock()
-	a.status.Metrics = *metrics
-	a.status.Unlock()
-	return nil
-}
-// Recommended fee
-// UpdateRecommendedFee update Status.RecommendedFee information
-func (a *API) UpdateRecommendedFee() error {
-	feeExistingAccount, err := a.h.GetAvgTxFeeAPI()
-	if err != nil {
-		return tracerr.Wrap(err)
-	}
-	a.status.Lock()
-	a.status.RecommendedFee.ExistingAccount = feeExistingAccount
-	a.status.RecommendedFee.CreatesAccount = createAccountExtraFeePercentage * feeExistingAccount
-	a.status.RecommendedFee.CreatesAccountAndRegister = createAccountInternalExtraFeePercentage * feeExistingAccount
-	a.status.Unlock()
+	u.state.Network.LastSyncBlock = lastSyncBlock.Num
+	u.state.Network.LastEthBlock = lastEthBlock.Num
+	u.state.Network.LastBatch = lastBatch
+	u.state.Network.CurrentSlot = currentSlot
+	u.state.Network.NextForgers = nextForgers
+	u.rw.Unlock()
 	return nil
 }

View File

@@ -13,7 +13,7 @@ import (
type testStatus struct { type testStatus struct {
Network testNetwork `json:"network"` Network testNetwork `json:"network"`
Metrics historydb.Metrics `json:"metrics"` Metrics historydb.MetricsAPI `json:"metrics"`
Rollup historydb.RollupVariablesAPI `json:"rollup"` Rollup historydb.RollupVariablesAPI `json:"rollup"`
Auction historydb.AuctionVariablesAPI `json:"auction"` Auction historydb.AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"` WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
@@ -21,18 +21,19 @@ type testStatus struct {
} }
type testNetwork struct { type testNetwork struct {
LastEthBlock int64 `json:"lastEthereumBlock"` LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"` LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch testBatch `json:"lastBatch"` LastBatch testBatch `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"` CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForger `json:"nextForgers"` NextForgers []historydb.NextForgerAPI `json:"nextForgers"`
} }
func TestSetRollupVariables(t *testing.T) { func TestSetRollupVariables(t *testing.T) {
rollupVars := &common.RollupVariables{} stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Rollup: &tc.rollupVars})
assertEqualRollupVariables(t, *rollupVars, api.status.Rollup, true) require.NoError(t, stateAPIUpdater.Store())
api.SetRollupVariables(tc.rollupVars) ni, err := api.h.GetNodeInfoAPI()
assertEqualRollupVariables(t, tc.rollupVars, api.status.Rollup, true) require.NoError(t, err)
assertEqualRollupVariables(t, tc.rollupVars, ni.StateAPI.Rollup, true)
} }
func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) { func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVariables, apiVariables historydb.RollupVariablesAPI, checkBuckets bool) {
@@ -51,17 +52,19 @@ func assertEqualRollupVariables(t *testing.T, rollupVariables common.RollupVaria
}
func TestSetWDelayerVariables(t *testing.T) {
- wdelayerVars := &common.WDelayerVariables{}
- assert.Equal(t, *wdelayerVars, api.status.WithdrawalDelayer)
- api.SetWDelayerVariables(tc.wdelayerVars)
- assert.Equal(t, tc.wdelayerVars, api.status.WithdrawalDelayer)
+ stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{WDelayer: &tc.wdelayerVars})
+ require.NoError(t, stateAPIUpdater.Store())
+ ni, err := api.h.GetNodeInfoAPI()
+ require.NoError(t, err)
+ assert.Equal(t, tc.wdelayerVars, ni.StateAPI.WithdrawalDelayer)
}
func TestSetAuctionVariables(t *testing.T) {
- auctionVars := &common.AuctionVariables{}
- assertEqualAuctionVariables(t, *auctionVars, api.status.Auction)
- api.SetAuctionVariables(tc.auctionVars)
- assertEqualAuctionVariables(t, tc.auctionVars, api.status.Auction)
+ stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{Auction: &tc.auctionVars})
+ require.NoError(t, stateAPIUpdater.Store())
+ ni, err := api.h.GetNodeInfoAPI()
+ require.NoError(t, err)
+ assertEqualAuctionVariables(t, tc.auctionVars, ni.StateAPI.Auction)
}
func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVariables, apiVariables historydb.AuctionVariablesAPI) {
@@ -85,11 +88,6 @@ func assertEqualAuctionVariables(t *testing.T, auctionVariables common.AuctionVa
}
func TestUpdateNetworkInfo(t *testing.T) {
- status := &Network{}
- assert.Equal(t, status.LastSyncBlock, api.status.Network.LastSyncBlock)
- assert.Equal(t, status.LastBatch, api.status.Network.LastBatch)
- assert.Equal(t, status.CurrentSlot, api.status.Network.CurrentSlot)
- assert.Equal(t, status.NextForgers, api.status.Network.NextForgers)
lastBlock := tc.blocks[3]
lastBatchNum := common.BatchNum(3)
currentSlotNum := int64(1)
@@ -118,62 +116,79 @@ func TestUpdateNetworkInfo(t *testing.T) {
err := api.h.AddBucketUpdatesTest(api.h.DB(), bucketUpdates)
require.NoError(t, err)
- err = api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
- assert.NoError(t, err)
- assert.Equal(t, lastBlock.Num, api.status.Network.LastSyncBlock)
- assert.Equal(t, lastBatchNum, api.status.Network.LastBatch.BatchNum)
- assert.Equal(t, currentSlotNum, api.status.Network.CurrentSlot)
- assert.Equal(t, int(api.status.Auction.ClosedAuctionSlots)+1, len(api.status.Network.NextForgers))
- assert.Equal(t, api.status.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
- assert.Equal(t, api.status.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
+ // stateAPIUpdater := NewStateAPIUpdater(hdb)
+ err = stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+ require.NoError(t, err)
+ require.NoError(t, stateAPIUpdater.Store())
+ ni, err := api.h.GetNodeInfoAPI()
+ require.NoError(t, err)
+ assert.Equal(t, lastBlock.Num, ni.StateAPI.Network.LastSyncBlock)
+ assert.Equal(t, lastBatchNum, ni.StateAPI.Network.LastBatch.BatchNum)
+ assert.Equal(t, currentSlotNum, ni.StateAPI.Network.CurrentSlot)
+ assert.Equal(t, int(ni.StateAPI.Auction.ClosedAuctionSlots)+1, len(ni.StateAPI.Network.NextForgers))
+ assert.Equal(t, ni.StateAPI.Rollup.Buckets[0].Withdrawals, apitypes.NewBigIntStr(big.NewInt(123)))
+ assert.Equal(t, ni.StateAPI.Rollup.Buckets[2].Withdrawals, apitypes.NewBigIntStr(big.NewInt(43)))
}
func TestUpdateMetrics(t *testing.T) {
// Update Metrics needs api.status.Network.LastBatch.BatchNum to be updated
lastBlock := tc.blocks[3]
- lastBatchNum := common.BatchNum(3)
+ lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
- err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
- assert.NoError(t, err)
- err = api.UpdateMetrics()
- assert.NoError(t, err)
- assert.Greater(t, api.status.Metrics.TransactionsPerBatch, float64(0))
- assert.Greater(t, api.status.Metrics.BatchFrequency, float64(0))
- assert.Greater(t, api.status.Metrics.TransactionsPerSecond, float64(0))
- assert.Greater(t, api.status.Metrics.TotalAccounts, int64(0))
- assert.Greater(t, api.status.Metrics.TotalBJJs, int64(0))
- assert.Greater(t, api.status.Metrics.AvgTransactionFee, float64(0))
+ err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+ require.NoError(t, err)
+ err = stateAPIUpdater.UpdateMetrics()
+ require.NoError(t, err)
+ require.NoError(t, stateAPIUpdater.Store())
+ ni, err := api.h.GetNodeInfoAPI()
+ require.NoError(t, err)
+ assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
+ assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
+ assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
+ assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0))
+ assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0))
+ assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
}
func TestUpdateRecommendedFee(t *testing.T) {
- err := api.UpdateRecommendedFee()
- assert.NoError(t, err)
- assert.Greater(t, api.status.RecommendedFee.ExistingAccount, float64(0))
- assert.Equal(t, api.status.RecommendedFee.CreatesAccount,
- api.status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
- assert.Equal(t, api.status.RecommendedFee.CreatesAccountAndRegister,
- api.status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
+ err := stateAPIUpdater.UpdateRecommendedFee()
+ require.NoError(t, err)
+ require.NoError(t, stateAPIUpdater.Store())
+ var minFeeUSD float64
+ if api.l2 != nil {
+ minFeeUSD = api.l2.MinFeeUSD()
+ }
+ ni, err := api.h.GetNodeInfoAPI()
+ require.NoError(t, err)
+ assert.Greater(t, ni.StateAPI.RecommendedFee.ExistingAccount, minFeeUSD)
+ // assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccount,
+ // ni.StateAPI.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
+ // assert.Equal(t, ni.StateAPI.RecommendedFee.CreatesAccountAndRegister,
+ // ni.StateAPI.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
func TestGetState(t *testing.T) {
lastBlock := tc.blocks[3]
- lastBatchNum := common.BatchNum(3)
+ lastBatchNum := common.BatchNum(12)
currentSlotNum := int64(1)
- api.SetRollupVariables(tc.rollupVars)
- api.SetWDelayerVariables(tc.wdelayerVars)
- api.SetAuctionVariables(tc.auctionVars)
- err := api.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
- assert.NoError(t, err)
- err = api.UpdateMetrics()
- assert.NoError(t, err)
- err = api.UpdateRecommendedFee()
- assert.NoError(t, err)
+ stateAPIUpdater.SetSCVars(&common.SCVariablesPtr{
+ Rollup: &tc.rollupVars,
+ Auction: &tc.auctionVars,
+ WDelayer: &tc.wdelayerVars,
+ })
+ err := stateAPIUpdater.UpdateNetworkInfo(lastBlock, lastBlock, lastBatchNum, currentSlotNum)
+ require.NoError(t, err)
+ err = stateAPIUpdater.UpdateMetrics()
+ require.NoError(t, err)
+ err = stateAPIUpdater.UpdateRecommendedFee()
+ require.NoError(t, err)
+ require.NoError(t, stateAPIUpdater.Store())
endpoint := apiURL + "state"
var status testStatus
- assert.NoError(t, doGoodReq("GET", endpoint, nil, &status))
+ require.NoError(t, doGoodReq("GET", endpoint, nil, &status))
// SC vars
// UpdateNetworkInfo will overwrite buckets withdrawal values
@@ -200,13 +215,13 @@ func TestGetState(t *testing.T) {
// Recommended fee
// TODO: perform real asserts (not just greater than 0)
assert.Greater(t, status.RecommendedFee.ExistingAccount, float64(0))
- assert.Equal(t, status.RecommendedFee.CreatesAccount,
- status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
- assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
- status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
+ // assert.Equal(t, status.RecommendedFee.CreatesAccount,
+ // status.RecommendedFee.ExistingAccount*createAccountExtraFeePercentage)
+ // assert.Equal(t, status.RecommendedFee.CreatesAccountAndRegister,
+ // status.RecommendedFee.ExistingAccount*createAccountInternalExtraFeePercentage)
}
- func assertNextForgers(t *testing.T, expected, actual []NextForger) {
+ func assertNextForgers(t *testing.T, expected, actual []historydb.NextForgerAPI) {
assert.Equal(t, len(expected), len(actual))
for i := range expected {
// ignore timestamps and other metadata


@@ -1329,13 +1329,6 @@ components:
type: string
description: Moment in which the transaction was added to the pool.
format: date-time
- batchNum:
-   type: integer
-   description: Identifier of a batch. Every new forged batch increases by one the batchNum, starting at 0.
-   minimum: 0
-   maximum: 4294967295
-   nullable: true
-   example: null
requestFromAccountIndex:
  type: string
  description: >-
@@ -1390,7 +1383,6 @@ components:
$ref: '#/components/schemas/Token'
example:
  amount: '100000000000000'
-   batchNum:
  fee: 0
  fromAccountIndex: hez:SCC:256
  fromBJJ: hez:r_trOasVEk0zNaalOoS9aLedu6mO7jI5XTIPu_zGXoyn
@@ -1438,7 +1430,6 @@ components:
- info
- signature
- timestamp
- - batchNum
- requestFromAccountIndex
- requestToAccountIndex
- requestToHezEthereumAddress
@@ -2578,6 +2569,21 @@ components:
description: List of next coordinators to forge.
items:
  $ref: '#/components/schemas/NextForger'
+ NodeConfig:
+   type: object
+   description: Configuration of the coordinator node. Note that this is specific to each coordinator.
+   properties:
+     forgeDelay:
+       type: number
+       description: |
+         Delay in seconds after which a batch is forged if the slot is
+         already committed. If set to 0s, the coordinator will continuously
+         forge at the maximum rate. Note that this is a configuration
+         parameter of a node, so each coordinator may have a different value.
+       example: 193.4
+   additionalProperties: false
+   required:
+     - forgeDelay
State:
  type: object
  description: Global variables of the network
@@ -2594,6 +2600,8 @@ components:
  $ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee:
  $ref: '#/components/schemas/RecommendedFee'
+ nodeConfig:
+   $ref: '#/components/schemas/NodeConfig'
additionalProperties: false
required:
  - network
@@ -2602,6 +2610,7 @@ components:
  - auction
  - withdrawalDelayer
  - recommendedFee
+   - nodeConfig
StateNetwork:
  type: object
  description: Global statistics of the network
@@ -2812,6 +2821,10 @@ components:
type: number
description: Average fee percentage paid for L2 transactions in the last 24 hours.
example: 1.54
+ estimatedTimeToForgeL1:
+   type: number
+   description: Estimated time needed to forge an L1 transaction, from the time it is added on the smart contract until it is actually forged. In seconds.
+   example: 193.4
additionalProperties: false
required:
  - transactionsPerBatch
@@ -2820,6 +2833,7 @@ components:
  - totalAccounts
  - totalBJJs
  - avgTransactionFee
+   - estimatedTimeToForgeL1
PendingItems:
  type: integer
  description: Amount of items that will be returned in subsequent calls to the endpoint, as long as they are done with the same filters. When the value is 0 it means that all items have been sent.


@@ -2,6 +2,7 @@ package api
import (
"errors"
+ "fmt"
"math/big"
"net/http"
@@ -27,6 +28,7 @@ func (a *API) postPoolTx(c *gin.Context) {
retBadReq(err, c)
return
}
+ writeTx.ClientIP = c.ClientIP()
// Insert to DB
if err := a.l2.AddTxAPI(writeTx); err != nil {
retSQLErr(err, c)
@@ -169,16 +171,21 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
if err != nil {
return tracerr.Wrap(err)
}
- // Get public key
- account, err := a.s.LastGetAccount(poolTx.FromIdx)
- if err != nil {
- return tracerr.Wrap(err)
- }
// Validate feeAmount
_, err = common.CalcFeeAmount(poolTx.Amount, poolTx.Fee)
if err != nil {
return tracerr.Wrap(err)
}
+ // Get public key
+ account, err := a.h.GetCommonAccountAPI(poolTx.FromIdx)
+ if err != nil {
+ return tracerr.Wrap(err)
+ }
+ // Validate TokenID
+ if poolTx.TokenID != account.TokenID {
+ return tracerr.Wrap(fmt.Errorf("tx.TokenID (%v) != account.TokenID (%v)",
+ poolTx.TokenID, account.TokenID))
+ }
// Check signature
if !poolTx.VerifySignature(a.chainID, account.BJJ) {
return tracerr.Wrap(errors.New("wrong signature"))


@@ -10,6 +10,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// testPoolTxReceive is a struct to be used to assert the response // testPoolTxReceive is a struct to be used to assert the response
@@ -170,9 +171,9 @@ func TestPoolTxs(t *testing.T) {
fetchedTxID := common.TxID{} fetchedTxID := common.TxID{}
for _, tx := range tc.poolTxsToSend { for _, tx := range tc.poolTxsToSend {
jsonTxBytes, err := json.Marshal(tx) jsonTxBytes, err := json.Marshal(tx)
assert.NoError(t, err) require.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
assert.NoError( require.NoError(
t, doGoodReq( t, doGoodReq(
"POST", "POST",
endpoint, endpoint,
@@ -187,42 +188,42 @@ func TestPoolTxs(t *testing.T) {
badTx.Amount = "99950000000000000" badTx.Amount = "99950000000000000"
badTx.Fee = 255 badTx.Fee = 255
jsonTxBytes, err := json.Marshal(badTx) jsonTxBytes, err := json.Marshal(badTx)
assert.NoError(t, err) require.NoError(t, err)
jsonTxReader := bytes.NewReader(jsonTxBytes) jsonTxReader := bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
assert.NoError(t, err) require.NoError(t, err)
// Wrong signature // Wrong signature
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
badTx.FromIdx = "hez:foo:1000" badTx.FromIdx = "hez:foo:1000"
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
assert.NoError(t, err) require.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
assert.NoError(t, err) require.NoError(t, err)
// Wrong to // Wrong to
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" ethAddr := "hez:0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
badTx.ToEthAddr = &ethAddr badTx.ToEthAddr = &ethAddr
badTx.ToIdx = nil badTx.ToIdx = nil
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
assert.NoError(t, err) require.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
assert.NoError(t, err) require.NoError(t, err)
// Wrong rq // Wrong rq
badTx = tc.poolTxsToSend[0] badTx = tc.poolTxsToSend[0]
rqFromIdx := "hez:foo:30" rqFromIdx := "hez:foo:30"
badTx.RqFromIdx = &rqFromIdx badTx.RqFromIdx = &rqFromIdx
jsonTxBytes, err = json.Marshal(badTx) jsonTxBytes, err = json.Marshal(badTx)
assert.NoError(t, err) require.NoError(t, err)
jsonTxReader = bytes.NewReader(jsonTxBytes) jsonTxReader = bytes.NewReader(jsonTxBytes)
err = doBadReq("POST", endpoint, jsonTxReader, 400) err = doBadReq("POST", endpoint, jsonTxReader, 400)
assert.NoError(t, err) require.NoError(t, err)
// GET // GET
endpoint += "/" endpoint += "/"
for _, tx := range tc.poolTxsToReceive { for _, tx := range tc.poolTxsToReceive {
fetchedTx := testPoolTxReceive{} fetchedTx := testPoolTxReceive{}
assert.NoError( require.NoError(
t, doGoodReq( t, doGoodReq(
"GET", "GET",
endpoint+tx.TxID.String(), endpoint+tx.TxID.String(),
@@ -233,10 +234,10 @@ func TestPoolTxs(t *testing.T) {
} }
// 400, due invalid TxID // 400, due invalid TxID
err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400) err = doBadReq("GET", endpoint+"0xG2241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 400)
assert.NoError(t, err) require.NoError(t, err)
// 404, due inexistent TxID in DB // 404, due inexistent TxID in DB
err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404) err = doBadReq("GET", endpoint+"0x02241b6f2b1dd772dba391f4a1a3407c7c21f598d86e2585a14e616fb4a255f823", nil, 404)
assert.NoError(t, err) require.NoError(t, err)
} }
func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) { func assertPoolTx(t *testing.T, expected, actual testPoolTxReceive) {


@@ -54,7 +54,7 @@ func NewBatchBuilder(dbpath string, synchronizerStateDB *statedb.StateDB, batchN
// copy of the rollup state from the Synchronizer at that `batchNum`, otherwise
// it can just roll back the internal copy.
func (bb *BatchBuilder) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
- return bb.localStateDB.Reset(batchNum, fromSynchronizer)
+ return tracerr.Wrap(bb.localStateDB.Reset(batchNum, fromSynchronizer))
}
// BuildBatch takes the transactions and returns the common.ZKInputs of the next batch
@@ -64,7 +64,10 @@ func (bb *BatchBuilder) BuildBatch(coordIdxs []common.Idx, configBatch *ConfigBa
tp := txprocessor.NewTxProcessor(bbStateDB, configBatch.TxProcessorConfig)
ptOut, err := tp.ProcessTxs(coordIdxs, l1usertxs, l1coordinatortxs, pooll2txs)
- return ptOut.ZKInputs, tracerr.Wrap(err)
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ return ptOut.ZKInputs, nil
}
// LocalStateDB returns the underlying LocalStateDB // LocalStateDB returns the underlying LocalStateDB

cli/node/.gitignore vendored

@@ -1,2 +1,3 @@
cfg.example.secret.toml
cfg.toml
+ node


@@ -2,6 +2,10 @@
This is the main cli for the node This is the main cli for the node
+## Go version
+
+The `hermez-node` has been tested with go version 1.14
+
## Usage
```
@@ -65,29 +69,64 @@ when running the coordinator in sync mode
- The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section.
+## Building
+
+*All commands assume you are at the `cli/node` directory.*
+
+Building the node requires using the packr utility to bundle the database
+migrations inside the resulting binary. Install the packr utility with:
+```
+cd /tmp && go get -u github.com/gobuffalo/packr/v2/packr2 && cd -
+```
+Make sure your `$PATH` contains `$GOPATH/bin`, otherwise the packr utility will
+not be found.
+Now build the node executable:
+```
+cd ../../db && packr2 && cd -
+go build .
+cd ../../db && packr2 clean && cd -
+```
+The executable is `node`.
## Usage Examples
+The following commands assume you have built the node previously. You can also
+run the following examples by replacing `./node` with `go run .` and executing
+them in the `cli/node` directory to build from source and run at the same time.
Run the node in mode synchronizer:
```
-go run . --mode sync --cfg cfg.buidler.toml run
+./node --mode sync --cfg cfg.buidler.toml run
```
Run the node in mode coordinator:
```
-go run . --mode coord --cfg cfg.buidler.toml run
+./node --mode coord --cfg cfg.buidler.toml run
```
Import an ethereum private key into the keystore:
```
-go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
+./node --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
```
Generate a new BabyJubJub key pair:
```
-go run . --mode coord --cfg cfg.buidler.toml genbjj
+./node --mode coord --cfg cfg.buidler.toml genbjj
```
-Wipe the entire SQL database (this will destroy all synchronized and pool data):
+Wipe the entire SQL database (this will destroy all synchronized and pool
+data):
```
-go run . --mode coord --cfg cfg.buidler.toml wipesql
+./node --mode coord --cfg cfg.buidler.toml wipesql
```
+Discard all synchronized blocks and associated state up to a given block
+number. This command is useful in case the synchronizer reaches an invalid
+state and you want to roll back a few blocks and try again (maybe with some
+fixes in the code).
+```
+./node --mode coord --cfg cfg.buidler.toml discard --block 8061330
+```


@@ -14,17 +14,23 @@ Type = "bitfinexV2"
[Debug]
APIAddress = "localhost:12345"
MeddlerLogs = true
+ GinDebugMode = true
[StateDB]
Path = "/tmp/iden3-test/hermez/statedb"
Keep = 256
[PostgreSQL]
- Port = 5432
+ PortWrite = 5432
- Host = "localhost"
+ HostWrite = "localhost"
- User = "hermez"
+ UserWrite = "hermez"
- Password = "yourpasswordhere"
+ PasswordWrite = "yourpasswordhere"
- Name = "hermez"
+ NameWrite = "hermez"
+ # PortRead = 5432
+ # HostRead = "localhost"
+ # UserRead = "hermez"
+ # PasswordRead = "yourpasswordhere"
+ # NameRead = "hermez"
[Web3]
URL = "http://localhost:8545"
@@ -41,15 +47,22 @@ TokenHEZ = "0x5D94e3e7aeC542aB0F9129B9a7BAdeb5B3Ca0f77"
TokenHEZName = "Hermez Network Token"
[Coordinator]
- # ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
+ ForgerAddress = "0x05c23b938a85ab26A36E6314a0D02080E9ca6BeD" # Non-Boot Coordinator
# ForgerAddressPrivateKey = "0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3"
- ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
+ # ForgerAddress = "0xb4124ceb3451635dacedd11767f004d8a28c6ee7" # Boot Coordinator
# ForgerAddressPrivateKey = "0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563"
+ MinimumForgeAddressBalance = 0
ConfirmBlocks = 10
- L1BatchTimeoutPerc = 0.999
+ L1BatchTimeoutPerc = 0.6
+ StartSlotBlocksDelay = 2
+ ScheduleBatchBlocksAheadCheck = 3
+ SendBatchBlocksMarginCheck = 1
ProofServerPollInterval = "1s"
ForgeRetryInterval = "500ms"
SyncRetryInterval = "1s"
+ ForgeDelay = "10s"
+ ForgeNoTxsDelay = "0s"
+ PurgeByExtDelInterval = "1m"
[Coordinator.FeeAccount]
Address = "0x56232B1c5B10038125Bc7345664B4AFD745bcF8E"
@@ -60,6 +73,7 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
[Coordinator.L2DB]
SafetyPeriod = 10
MaxTxs = 512
+ MinFeeUSD = 0.0
TTL = "24h"
PurgeBatchDelay = 10
InvalidateBatchDelay = 20
@@ -80,18 +94,24 @@ MaxTx = 512
NLevels = 32
[Coordinator.EthClient]
- ReceiptTimeout = "60s"
- ReceiptLoopInterval = "500ms"
CheckLoopInterval = "500ms"
Attempts = 4
AttemptsDelay = "500ms"
- CallGasLimit = 300000
- GasPriceDiv = 100
+ TxResendTimeout = "2m"
+ NoReuseNonce = false
+ MaxGasPrice = "5000000000"
+ GasPriceIncPerc = 10
[Coordinator.EthClient.Keystore]
Path = "/tmp/iden3-test/hermez/ethkeystore"
Password = "yourpasswordhere"
+ [Coordinator.EthClient.ForgeBatchGasCost]
+ Fixed = 500000
+ L1UserTx = 8000
+ L1CoordTx = 9000
+ L2Tx = 1
[Coordinator.API]
Coordinator = true
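The new `[Coordinator.EthClient.ForgeBatchGasCost]` section above suggests a linear gas model for the `ForgeBatch` call: a fixed base cost plus a per-transaction cost by type. A minimal sketch of such a formula, assuming the helper name and the way the counts are combined (neither is shown in this diff):

```go
package main

import "fmt"

// ForgeBatchGasCost mirrors the [Coordinator.EthClient.ForgeBatchGasCost]
// TOML section; the field names follow the config keys above.
type ForgeBatchGasCost struct {
	Fixed, L1UserTx, L1CoordTx, L2Tx uint64
}

// forgeBatchGasLimit is a hypothetical helper: fixed base cost plus a
// per-transaction cost for each transaction type included in the batch.
func forgeBatchGasLimit(c ForgeBatchGasCost, nL1User, nL1Coord, nL2 uint64) uint64 {
	return c.Fixed + nL1User*c.L1UserTx + nL1Coord*c.L1CoordTx + nL2*c.L2Tx
}

func main() {
	c := ForgeBatchGasCost{Fixed: 500000, L1UserTx: 8000, L1CoordTx: 9000, L2Tx: 1}
	// A batch with 10 L1 user txs, 2 L1 coordinator txs and 300 L2 txs:
	fmt.Println(forgeBatchGasLimit(c, 10, 2, 300)) // 598300
}
```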


@@ -11,10 +11,13 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/hermeznetwork/hermez-node/config" "github.com/hermeznetwork/hermez-node/config"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/node" "github.com/hermeznetwork/hermez-node/node"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@@ -23,6 +26,7 @@ const (
flagMode = "mode" flagMode = "mode"
flagSK = "privatekey" flagSK = "privatekey"
flagYes = "yes" flagYes = "yes"
flagBlock = "block"
modeSync = "sync" modeSync = "sync"
modeCoord = "coord" modeCoord = "coord"
) )
@@ -87,11 +91,11 @@ func cmdWipeSQL(c *cli.Context) error {
} }
} }
db, err := dbUtils.ConnectSQLDB( db, err := dbUtils.ConnectSQLDB(
cfg.PostgreSQL.Port, cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.Host, cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.User, cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.Password, cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.Name, cfg.PostgreSQL.NameWrite,
) )
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -103,17 +107,7 @@ func cmdWipeSQL(c *cli.Context) error {
return nil return nil
} }
func cmdRun(c *cli.Context) error { func waitSigInt() {
cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
node, err := node.NewNode(cfg.mode, cfg.node)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
}
node.Start()
stopCh := make(chan interface{}) stopCh := make(chan interface{})
// catch ^C to send the stop signal // catch ^C to send the stop signal
@@ -134,11 +128,101 @@ func cmdRun(c *cli.Context) error {
}
}()
<-stopCh
+ }
+ func cmdRun(c *cli.Context) error {
+ cfg, err := parseCli(c)
+ if err != nil {
+ return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
+ }
+ node, err := node.NewNode(cfg.mode, cfg.node)
+ if err != nil {
+ return tracerr.Wrap(fmt.Errorf("error starting node: %w", err))
+ }
+ node.Start()
+ waitSigInt()
node.Stop()
return nil
}
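The body of `waitSigInt` is only partially visible in this hunk (the signal-catching goroutine between the `// catch ^C` comment and the final `<-stopCh` is elided). A plausible reconstruction of the helper using the usual `os/signal` wiring; the actual implementation may differ:

```go
package main

import (
	"os"
	"os/signal"
)

// waitSigInt blocks until the process receives an interrupt (^C). This is a
// hedged sketch: the diff only shows the stopCh creation and the final
// receive, so the middle is assumed.
func waitSigInt() {
	stopCh := make(chan interface{})
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	go func() {
		<-sigCh // first ^C triggers a clean shutdown
		close(stopCh)
	}()
	<-stopCh
}

func main() { waitSigInt() }
```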
func cmdServeAPI(c *cli.Context) error {
cfg, err := parseCliAPIServer(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
srv, err := node.NewAPIServer(cfg.mode, cfg.server)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error starting api server: %w", err))
}
srv.Start()
waitSigInt()
srv.Stop()
return nil
}
func cmdDiscard(c *cli.Context) error {
_cfg, err := parseCli(c)
if err != nil {
return tracerr.Wrap(fmt.Errorf("error parsing flags and config: %w", err))
}
cfg := _cfg.node
blockNum := c.Int64(flagBlock)
log.Infof("Discarding all blocks up to block %v...", blockNum)
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, nil)
if err := historyDB.Reorg(blockNum); err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.Reorg: %w", err))
}
batchNum, err := historyDB.GetLastBatchNum()
if err != nil {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
}
l2DB := l2db.NewL2DB(
dbRead, dbWrite,
cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration,
nil,
)
if err := l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("l2DB.Reorg: %w", err))
}
return nil
}
// Config is the configuration of the hermez node execution
type Config struct {
mode node.Mode
@@ -160,20 +244,59 @@ func getConfig(c *cli.Context) (*Config, error) {
var cfg Config
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
- if nodeCfgPath == "" {
- return nil, tracerr.Wrap(fmt.Errorf("required flag \"%v\" not set", flagCfg))
- }
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
- cfg.node, err = config.LoadNode(nodeCfgPath)
+ cfg.node, err = config.LoadNode(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
- cfg.node, err = config.LoadCoordinator(nodeCfgPath)
+ cfg.node, err = config.LoadNode(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
default:
return nil, tracerr.Wrap(fmt.Errorf("invalid mode \"%v\"", mode))
}
return &cfg, nil
}
// ConfigAPIServer is the configuration of the api server execution
type ConfigAPIServer struct {
mode node.Mode
server *config.APIServer
}
func parseCliAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
cfg, err := getConfigAPIServer(c)
if err != nil {
if err := cli.ShowAppHelp(c); err != nil {
panic(err)
}
return nil, tracerr.Wrap(err)
}
return cfg, nil
}
func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
var cfg ConfigAPIServer
mode := c.String(flagMode)
nodeCfgPath := c.String(flagCfg)
var err error
switch mode {
case modeSync:
cfg.mode = node.ModeSynchronizer
cfg.server, err = config.LoadAPIServer(nodeCfgPath, false)
if err != nil {
return nil, tracerr.Wrap(err)
}
case modeCoord:
cfg.mode = node.ModeCoordinator
cfg.server, err = config.LoadAPIServer(nodeCfgPath, true)
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -239,6 +362,24 @@ func main() {
Usage: "Run the hermez-node in the indicated mode", Usage: "Run the hermez-node in the indicated mode",
Action: cmdRun, Action: cmdRun,
},
{
Name: "serveapi",
Aliases: []string{},
Usage: "Serve the API only",
Action: cmdServeAPI,
},
{
Name: "discard",
Aliases: []string{},
Usage: "Discard blocks up to a specified block number",
Action: cmdDiscard,
Flags: []cli.Flag{
&cli.Int64Flag{
Name: flagBlock,
Usage: "last block number to keep",
Required: false,
}},
},
}
err := app.Run(os.Args)


@@ -263,3 +263,13 @@ type IdxNonce struct {
Idx Idx `db:"idx"`
Nonce Nonce `db:"nonce"`
}
// AccountUpdate represents an account balance and/or nonce update after a
// processed batch
type AccountUpdate struct {
EthBlockNum int64 `meddler:"eth_block_num"`
BatchNum BatchNum `meddler:"batch_num"`
Idx Idx `meddler:"idx"`
Nonce Nonce `meddler:"nonce"`
Balance *big.Int `meddler:"balance,bigint"`
}
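Because the struct carries meddler tags, persisting one row is a one-liner with the meddler library. A sketch assuming the target table is called `account_update` (the actual table name and insertion code are not part of this diff):

```go
package main

import (
	"database/sql"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/russross/meddler"
)

// addAccountUpdate is a hypothetical helper: meddler maps struct fields to
// columns using the `meddler:"..."` tags declared on common.AccountUpdate.
func addAccountUpdate(db *sql.DB, au *common.AccountUpdate) error {
	return meddler.Insert(db, "account_update", au)
}
```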


@@ -1,21 +1,30 @@
package common
import (
- "encoding/binary"
- "strconv"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
+ ethMath "github.com/ethereum/go-ethereum/common/math"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
+ ethSigner "github.com/ethereum/go-ethereum/signer/core"
+ "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub"
)
// AccountCreationAuthMsg is the message that is signed to authorize a Hermez
// account creation
- const AccountCreationAuthMsg = "I authorize this babyjubjub key for hermez rollup account creation"
+ const AccountCreationAuthMsg = "Account creation"
- // EthMsgPrefix is the prefix for message signing at the Ethereum ecosystem
- const EthMsgPrefix = "\x19Ethereum Signed Message:\n"
+ // EIP712Version is the used version of the EIP-712
+ const EIP712Version = "1"
+ // EIP712Provider defines the Provider for the EIP-712
+ const EIP712Provider = "Hermez Network"
+ var (
+ // EmptyEthSignature is an ethereum signature of all zeroes
+ EmptyEthSignature = make([]byte, 65)
+ )
// AccountCreationAuth authorizations sent by users to the L2DB, to be used for
// account creations when necessary
@@ -26,27 +35,64 @@ type AccountCreationAuth struct {
Timestamp time.Time `meddler:"timestamp,utctime"`
}
+ // toHash returns a byte array to be hashed from the AccountCreationAuth, which
+ // follows the EIP-712 encoding
func (a *AccountCreationAuth) toHash(chainID uint16,
- hermezContractAddr ethCommon.Address) []byte {
+ hermezContractAddr ethCommon.Address) ([]byte, error) {
- var chainIDBytes [2]byte
- binary.BigEndian.PutUint16(chainIDBytes[:], chainID)
- // [EthPrefix | AccountCreationAuthMsg | compressedBJJ | chainID | hermezContractAddr]
- var b []byte
- b = append(b, []byte(AccountCreationAuthMsg)...)
- b = append(b, SwapEndianness(a.BJJ[:])...) // for js implementation compatibility
- b = append(b, chainIDBytes[:]...)
- b = append(b, hermezContractAddr[:]...)
- ethPrefix := EthMsgPrefix + strconv.Itoa(len(b))
- return append([]byte(ethPrefix), b...)
+ chainIDFormatted := ethMath.NewHexOrDecimal256(int64(chainID))
+ signerData := ethSigner.TypedData{
+ Types: ethSigner.Types{
+ "EIP712Domain": []ethSigner.Type{
+ {Name: "name", Type: "string"},
+ {Name: "version", Type: "string"},
+ {Name: "chainId", Type: "uint256"},
+ {Name: "verifyingContract", Type: "address"},
+ },
+ "Authorise": []ethSigner.Type{
+ {Name: "Provider", Type: "string"},
+ {Name: "Authorisation", Type: "string"},
+ {Name: "BJJKey", Type: "bytes32"},
+ },
+ },
+ PrimaryType: "Authorise",
+ Domain: ethSigner.TypedDataDomain{
+ Name: EIP712Provider,
+ Version: EIP712Version,
+ ChainId: chainIDFormatted,
+ VerifyingContract: hermezContractAddr.Hex(),
+ },
+ Message: ethSigner.TypedDataMessage{
+ "Provider": EIP712Provider,
+ "Authorisation": AccountCreationAuthMsg,
+ "BJJKey": SwapEndianness(a.BJJ[:]),
+ },
+ }
+ domainSeparator, err := signerData.HashStruct("EIP712Domain", signerData.Domain.Map())
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ typedDataHash, err := signerData.HashStruct(signerData.PrimaryType, signerData.Message)
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ rawData := []byte{0x19, 0x01} // "\x19\x01"
+ rawData = append(rawData, domainSeparator...)
+ rawData = append(rawData, typedDataHash...)
+ return rawData, nil
}
// HashToSign returns the hash to be signed by the Ethereum address to authorize
- // the account creation
+ // the account creation, which follows the EIP-712 encoding
func (a *AccountCreationAuth) HashToSign(chainID uint16,
hermezContractAddr ethCommon.Address) ([]byte, error) {
- b := a.toHash(chainID, hermezContractAddr)
- return ethCrypto.Keccak256Hash(b).Bytes(), nil
+ b, err := a.toHash(chainID, hermezContractAddr)
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ return ethCrypto.Keccak256(b), nil
}
// Sign signs the account creation authorization message using the provided
@@ -54,16 +100,17 @@ func (a *AccountCreationAuth) HashToSign(chainID uint16,
// should do an ethereum signature using the account corresponding to
// `a.EthAddr`. The `signHash` function is used to make signing flexible: in
// tests we sign directly using the private key, outside tests we sign using
- // the keystore (which never exposes the private key).
+ // the keystore (which never exposes the private key). Sign follows the EIP-712
+ // encoding.
func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
chainID uint16, hermezContractAddr ethCommon.Address) error {
hash, err := a.HashToSign(chainID, hermezContractAddr)
if err != nil {
- return err
+ return tracerr.Wrap(err)
}
sig, err := signHash(hash)
if err != nil {
- return err
+ return tracerr.Wrap(err)
}
sig[64] += 27
a.Signature = sig
@@ -72,7 +119,8 @@ func (a *AccountCreationAuth) Sign(signHash func(hash []byte) ([]byte, error),
}
// VerifySignature ensures that the Signature is done with the EthAddr, for the
- // chainID and hermezContractAddress passed by parameter
+ // chainID and hermezContractAddress passed by parameter. VerifySignature
+ // follows the EIP-712 encoding.
func (a *AccountCreationAuth) VerifySignature(chainID uint16,
hermezContractAddr ethCommon.Address) bool {
// Calculate hash to be signed
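Putting the pieces together, signing and then verifying an authorization in a test setting (where the Ethereum private key is available directly rather than through a keystore) could look like the following sketch. The key values are illustrative, `NewRandPrivKey` comes from `go-iden3-crypto/babyjub`, and the rest follows the method signatures shown above:

```go
package main

import (
	"fmt"

	ethCommon "github.com/ethereum/go-ethereum/common"
	ethCrypto "github.com/ethereum/go-ethereum/crypto"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/iden3/go-iden3-crypto/babyjub"
)

func main() {
	sk, _ := ethCrypto.HexToECDSA(
		"fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
	bjj := babyjub.NewRandPrivKey()
	auth := common.AccountCreationAuth{
		EthAddr: ethCrypto.PubkeyToAddress(sk.PublicKey),
		BJJ:     bjj.Public().Compress(),
	}
	chainID := uint16(1337)
	hermezAddr := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
	// In tests we sign with the raw key; the node would go through the keystore.
	if err := auth.Sign(func(hash []byte) ([]byte, error) {
		return ethCrypto.Sign(hash, sk)
	}, chainID, hermezAddr); err != nil {
		panic(err)
	}
	fmt.Println(auth.VerifySignature(chainID, hermezAddr)) // true
}
```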


@@ -39,7 +39,7 @@ func TestAccountCreationAuthSignVerify(t *testing.T) {
// Hash and sign manually and compare the generated signature
hash, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err)
- assert.Equal(t, "4f8df75e96fdce1ac90bb2f8d81c42047600f85bfcef80ce3b91c2a2afc58c1e",
+ assert.Equal(t, "9414667457e658dd31949b82996b75c65a055512244c3bbfd22ff56add02ba65",
hex.EncodeToString(hash))
sig, err := ethCrypto.Sign(hash, ethSk)
require.NoError(t, err)
@@ -75,9 +75,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7",
chainID: uint16(4),
hermezContractAddr: "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf",
- toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d700047e5f4552091a69125d5dfcb7b8c2659029395bdf",
+ toHashExpected: "190189658bba487e11c7da602676ee32bc90b77d3f32a305b147e4f3c3b35f19672e5d84ccc38d0ab245c469b719549d837113465c2abf9972c49403ca6fd10ed3dc",
- hashExpected: "39afea52d843a4de905b6b5ebb0ee8c678141f711d96d9b429c4aec10ef9911f",
+ hashExpected: "c56eba41e511df100c804c5c09288f35887efea4f033be956481af335df3bea2",
- sigExpected: "73d10d6ecf06ee8a5f60ac90f06b78bef9c650f414ba3ac73e176dc32e896159147457e9c86f0b4bd60fdaf2c0b2aec890a7df993d69a4805e242a6b845ebf231c",
+ sigExpected: "dbedcc5ce02db8f48afbdb2feba9a3a31848eaa8fca5f312ce37b01db45d2199208335330d4445bd2f51d1db68dbc0d0bf3585c4a07504b4efbe46a69eaae5a21b",
}
tv1 := testVector{
ethSk: "0000000000000000000000000000000000000000000000000000000000000002",
@@ -85,9 +85,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d",
chainID: uint16(0),
hermezContractAddr: "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf",
- toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e093985b1993d9f743f9d7d943ed56f38601cb8b196db025f79650c4007c3054d00002b5ad5c4795c026514f8317c7a215e218dccd6cf",
+ toHashExpected: "1901dafbc253dedf90d6421dc6e25d5d9efc6985133cb2a8d363d0a081a0e3eddddc65f603a88de36aaeabd3b4cf586538c7f3fd50c94780530a3707c8c14ad9fd11",
- hashExpected: "89a3895993a4736232212e59566294feb3da227af44375daf3307dcad5451d5d",
+ hashExpected: "deb9afa479282cf27b442ce8ba86b19448aa87eacef691521a33db5d0feb9959",
- sigExpected: "bb4156156c705494ad5f99030342c64657e51e2994750f92125717c40bf56ad632044aa6bd00979feea92c417b552401e65fe5f531f15010d9d1c278da8be1df1b",
+ sigExpected: "6a0da90ba2d2b1be679a28ebe54ee03082d44b836087391cd7d2607c1e4dafe04476e6e88dccb8707c68312512f16c947524b35c80f26c642d23953e9bb84c701c",
}
tv2 := testVector{
ethSk: "c5e8f61d1ab959b397eecc0a37a6517b8e67a0e7cf1f4bce5591f3ed80199122",
@@ -95,9 +95,9 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
pkCompStr: "22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b52",
chainID: uint16(31337), // =0x7a69
hermezContractAddr: "0xf4e77E5Da47AC3125140c470c71cBca77B5c638c",
- toHashExpected: "19457468657265756d205369676e6564204d6573736167653a0a3132304920617574686f72697a65207468697320626162796a75626a7562206b657920666f72206865726d657a20726f6c6c7570206163636f756e74206372656174696f6e22870c1bcc451396202d62f566026eab8e438c6c91decf8ddf63a6c162619b527a69f4e77e5da47ac3125140c470c71cbca77b5c638c",
+ toHashExpected: "190167617949b934d7e01add4009cd3d47415a26727b7d6288e5dce33fb3721d5a1a9ce511b19b694c9aaf8183f4987ed752f24884c54c003d11daa2e98c7547a79e",
- hashExpected: "4f6ead01278ba4597d4720e37482f585a713497cea994a95209f4c57a963b4a7",
+ hashExpected: "157b570c597e615b8356ce008ac39f43bc9b6d50080bc07d968031b9378acbbb",
- sigExpected: "43b5818802a137a72a190c1d8d767ca507f7a4804b1b69b5e055abf31f4f2b476c80bb1ba63260d95610f6f831420d32130e7f22fec5d76e16644ddfcedd0d441c",
+ sigExpected: "a0766181102428b5672e523dc4b905c10ddf025c10dbd0b3534ef864632a14652737610041c670b302fc7dca28edd5d6eac42b72d69ce58da8ce21287b244e381b",
}
tvs = append(tvs, tv0)
tvs = append(tvs, tv1)
@@ -122,10 +122,10 @@ func TestAccountCreationAuthJSComp(t *testing.T) {
BJJ: pkComp,
}
- toHash := a.toHash(chainID, hermezContractAddr)
+ toHash, err := a.toHash(chainID, hermezContractAddr)
+ require.NoError(t, err)
assert.Equal(t, tv.toHashExpected,
hex.EncodeToString(toHash))
- assert.Equal(t, 120+len(EthMsgPrefix)+len([]byte("120")), len(toHash))
msg, err := a.HashToSign(chainID, hermezContractAddr)
require.NoError(t, err)


@@ -27,6 +27,24 @@ type Batch struct {
TotalFeesUSD *float64 `meddler:"total_fees_usd"`
}
// NewEmptyBatch creates a new empty batch
func NewEmptyBatch() *Batch {
return &Batch{
BatchNum: 0,
EthBlockNum: 0,
ForgerAddr: ethCommon.Address{},
CollectedFees: make(map[TokenID]*big.Int),
FeeIdxsCoordinator: make([]Idx, 0),
StateRoot: big.NewInt(0),
NumAccounts: 0,
LastIdx: 0,
ExitRoot: big.NewInt(0),
ForgeL1TxsNum: nil,
SlotNum: 0,
TotalFeesUSD: nil,
}
}
// BatchNum identifies a batch
type BatchNum int64
@@ -59,6 +77,7 @@ type BatchData struct {
L1CoordinatorTxs []L1Tx
L2Txs []L2Tx
CreatedAccounts []Account
+ UpdatedAccounts []AccountUpdate
ExitTree []ExitInfo
Batch Batch
}

common/eth.go Normal file

@@ -0,0 +1,33 @@
package common
// SCVariables joins all the smart contract variables in a single struct
type SCVariables struct {
Rollup RollupVariables `validate:"required"`
Auction AuctionVariables `validate:"required"`
WDelayer WDelayerVariables `validate:"required"`
}
// AsPtr returns the SCVariables as a SCVariablesPtr using pointers to the
// original SCVariables
func (v *SCVariables) AsPtr() *SCVariablesPtr {
return &SCVariablesPtr{
Rollup: &v.Rollup,
Auction: &v.Auction,
WDelayer: &v.WDelayer,
}
}
// SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct
type SCVariablesPtr struct {
Rollup *RollupVariables `validate:"required"`
Auction *AuctionVariables `validate:"required"`
WDelayer *WDelayerVariables `validate:"required"`
}
// SCConsts joins all the smart contract constants in a single struct
type SCConsts struct {
Rollup RollupConstants
Auction AuctionConstants
WDelayer WDelayerConstants
}
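A quick illustration of why `AsPtr` is convenient: setters such as the `SetSCVars` used in the tests above take a `*common.SCVariablesPtr`, and the returned pointers alias the original struct rather than copying it. A minimal sketch (the `ForgeL1L2BatchTimeout` field name is assumed for illustration):

```go
package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// In the real node this struct would be filled by the synchronizer.
	var vars common.SCVariables
	ptr := vars.AsPtr()
	// Writes through ptr are visible in vars, since AsPtr does not copy.
	ptr.Rollup.ForgeL1L2BatchTimeout = 10 // assumed field name
	fmt.Println(vars.Rollup.ForgeL1L2BatchTimeout)
}
```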


@@ -33,7 +33,8 @@ func (c *AuctionConstants) SlotNum(blockNum int64) int64 {
if blockNum >= c.GenesisBlockNum {
return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}
- return -1
+ // This result will be negative
+ return (blockNum - c.GenesisBlockNum) / int64(c.BlocksPerSlot)
}
// SlotBlocks returns the first and the last block numbers included in that slot // SlotBlocks returns the first and the last block numbers included in that slot
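With this change, blocks before the genesis block map to proportionally negative slot numbers instead of a flat -1; note that Go's integer division truncates toward zero, so the blocks immediately below genesis still yield slot 0. A worked example using the two `AuctionConstants` fields referenced above:

```go
package main

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	c := common.AuctionConstants{GenesisBlockNum: 1000, BlocksPerSlot: 40}
	fmt.Println(c.SlotNum(1085)) // 2: (1085-1000)/40
	fmt.Println(c.SlotNum(999))  // 0: -1/40 truncates toward zero
	fmt.Println(c.SlotNum(960))  // -1
	fmt.Println(c.SlotNum(919))  // -2
}
```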


@@ -1,132 +0,0 @@
// Package common Float16 provides methods to work with Hermez custom half float
// precision, 16 bits, codification internally called Float16 has been adopted
// to encode large integers. This is done in order to save bits when L2
// transactions are published.
//nolint:gomnd
package common
import (
"encoding/binary"
"errors"
"math/big"
"github.com/hermeznetwork/tracerr"
)
var (
// ErrRoundingLoss is used when converted big.Int to Float16 causes rounding loss
ErrRoundingLoss = errors.New("input value causes rounding loss")
)
// Float16 represents a float in a 16 bit format
type Float16 uint16
// Bytes return a byte array of length 2 with the Float16 value encoded in BigEndian
func (f16 Float16) Bytes() []byte {
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(f16))
return b[:]
}
// Float16FromBytes returns a Float16 from a byte array of 2 bytes.
func Float16FromBytes(b []byte) *Float16 {
// WARNING b[:2] for a b where len(b)<2 can break
f16 := Float16(binary.BigEndian.Uint16(b[:2]))
return &f16
}
// BigInt converts the Float16 to a *big.Int integer
func (f16 *Float16) BigInt() *big.Int {
fl := int64(*f16)
m := big.NewInt(fl & 0x3FF)
e := big.NewInt(fl >> 11)
e5 := (fl >> 10) & 0x01
exp := big.NewInt(0).Exp(big.NewInt(10), e, nil)
res := m.Mul(m, exp)
if e5 != 0 && e.Cmp(big.NewInt(0)) != 0 {
res.Add(res, exp.Div(exp, big.NewInt(2)))
}
return res
}
// floorFix2Float converts a fix to a float, always rounding down
func floorFix2Float(_f *big.Int) Float16 {
zero := big.NewInt(0)
ten := big.NewInt(10)
e := int64(0)
m := big.NewInt(0)
m.Set(_f)
if m.Cmp(zero) == 0 {
return 0
}
s := big.NewInt(0).Rsh(m, 10)
for s.Cmp(zero) != 0 {
m.Div(m, ten)
s.Rsh(m, 10)
e++
}
return Float16(m.Int64() | e<<11)
}
// NewFloat16 encodes a *big.Int integer as a Float16, returning error in
// case of loss during the encoding.
func NewFloat16(f *big.Int) (Float16, error) {
fl1 := floorFix2Float(f)
fi1 := fl1.BigInt()
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
m3 := (fl1 & 0x3FF) + 1
e3 := fl1 >> 11
if m3&0x400 == 0 {
m3 = 0x66
e3++
}
fl3 := m3 + e3<<11
fi3 := fl3.BigInt()
res := fl1
d := big.NewInt(0).Abs(fi1.Sub(fi1, f))
d2 := big.NewInt(0).Abs(fi2.Sub(fi2, f))
if d.Cmp(d2) == 1 {
res = fl2
d = d2
}
d3 := big.NewInt(0).Abs(fi3.Sub(fi3, f))
if d.Cmp(d3) == 1 {
res = fl3
}
// Do rounding check
if res.BigInt().Cmp(f) == 0 {
return res, nil
}
return res, tracerr.Wrap(ErrRoundingLoss)
}
// NewFloat16Floor encodes a big.Int integer as a Float16, rounding down in
// case of loss during the encoding.
func NewFloat16Floor(f *big.Int) Float16 {
fl1 := floorFix2Float(f)
fl2 := fl1 | 0x400
fi2 := fl2.BigInt()
if fi2.Cmp(f) < 1 {
return fl2
}
return fl1
}


@@ -1,132 +0,0 @@
package common
import (
"math/big"
"testing"
"github.com/hermeznetwork/tracerr"
"github.com/stretchr/testify/assert"
)
func TestConversionsFloat16(t *testing.T) {
testVector := map[Float16]string{
0x307B: "123000000",
0x1DC6: "454500",
0xFFFF: "10235000000000000000000000000000000",
0x0000: "0",
0x0400: "0",
0x0001: "1",
0x0401: "1",
0x0800: "0",
0x0c00: "5",
0x0801: "10",
0x0c01: "15",
}
for test := range testVector {
fix := test.BigInt()
assert.Equal(t, fix.String(), testVector[test])
bi := big.NewInt(0)
bi.SetString(testVector[test], 10)
fl, err := NewFloat16(bi)
assert.NoError(t, err)
fx2 := fl.BigInt()
assert.Equal(t, fx2.String(), testVector[test])
}
}
func TestFloorFix2FloatFloat16(t *testing.T) {
testVector := map[string]Float16{
"87999990000000000": 0x776f,
"87950000000000001": 0x776f,
"87950000000000000": 0x776f,
"87949999999999999": 0x736f,
}
for test := range testVector {
bi := big.NewInt(0)
bi.SetString(test, 10)
testFloat := NewFloat16Floor(bi)
assert.Equal(t, testFloat, testVector[test])
}
}
func TestConversionLossesFloat16(t *testing.T) {
a := big.NewInt(1000)
b, err := NewFloat16(a)
assert.NoError(t, err)
c := b.BigInt()
assert.Equal(t, c, a)
a = big.NewInt(1024)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32767)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(32768)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
a = big.NewInt(65536000)
b, err = NewFloat16(a)
assert.Equal(t, ErrRoundingLoss, tracerr.Unwrap(err))
c = b.BigInt()
assert.NotEqual(t, c, a)
}
func BenchmarkFloat16(b *testing.B) {
newBigInt := func(s string) *big.Int {
bigInt, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("Bad big int")
}
return bigInt
}
type pair struct {
Float16 Float16
BigInt *big.Int
}
testVector := []pair{
{0x307B, newBigInt("123000000")},
{0x1DC6, newBigInt("454500")},
{0xFFFF, newBigInt("10235000000000000000000000000000000")},
{0x0000, newBigInt("0")},
{0x0400, newBigInt("0")},
{0x0001, newBigInt("1")},
{0x0401, newBigInt("1")},
{0x0800, newBigInt("0")},
{0x0c00, newBigInt("5")},
{0x0801, newBigInt("10")},
{0x0c01, newBigInt("15")},
}
b.Run("floorFix2Float()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
NewFloat16Floor(testVector[i%len(testVector)].BigInt)
}
})
b.Run("NewFloat16()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = NewFloat16(testVector[i%len(testVector)].BigInt)
}
})
b.Run("Float16.BigInt()", func(b *testing.B) {
for i := 0; i < b.N; i++ {
testVector[i%len(testVector)].Float16.BigInt()
}
})
}


@@ -6,7 +6,6 @@
package common
import (
- "bytes"
"encoding/binary"
"errors"
"math/big"
@@ -18,11 +17,14 @@ const (
// maxFloat40Value is the maximum value that the Float40 can have
// (40 bits: maxFloat40Value=2**40-1)
maxFloat40Value = 0xffffffffff
+ // Float40BytesLength defines the length of the Float40 values
+ // represented as byte arrays
+ Float40BytesLength = 5
)
var (
- // ErrFloat40Overflow is used when a given nonce overflows the maximum
- // capacity of the Float40 (2**40-1)
+ // ErrFloat40Overflow is used when a given Float40 overflows the
+ // maximum capacity of the Float40 (2**40-1)
ErrFloat40Overflow = errors.New("Float40 overflow, max value: 2**40 -1")
// ErrFloat40E31 is used when the e > 31 when trying to convert a
// *big.Int to Float40
@@ -85,15 +87,14 @@ func NewFloat40(f *big.Int) (Float40, error) {
zero := big.NewInt(0)
ten := big.NewInt(10)
thres := big.NewInt(0x08_00_00_00_00)
- for bytes.Equal(zero.Bytes(), new(big.Int).Mod(m, ten).Bytes()) &&
- !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
+ for new(big.Int).Mod(m, ten).Cmp(zero) == 0 && m.Cmp(thres) >= 0 {
m = new(big.Int).Div(m, ten)
e = new(big.Int).Add(e, big.NewInt(1))
}
if e.Int64() > 31 {
return 0, ErrFloat40E31
}
- if !bytes.Equal(zero.Bytes(), new(big.Int).Div(m, thres).Bytes()) {
+ if m.Cmp(thres) >= 0 {
return 0, ErrFloat40NotEnoughPrecission
}
r := new(big.Int).Add(m,
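For intuition about the rewritten loop: `NewFloat40` keeps dividing the mantissa by 10 (incrementing the exponent) only while it is both divisible by 10 and still at or above the 35-bit threshold `0x08_00_00_00_00`. A hedged round-trip sketch; `NewFloat40` is shown above, while the `BigInt()` decoding method is assumed from the rest of the package:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

func main() {
	// 123000000000 is above 2^35 and divisible by 10: one normalization step
	// leaves mantissa 12300000000 (below 2^35) with exponent 1.
	v, _ := new(big.Int).SetString("123000000000", 10)
	f40, err := common.NewFloat40(v)
	if err != nil {
		panic(err)
	}
	back, err := f40.BigInt() // assumed decode method
	fmt.Println(err, back)    // <nil> 123000000000
}
```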


@@ -15,7 +15,7 @@ import (
type L1Tx struct { type L1Tx struct {
// Stored in DB: mandatory fileds // Stored in DB: mandatory fileds
// TxID (12 bytes) for L1Tx is: // TxID (32 bytes) for L1Tx is the Keccak256 (ethereum) hash of:
// bytes: | 1 | 8 | 2 | 1 | // bytes: | 1 | 8 | 2 | 1 |
// values: | type | ToForgeL1TxsNum | Position | 0 (padding) | // values: | type | ToForgeL1TxsNum | Position | 0 (padding) |
// where type: // where type:
@@ -225,7 +225,7 @@ func (tx *L1Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes) copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
} }
// fee = 0 (as is L1Tx) // fee = 0 (as is L1Tx)
return b[:], nil return b[:], nil
@@ -237,7 +237,7 @@ func L1TxFromDataAvailability(b []byte, nLevels uint32) (*L1Tx, error) {
fromIdxBytes := b[0:idxLen] fromIdxBytes := b[0:idxLen]
toIdxBytes := b[idxLen : idxLen*2] toIdxBytes := b[idxLen : idxLen*2]
amountBytes := b[idxLen*2 : idxLen*2+5] amountBytes := b[idxLen*2 : idxLen*2+Float40BytesLength]
l1tx := L1Tx{} l1tx := L1Tx{}
fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6)) fromIdx, err := IdxFromBytes(ethCommon.LeftPadBytes(fromIdxBytes, 6))
@@ -368,19 +368,12 @@ func L1UserTxFromBytes(b []byte) (*L1Tx, error) {
return tx, nil return tx, nil
} }
func signHash(data []byte) []byte {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
return ethCrypto.Keccak256([]byte(msg))
}
// L1CoordinatorTxFromBytes decodes a L1Tx from []byte // L1CoordinatorTxFromBytes decodes a L1Tx from []byte
func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) { func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommon.Address) (*L1Tx, error) {
if len(b) != RollupConstL1CoordinatorTotalBytes { if len(b) != RollupConstL1CoordinatorTotalBytes {
return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b))) return nil, tracerr.Wrap(fmt.Errorf("Can not parse L1CoordinatorTx bytes, expected length %d, current: %d", 101, len(b)))
} }
bytesMessage := []byte("I authorize this babyjubjub key for hermez rollup account creation")
tx := &L1Tx{ tx := &L1Tx{
UserOrigin: false, UserOrigin: false,
} }
@@ -401,18 +394,20 @@ func L1CoordinatorTxFromBytes(b []byte, chainID *big.Int, hermezAddress ethCommo
// L1CoordinatorTX ETH // L1CoordinatorTX ETH
// Ethereum adds 27 to v // Ethereum adds 27 to v
v = b[0] - byte(27) //nolint:gomnd v = b[0] - byte(27) //nolint:gomnd
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
var data []byte
data = append(data, bytesMessage...)
data = append(data, pkCompB...)
data = append(data, chainIDBytes[:]...)
data = append(data, hermezAddress.Bytes()...)
var signature []byte var signature []byte
signature = append(signature, r[:]...) signature = append(signature, r[:]...)
signature = append(signature, s[:]...) signature = append(signature, s[:]...)
signature = append(signature, v) signature = append(signature, v)
hash := signHash(data)
pubKeyBytes, err := ethCrypto.Ecrecover(hash, signature) accCreationAuth := AccountCreationAuth{
BJJ: tx.FromBJJ,
}
h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
if err != nil {
return nil, tracerr.Wrap(err)
}
pubKeyBytes, err := ethCrypto.Ecrecover(h, signature)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
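The hunk cuts off right after Ecrecover; downstream, the recovered public key is presumably reduced to an ethereum address. A hedged sketch of that step with go-ethereum's crypto package (the FromEthAddr assignment is illustrative, not necessarily the exact field the parser sets):

pubKey, err := ethCrypto.UnmarshalPubkey(pubKeyBytes)
if err != nil {
	return nil, tracerr.Wrap(err)
}
// Hypothetical: derive the signer address from the recovered key.
tx.FromEthAddr = ethCrypto.PubkeyToAddress(*pubKey)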

View File

@@ -227,7 +227,6 @@ func TestL1TxByteParsersCompatibility(t *testing.T) {
func TestL1CoordinatorTxByteParsers(t *testing.T) { func TestL1CoordinatorTxByteParsers(t *testing.T) {
hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe") hermezAddress := ethCommon.HexToAddress("0xD6C850aeBFDC46D7F4c207e445cC0d6B0919BDBe")
chainID := big.NewInt(1337) chainID := big.NewInt(1337)
chainIDBytes := ethCommon.LeftPadBytes(chainID.Bytes(), 2)
privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19") privateKey, err := crypto.HexToECDSA("fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19")
require.NoError(t, err) require.NoError(t, err)
@@ -245,18 +244,16 @@ func TestL1CoordinatorTxByteParsers(t *testing.T) {
pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c") pkCompL := []byte("56ca90f80d7c374ae7485e9bcc47d4ac399460948da6aeeb899311097925a72c")
err = pkComp.UnmarshalText(pkCompL) err = pkComp.UnmarshalText(pkCompL)
require.NoError(t, err) require.NoError(t, err)
bytesMessage1 := []byte("\x19Ethereum Signed Message:\n120")
bytesMessage2 := []byte("I authorize this babyjubjub key for hermez rollup account creation")
babyjubB := SwapEndianness(pkComp[:]) accCreationAuth := AccountCreationAuth{
var data []byte EthAddr: fromEthAddr,
data = append(data, bytesMessage1...) BJJ: pkComp,
data = append(data, bytesMessage2...) }
data = append(data, babyjubB[:]...)
data = append(data, chainIDBytes...) h, err := accCreationAuth.HashToSign(uint16(chainID.Uint64()), hermezAddress)
data = append(data, hermezAddress.Bytes()...) require.NoError(t, err)
hash := crypto.Keccak256Hash(data)
signature, err := crypto.Sign(hash.Bytes(), privateKey) signature, err := crypto.Sign(h, privateKey)
require.NoError(t, err) require.NoError(t, err)
// Ethereum adds 27 to v // Ethereum adds 27 to v
v := int(signature[64]) v := int(signature[64])

View File

@@ -200,8 +200,8 @@ func (tx L2Tx) BytesDataAvailability(nLevels uint32) ([]byte, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
copy(b[idxLen*2:idxLen*2+5], amountFloat40Bytes) copy(b[idxLen*2:idxLen*2+Float40BytesLength], amountFloat40Bytes)
b[idxLen*2+5] = byte(tx.Fee) b[idxLen*2+Float40BytesLength] = byte(tx.Fee)
return b[:], nil return b[:], nil
} }
@@ -226,10 +226,10 @@ func L2TxFromBytesDataAvailability(b []byte, nLevels int) (*L2Tx, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+5]).BigInt() tx.Amount, err = Float40FromBytes(b[idxLen*2 : idxLen*2+Float40BytesLength]).BigInt()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
tx.Fee = FeeSelector(b[idxLen*2+5]) tx.Fee = FeeSelector(b[idxLen*2+Float40BytesLength])
return tx, nil return tx, nil
} }
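For reference, the data-availability byte layout that this hunk and the L1Tx one above share, assuming idxLen = nLevels/8 as the slicing implies:

b[0 : idxLen]                              fromIdx
b[idxLen : idxLen*2]                       toIdx
b[idxLen*2 : idxLen*2+Float40BytesLength]  amount (Float40, 5 bytes)
b[idxLen*2+Float40BytesLength]             fee (1 byte FeeSelector; always 0 for L1Tx)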

View File

@@ -36,7 +36,7 @@ type PoolL2Tx struct {
ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"` ToEthAddr ethCommon.Address `meddler:"to_eth_addr,zeroisnull"`
ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"` ToBJJ babyjub.PublicKeyComp `meddler:"to_bjj,zeroisnull"`
TokenID TokenID `meddler:"token_id"` TokenID TokenID `meddler:"token_id"`
Amount *big.Int `meddler:"amount,bigint"` // TODO: change to float40 Amount *big.Int `meddler:"amount,bigint"`
Fee FeeSelector `meddler:"fee"` Fee FeeSelector `meddler:"fee"`
Nonce Nonce `meddler:"nonce"` // effective 40 bits used Nonce Nonce `meddler:"nonce"` // effective 40 bits used
State PoolL2TxState `meddler:"state"` State PoolL2TxState `meddler:"state"`
@@ -53,7 +53,7 @@ type PoolL2Tx struct {
RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"` RqToEthAddr ethCommon.Address `meddler:"rq_to_eth_addr,zeroisnull"`
RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"` RqToBJJ babyjub.PublicKeyComp `meddler:"rq_to_bjj,zeroisnull"`
RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"` RqTokenID TokenID `meddler:"rq_token_id,zeroisnull"`
RqAmount *big.Int `meddler:"rq_amount,bigintnull"` // TODO: change to float40 RqAmount *big.Int `meddler:"rq_amount,bigintnull"`
RqFee FeeSelector `meddler:"rq_fee,zeroisnull"` RqFee FeeSelector `meddler:"rq_fee,zeroisnull"`
RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used RqNonce Nonce `meddler:"rq_nonce,zeroisnull"` // effective 48 bits used
AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"` AbsoluteFee float64 `meddler:"fee_usd,zeroisnull"`
@@ -73,7 +73,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original Type doesn't match the correct one, return error // If original Type doesn't match the correct one, return error
if txTypeOld != "" && txTypeOld != tx.Type { if txTypeOld != "" && txTypeOld != tx.Type {
return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s", return nil, tracerr.Wrap(fmt.Errorf("L2Tx.Type: %s, should be: %s",
tx.Type, txTypeOld)) txTypeOld, tx.Type))
} }
txIDOld := tx.TxID txIDOld := tx.TxID
@@ -83,7 +83,7 @@ func NewPoolL2Tx(tx *PoolL2Tx) (*PoolL2Tx, error) {
// If original TxID doesn't match the correct one, return error // If original TxID doesn't match the correct one, return error
if txIDOld != (TxID{}) && txIDOld != tx.TxID { if txIDOld != (TxID{}) && txIDOld != tx.TxID {
return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s", return tx, tracerr.Wrap(fmt.Errorf("PoolL2Tx.TxID: %s, should be: %s",
tx.TxID.String(), txIDOld.String())) txIDOld.String(), tx.TxID.String()))
} }
return tx, nil return tx, nil
@@ -126,7 +126,7 @@ func (tx *PoolL2Tx) SetID() error {
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// [ 16 bits ] chainId // 2 bytes // [ 16 bits ] chainId // 2 bytes
// [ 32 bits ] signatureConstant // 4 bytes // [ 32 bits ] signatureConstant // 4 bytes
// Total bits compressed data: 241 bits // 31 bytes in *big.Int representation // Total bits compressed data: 225 bits // 29 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
var b [29]byte var b [29]byte
@@ -179,7 +179,7 @@ func TxCompressedDataEmpty(chainID uint16) *big.Int {
// [ 40 bits ] amountFloat40 // 5 bytes // [ 40 bits ] amountFloat40 // 5 bytes
// [ 48 bits ] toIdx // 6 bytes // [ 48 bits ] toIdx // 6 bytes
// [ 48 bits ] fromIdx // 6 bytes // [ 48 bits ] fromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation // Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
if tx.Amount == nil { if tx.Amount == nil {
tx.Amount = big.NewInt(0) tx.Amount = big.NewInt(0)
@@ -238,7 +238,7 @@ func (tx *PoolL2Tx) TxCompressedDataV2() (*big.Int, error) {
// [ 40 bits ] rqAmountFloat40 // 5 bytes // [ 40 bits ] rqAmountFloat40 // 5 bytes
// [ 48 bits ] rqToIdx // 6 bytes // [ 48 bits ] rqToIdx // 6 bytes
// [ 48 bits ] rqFromIdx // 6 bytes // [ 48 bits ] rqFromIdx // 6 bytes
// Total bits compressed data: 193 bits // 25 bytes in *big.Int representation // Total bits compressed data: 217 bits // 28 bytes in *big.Int representation
func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) { func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
if tx.RqAmount == nil { if tx.RqAmount == nil {
tx.RqAmount = big.NewInt(0) tx.RqAmount = big.NewInt(0)
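Both new totals are consistent with the listed fields. For TxCompressedDataV2/RqTxCompressedDataV2: 48 (fromIdx) + 48 (toIdx) + 40 (amountFloat40) + 32 (tokenID) + 40 (nonce) + 8 (fee) + 1 (toBJJSign) = 217 bits, i.e. ceil(217/8) = 28 bytes; the +24 over the old 193 is the Float16→Float40 amount. TxCompressedData drops from 241 to 225 bits because the 16-bit amountFloat16 is removed from it entirely (the amount now travels in the V2 encoding), and 225 bits still fits the 29-byte buffer declared just below.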

View File

@@ -62,3 +62,17 @@ func RmEndingZeroes(siblings []*merkletree.Hash) []*merkletree.Hash {
} }
return siblings[:pos] return siblings[:pos]
} }
// TokensToUSD is a helper function to calculate the USD value of a certain
// amount of tokens considering the normalized token price (which is the price
// commonly reported by exchanges)
func TokensToUSD(amount *big.Int, decimals uint64, valueUSD float64) float64 {
amountF := new(big.Float).SetInt(amount)
// Divide by 10^decimals to normalize the amount
baseF := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(int64(decimals)), nil)) //nolint:gomnd
amountF.Mul(amountF, big.NewFloat(valueUSD))
amountF.Quo(amountF, baseF)
amountUSD, _ := amountF.Float64()
return amountUSD
}
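A quick usage sketch of the helper above (values illustrative): 2.5 units of an 18-decimals token priced at 4.00 USD yield 10.00 USD.

amount, _ := new(big.Int).SetString("2500000000000000000", 10) // 2.5 * 10^18
fmt.Println(TokensToUSD(amount, 18, 4.0))                      // 10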

View File

@@ -102,6 +102,8 @@ type ZKInputs struct {
ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx] ToBJJAy []*big.Int `json:"toBjjAy"` // big.Int, len: [maxTx]
// ToEthAddr // ToEthAddr
ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx] ToEthAddr []*big.Int `json:"toEthAddr"` // ethCommon.Address, len: [maxTx]
// AmountF encoded as float40
AmountF []*big.Int `json:"amountF"` // uint40, len: [maxTx]
// OnChain determines if is L1 (1/true) or L2 (0/false) // OnChain determines if is L1 (1/true) or L2 (0/false)
OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx] OnChain []*big.Int `json:"onChain"` // bool, len: [maxTx]
@@ -113,7 +115,7 @@ type ZKInputs struct {
// account (fromIdx==0) // account (fromIdx==0)
NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx] NewAccount []*big.Int `json:"newAccount"` // bool, len: [maxTx]
// DepositAmountF encoded as float40 // DepositAmountF encoded as float40
DepositAmountF []*big.Int `json:"loadAmountF"` // uint16, len: [maxTx] DepositAmountF []*big.Int `json:"loadAmountF"` // uint40, len: [maxTx]
// FromEthAddr // FromEthAddr
FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx] FromEthAddr []*big.Int `json:"fromEthAddr"` // ethCommon.Address, len: [maxTx]
// FromBJJCompressed boolean encoded where each value is a *big.Int // FromBJJCompressed boolean encoded where each value is a *big.Int
@@ -326,6 +328,7 @@ func NewZKInputs(chainID uint16, maxTx, maxL1Tx, maxFeeIdxs, nLevels uint32, cur
zki.AuxToIdx = newSlice(maxTx) zki.AuxToIdx = newSlice(maxTx)
zki.ToBJJAy = newSlice(maxTx) zki.ToBJJAy = newSlice(maxTx)
zki.ToEthAddr = newSlice(maxTx) zki.ToEthAddr = newSlice(maxTx)
zki.AmountF = newSlice(maxTx)
zki.OnChain = newSlice(maxTx) zki.OnChain = newSlice(maxTx)
zki.NewAccount = newSlice(maxTx) zki.NewAccount = newSlice(maxTx)
@@ -476,8 +479,8 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes()) copy(newExitRoot, z.Metadata.NewExitRootRaw.Bytes())
b = append(b, newExitRoot...) b = append(b, newExitRoot...)
// [MAX_L1_TX * (2 * MAX_NLEVELS + 480) bits] L1TxsData // [MAX_L1_TX * (2 * MAX_NLEVELS + 528) bits] L1TxsData
l1TxDataLen := (2*z.Metadata.MaxLevels + 480) l1TxDataLen := (2*z.Metadata.MaxLevels + 528)
l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen) l1TxsDataLen := (z.Metadata.MaxL1Tx * l1TxDataLen)
l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd l1TxsData := make([]byte, l1TxsDataLen/8) //nolint:gomnd
for i := 0; i < len(z.Metadata.L1TxsData); i++ { for i := 0; i < len(z.Metadata.L1TxsData); i++ {
@@ -494,9 +497,9 @@ func (z ZKInputs) ToHashGlobalData() ([]byte, error) {
} }
b = append(b, l1TxsDataAvailability...) b = append(b, l1TxsDataAvailability...)
// [MAX_TX*(2*NLevels + 24) bits] L2TxsData // [MAX_TX*(2*NLevels + 48) bits] L2TxsData
var l2TxsData []byte var l2TxsData []byte
l2TxDataLen := 2*z.Metadata.NLevels + 24 //nolint:gomnd l2TxDataLen := 2*z.Metadata.NLevels + 48 //nolint:gomnd
l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen) l2TxsDataLen := (z.Metadata.MaxTx * l2TxDataLen)
expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd expectedL2TxsDataLen := l2TxsDataLen / 8 //nolint:gomnd
for i := 0; i < len(z.Metadata.L2TxsData); i++ { for i := 0; i < len(z.Metadata.L2TxsData); i++ {
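Both constant bumps follow from the Float16→Float40 migration: each amount field grows by 24 bits. L1TxsData carries two such fields per tx (amount and deposit amount), hence 480 + 2*24 = 528; L2TxsData carries one amount plus an 8-bit fee, so its per-tx payload grows from 16+8 = 24 to 40+8 = 48 bits.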

View File

@@ -3,6 +3,7 @@ package config
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big"
"time" "time"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
@@ -34,10 +35,30 @@ type ServerProof struct {
URL string `validate:"required"` URL string `validate:"required"`
} }
// ForgeBatchGasCost is the costs associated to a ForgeBatch transaction, split
// into different parts to be used in a formula.
type ForgeBatchGasCost struct {
Fixed uint64 `validate:"required"`
L1UserTx uint64 `validate:"required"`
L1CoordTx uint64 `validate:"required"`
L2Tx uint64 `validate:"required"`
}
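A hedged sketch of how these four parameters are presumably combined into the ForgeBatch gas limit, per the "Calculate ForgeBatch gasLimit with parametrized formula" commit (the tx-slice names are hypothetical; the coordinator's actual call site may differ):

gasLimit := cfg.Fixed +
	uint64(len(l1UserTxs))*cfg.L1UserTx +
	uint64(len(l1CoordTxs))*cfg.L1CoordTx +
	uint64(len(l2Txs))*cfg.L2Tx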
// CoordinatorAPI specifies the configuration parameters of the API in
// coordinator mode
type CoordinatorAPI struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
}
// Coordinator is the coordinator specific configuration. // Coordinator is the coordinator specific configuration.
type Coordinator struct { type Coordinator struct {
// ForgerAddress is the address under which this coordinator is forging // ForgerAddress is the address under which this coordinator is forging
ForgerAddress ethCommon.Address `validate:"required"` ForgerAddress ethCommon.Address `validate:"required"`
// MinimumForgeAddressBalance is the minimum balance the forger address
// needs to start the coordinator, in wei. If set to 0, the coordinator
// will not check the balance before starting.
MinimumForgeAddressBalance *big.Int
// FeeAccount is the Hermez account that the coordinator uses to receive fees // FeeAccount is the Hermez account that the coordinator uses to receive fees
FeeAccount struct { FeeAccount struct {
// Address is the ethereum address of the account to receive fees // Address is the ethereum address of the account to receive fees
@@ -51,33 +72,85 @@ type Coordinator struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 `validate:"required"` L1BatchTimeoutPerc float64 `validate:"required"`
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though we can forge at block 11, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though we can forge at block 11, the batch will be discarded
// if we can't forge at block 15.
SendBatchBlocksMarginCheck int64
// ProofServerPollInterval is the waiting interval between polling the // ProofServerPollInterval is the waiting interval between polling the
// ProofServer while waiting for a particular status // ProofServer while waiting for a particular status
ProofServerPollInterval Duration `validate:"required"` ProofServerPollInterval Duration `validate:"required"`
// ForgeRetryInterval is the waiting interval between calls to forge a // ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error // batch after an error
ForgeRetryInterval Duration `validate:"required"` ForgeRetryInterval Duration `validate:"required"`
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay Duration `validate:"-"`
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay Duration `validate:"-"`
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval Duration `validate:"required"` SyncRetryInterval Duration `validate:"required"`
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval Duration `validate:"required"`
// L2DB is the DB that holds the pool of L2Txs // L2DB is the DB that holds the pool of L2Txs
L2DB struct { L2DB struct {
// SafetyPeriod is the number of batches after which // SafetyPeriod is the number of batches after which
// non-pending L2Txs are deleted from the pool // non-pending L2Txs are deleted from the pool
SafetyPeriod common.BatchNum `validate:"required"` SafetyPeriod common.BatchNum `validate:"required"`
// MaxTxs is the number of L2Txs that once reached triggers // MaxTxs is the maximum number of pending L2Txs that can be
// deletion of old L2Txs // stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"` MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted. // L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"` TTL Duration `validate:"required"`
// PurgeBatchDelay is the delay between batches to purge outdated transactions // PurgeBatchDelay is the delay between batches to purge
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBatchDelay int64 `validate:"required"` PurgeBatchDelay int64 `validate:"required"`
// InvalidateBatchDelay is the delay between batches to mark invalid transactions // InvalidateBatchDelay is the delay between batches to mark
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBatchDelay int64 `validate:"required"` InvalidateBatchDelay int64 `validate:"required"`
// PurgeBlockDelay is the delay between blocks to purge outdated transactions // PurgeBlockDelay is the delay between blocks to purge
// outdated transactions. Outdated L2Txs are those that have
// been forged or marked as invalid for longer than the
// SafetyPeriod and pending L2Txs that have been in the pool
// for longer than TTL once there are MaxTxs.
PurgeBlockDelay int64 `validate:"required"` PurgeBlockDelay int64 `validate:"required"`
// InvalidateBlockDelay is the delay between blocks to mark invalid transactions // InvalidateBlockDelay is the delay between blocks to mark
// invalid transactions due to nonce lower than the account
// nonce.
InvalidateBlockDelay int64 `validate:"required"` InvalidateBlockDelay int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
TxSelector struct { TxSelector struct {
@@ -97,12 +170,13 @@ type Coordinator struct {
NLevels int64 `validate:"required"` NLevels int64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
EthClient struct { EthClient struct {
// CallGasLimit is the default gas limit set for ethereum // MaxGasPrice is the maximum gas price allowed for ethereum
// calls, except for methods where a particular gas limit is // transactions
// hardcoded because it's known to be a big value MaxGasPrice *big.Int `validate:"required"`
CallGasLimit uint64 `validate:"required"` // GasPriceIncPerc is the percentage increase of gas price set
// GasPriceDiv is the gas price division // in an ethereum transaction from the suggested gas price by
GasPriceDiv uint64 `validate:"required"` // the ethereum node
GasPriceIncPerc int64
// CheckLoopInterval is the waiting interval between receipt // CheckLoopInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
CheckLoopInterval Duration `validate:"required"` CheckLoopInterval Duration `validate:"required"`
@@ -112,6 +186,13 @@ type Coordinator struct {
// AttemptsDelay is the delay between attempts to do an eth client // AttemptsDelay is the delay between attempts to do an eth client
// RPC call // RPC call
AttemptsDelay Duration `validate:"required"` AttemptsDelay Duration `validate:"required"`
// TxResendTimeout is the timeout after which a non-mined
// ethereum transaction will be resent (reusing the nonce) with
// a newly calculated gas price
TxResendTimeout Duration `validate:"required"`
// NoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
NoReuseNonce bool
// Keystore is the ethereum keystore where private keys are kept // Keystore is the ethereum keystore where private keys are kept
Keystore struct { Keystore struct {
// Path to the keystore // Path to the keystore
@@ -119,11 +200,11 @@ type Coordinator struct {
// Password used to decrypt the keys in the keystore // Password used to decrypt the keys in the keystore
Password string `validate:"required"` Password string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost ForgeBatchGasCost `validate:"required"`
} `validate:"required"` } `validate:"required"`
API struct { API CoordinatorAPI `validate:"required"`
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
Debug struct { Debug struct {
// BatchPath if set, specifies the path where batchInfo is stored // BatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
@@ -138,6 +219,45 @@ type Coordinator struct {
} }
} }
// PostgreSQL holds the postgreSQL configuration parameters. It's possible to
// use differentiated SQL connections for read/write. If the read configuration
// is not provided, the write one is used for both reads and writes.
type PostgreSQL struct {
// Port of the PostgreSQL write server
PortWrite int `validate:"required"`
// Host of the PostgreSQL write server
HostWrite string `validate:"required"`
// User of the PostgreSQL write server
UserWrite string `validate:"required"`
// Password of the PostgreSQL write server
PasswordWrite string `validate:"required"`
// Name of the PostgreSQL write server database
NameWrite string `validate:"required"`
// Port of the PostgreSQL read server
PortRead int
// Host of the PostgreSQL read server
HostRead string
// User of the PostgreSQL read server
UserRead string
// Password of the PostgreSQL read server
PasswordRead string
// Name of the PostgreSQL read server database
NameRead string
}
// NodeDebug specifies debug configuration parameters
type NodeDebug struct {
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
// GinDebugMode sets Gin-Gonic (the web framework) to run in
// debug mode
GinDebugMode bool
}
// Node is the hermez node configuration. // Node is the hermez node configuration.
type Node struct { type Node struct {
PriceUpdater struct { PriceUpdater struct {
@@ -154,19 +274,8 @@ type Node struct {
// Keep is the number of checkpoints to keep // Keep is the number of checkpoints to keep
Keep int `validate:"required"` Keep int `validate:"required"`
} `validate:"required"` } `validate:"required"`
PostgreSQL struct { PostgreSQL PostgreSQL `validate:"required"`
// Port of the PostgreSQL server Web3 struct {
Port int `validate:"required"`
// Host of the PostgreSQL server
Host string `validate:"required"`
// User of the PostgreSQL server
User string `validate:"required"`
// Password of the PostgreSQL server
Password string `validate:"required"`
// Name of the PostgreSQL server database
Name string `validate:"required"`
} `validate:"required"`
Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server // URL is the URL of the web3 ethereum-node RPC server
URL string `validate:"required"` URL string `validate:"required"`
} `validate:"required"` } `validate:"required"`
@@ -196,6 +305,7 @@ type Node struct {
// TokenHEZ address // TokenHEZ address
TokenHEZName string `validate:"required"` TokenHEZName string `validate:"required"`
} `validate:"required"` } `validate:"required"`
// API specifies the configuration parameters of the API
API struct { API struct {
// Address where the API will listen if set // Address where the API will listen if set
Address string Address string
@@ -213,17 +323,45 @@ type Node struct {
// can wait to establish a SQL connection // can wait to establish a SQL connection
SQLConnectionTimeout Duration SQLConnectionTimeout Duration
} `validate:"required"` } `validate:"required"`
Debug struct { Debug NodeDebug `validate:"required"`
// APIAddress is the address where the debugAPI will listen if
// set
APIAddress string
// MeddlerLogs enables meddler debug mode, where unused columns and struct
// fields will be logged
MeddlerLogs bool
}
Coordinator Coordinator `validate:"-"` Coordinator Coordinator `validate:"-"`
} }
// APIServer holds the API server configuration parameters
type APIServer struct {
// API specifies the configuration parameters of the API
API struct {
// Address where the API will listen if set
Address string `validate:"required"`
// Explorer enables the Explorer API endpoints
Explorer bool
// Maximum concurrent connections allowed between API and SQL
MaxSQLConnections int `validate:"required"`
// SQLConnectionTimeout is the maximum amount of time that an API request
// can wait to establish a SQL connection
SQLConnectionTimeout Duration
} `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"`
Coordinator struct {
API struct {
// Coordinator enables the coordinator API endpoints
Coordinator bool
} `validate:"required"`
L2DB struct {
// MaxTxs is the maximum number of pending L2Txs that can be
// stored in the pool. Once this number of pending L2Txs is
// reached, inserts to the pool will be denied until some of
// the pending txs are forged.
MaxTxs uint32 `validate:"required"`
// MinFeeUSD is the minimum fee in USD that a tx must pay in
// order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level.
MinFeeUSD float64
} `validate:"required"`
}
Debug NodeDebug `validate:"required"`
}
// Load loads a generic config. // Load loads a generic config.
func Load(path string, cfg interface{}) error { func Load(path string, cfg interface{}) error {
bs, err := ioutil.ReadFile(path) //nolint:gosec bs, err := ioutil.ReadFile(path) //nolint:gosec
@@ -237,8 +375,8 @@ func Load(path string, cfg interface{}) error {
return nil return nil
} }
// LoadCoordinator loads the Coordinator configuration from path. // LoadNode loads the Node configuration from path.
func LoadCoordinator(path string) (*Node, error) { func LoadNode(path string, coordinator bool) (*Node, error) {
var cfg Node var cfg Node
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err))
@@ -247,21 +385,28 @@ func LoadCoordinator(path string) (*Node, error) {
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if err := validate.Struct(cfg.Coordinator); err != nil { if coordinator {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
} }
return &cfg, nil return &cfg, nil
} }
// LoadNode loads the Node configuration from path. // LoadAPIServer loads the APIServer configuration from path.
func LoadNode(path string) (*Node, error) { func LoadAPIServer(path string, coordinator bool) (*APIServer, error) {
var cfg Node var cfg APIServer
if err := Load(path, &cfg); err != nil { if err := Load(path, &cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error loading node configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error loading apiServer configuration file: %w", err))
} }
validate := validator.New() validate := validator.New()
if err := validate.Struct(cfg); err != nil { if err := validate.Struct(cfg); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
} }
if coordinator {
if err := validate.Struct(cfg.Coordinator); err != nil {
return nil, tracerr.Wrap(fmt.Errorf("error validating configuration file: %w", err))
}
}
return &cfg, nil return &cfg, nil
} }
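Usage sketch for the reworked loaders (paths are illustrative): a coordinator passes true so the Coordinator section is validated too, while a standalone api-server loads the slimmer APIServer config:

nodeCfg, err := config.LoadNode("node.toml", true) // also validates cfg.Coordinator
if err != nil {
	log.Fatal(err)
}
apiCfg, err := config.LoadAPIServer("api.toml", false)
if err != nil {
	log.Fatal(err)
}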

View File

@@ -47,6 +47,8 @@ type Debug struct {
MineBlockNum int64 MineBlockNum int64
// SendBlockNum is the blockNum when the batch was sent to ethereum // SendBlockNum is the blockNum when the batch was sent to ethereum
SendBlockNum int64 SendBlockNum int64
// ResendNum is the number of times the tx has been resent
ResendNum int
// LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch // LastScheduledL1BatchBlockNum is the blockNum when the last L1Batch
// was scheduled // was scheduled
LastScheduledL1BatchBlockNum int64 LastScheduledL1BatchBlockNum int64
@@ -64,10 +66,17 @@ type Debug struct {
// StartToSendDelay is the delay between starting a batch and sending // StartToSendDelay is the delay between starting a batch and sending
// it to ethereum, in seconds // it to ethereum, in seconds
StartToSendDelay float64 StartToSendDelay float64
// StartToMineDelay is the delay between starting a batch and having
// it mined in seconds
StartToMineDelay float64
// SendToMineDelay is the delay between sending a batch tx and having
// it mined in seconds
SendToMineDelay float64
} }
// BatchInfo contains the Batch information // BatchInfo contains the Batch information
type BatchInfo struct { type BatchInfo struct {
PipelineNum int
BatchNum common.BatchNum BatchNum common.BatchNum
ServerProof prover.Client ServerProof prover.Client
ZKInputs *common.ZKInputs ZKInputs *common.ZKInputs
@@ -82,9 +91,16 @@ type BatchInfo struct {
CoordIdxs []common.Idx CoordIdxs []common.Idx
ForgeBatchArgs *eth.RollupForgeBatchArgs ForgeBatchArgs *eth.RollupForgeBatchArgs
// FeesInfo // FeesInfo
EthTx *types.Transaction EthTx *types.Transaction
Receipt *types.Receipt EthTxErr error
Debug Debug // SendTimestamp is the time when the batch was sent to ethereum
SendTimestamp time.Time
Receipt *types.Receipt
// Fail is true if:
// - The receipt status is failed
// - A previous parent batch is failed
Fail bool
Debug Debug
} }
// DebugStore is a debug function to store the BatchInfo as a json text file in // DebugStore is a debug function to store the BatchInfo as a json text file in

View File

@@ -3,14 +3,15 @@ package coordinator
import ( import (
"context" "context"
"fmt" "fmt"
"math/big"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
@@ -23,7 +24,9 @@ import (
) )
var ( var (
errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet") errLastL1BatchNotSynced = fmt.Errorf("last L1Batch not synced yet")
errForgeNoTxsBeforeDelay = fmt.Errorf("no txs to forge and we haven't reached the forge no txs delay")
errForgeBeforeDelay = fmt.Errorf("we haven't reached the forge delay")
) )
const ( const (
@@ -42,26 +45,81 @@ type Config struct {
// L1BatchTimeoutPerc is the portion of the range before the L1Batch // L1BatchTimeoutPerc is the portion of the range before the L1Batch
// timeout that will trigger a schedule to forge an L1Batch // timeout that will trigger a schedule to forge an L1Batch
L1BatchTimeoutPerc float64 L1BatchTimeoutPerc float64
// StartSlotBlocksDelay is the number of blocks of delay to wait before
// starting the pipeline when we reach a slot in which we can forge.
StartSlotBlocksDelay int64
// ScheduleBatchBlocksAheadCheck is the number of blocks ahead in which
// the forger address is checked to be allowed to forge (apart from
// checking the next block), used to decide when to stop scheduling new
// batches (by stopping the pipeline).
// For example, if we are at block 10 and ScheduleBatchBlocksAheadCheck
// is 5, even though we can forge at block 11, the pipeline will be
// stopped if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// scheduling a batch and having it mined.
ScheduleBatchBlocksAheadCheck int64
// SendBatchBlocksMarginCheck is the number of margin blocks ahead in
// which the coordinator is also checked to be allowed to forge, apart
// from the next block; used to decide when to stop sending batches to
// the smart contract.
// For example, if we are at block 10 and SendBatchBlocksMarginCheck is
// 5, even though we can forge at block 11, the batch will be discarded
// if we can't forge at block 15.
// This value should be the expected number of blocks it takes between
// sending a batch and having it mined.
SendBatchBlocksMarginCheck int64
// EthClientAttempts is the number of attempts to do an eth client RPC // EthClientAttempts is the number of attempts to do an eth client RPC
// call before giving up // call before giving up
EthClientAttempts int EthClientAttempts int
// ForgeRetryInterval is the waiting interval between calls to forge a // ForgeRetryInterval is the waiting interval between calls to forge a
// batch after an error // batch after an error
ForgeRetryInterval time.Duration ForgeRetryInterval time.Duration
// ForgeDelay is the delay after which a batch is forged if the slot is
// already committed. If set to 0s, the coordinator will continuously
// forge at the maximum rate.
ForgeDelay time.Duration
// ForgeNoTxsDelay is the delay after which a batch is forged even if
// there are no txs to forge if the slot is already committed. If set
// to 0s, the coordinator will continuously forge even if the batches
// are empty.
ForgeNoTxsDelay time.Duration
// SyncRetryInterval is the waiting interval between calls to the main // SyncRetryInterval is the waiting interval between calls to the main
// handler of a synced block after an error // handler of a synced block after an error
SyncRetryInterval time.Duration SyncRetryInterval time.Duration
// PurgeByExtDelInterval is the waiting interval between calls
// to the PurgeByExternalDelete function of the l2db which deletes
// pending txs externally marked by the column `external_delete`
PurgeByExtDelInterval time.Duration
// EthClientAttemptsDelay is the delay between attempts to do an eth client // EthClientAttemptsDelay is the delay between attempts to do an eth client
// RPC call // RPC call
EthClientAttemptsDelay time.Duration EthClientAttemptsDelay time.Duration
// EthTxResendTimeout is the timeout after which a non-mined ethereum
// transaction will be resent (reusing the nonce) with a newly
// calculated gas price
EthTxResendTimeout time.Duration
// EthNoReuseNonce disables reusing nonces of pending transactions for
// new replacement transactions
EthNoReuseNonce bool
// MaxGasPrice is the maximum gas price allowed for ethereum
// transactions
MaxGasPrice *big.Int
// GasPriceIncPerc is the percentage increase of gas price set in an
// ethereum transaction from the suggested gas price by the ethereum
// node
GasPriceIncPerc int64
// TxManagerCheckInterval is the waiting interval between receipt // TxManagerCheckInterval is the waiting interval between receipt
// checks of ethereum transactions in the TxManager // checks of ethereum transactions in the TxManager
TxManagerCheckInterval time.Duration TxManagerCheckInterval time.Duration
// DebugBatchPath if set, specifies the path where batchInfo is stored // DebugBatchPath if set, specifies the path where batchInfo is stored
// in JSON in every step/update of the pipeline // in JSON in every step/update of the pipeline
DebugBatchPath string DebugBatchPath string
Purger PurgerCfg Purger PurgerCfg
VerifierIdx uint8 // VerifierIdx is the index of the verifier contract registered in the
// smart contract
VerifierIdx uint8
// ForgeBatchGasCost contains the cost of each action in the
// ForgeBatch transaction.
ForgeBatchGasCost config.ForgeBatchGasCost
TxProcessorConfig txprocessor.Config TxProcessorConfig txprocessor.Config
} }
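A hedged sketch of how MaxGasPrice and GasPriceIncPerc plausibly combine when pricing an ethereum transaction (`suggested` stands for the node's suggested gas price; the TxManager's real logic may differ):

gasPrice := new(big.Int).Mul(suggested, big.NewInt(100+cfg.GasPriceIncPerc))
gasPrice.Div(gasPrice, big.NewInt(100)) // suggested * (100 + inc) / 100
if gasPrice.Cmp(cfg.MaxGasPrice) > 0 {
	gasPrice.Set(cfg.MaxGasPrice) // cap at the configured maximum
}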
@@ -74,15 +132,22 @@ func (c *Config) debugBatchStore(batchInfo *BatchInfo) {
} }
} }
type fromBatch struct {
BatchNum common.BatchNum
ForgerAddr ethCommon.Address
StateRoot *big.Int
}
// Coordinator implements the Coordinator type // Coordinator implements the Coordinator type
type Coordinator struct { type Coordinator struct {
// State // State
pipelineBatchNum common.BatchNum // batchNum from which we started the pipeline pipelineNum int // Pipeline sequential number. The first pipeline is 1
provers []prover.Client pipelineFromBatch fromBatch // batch from which we started the pipeline
consts synchronizer.SCConsts provers []prover.Client
vars synchronizer.SCVariables consts common.SCConsts
stats synchronizer.Stats vars common.SCVariables
started bool stats synchronizer.Stats
started bool
cfg Config cfg Config
@@ -96,7 +161,17 @@ type Coordinator struct {
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc cancel context.CancelFunc
pipeline *Pipeline // mutexL2DBUpdateDelete protects updates to the L2DB so that
// these two processes always happen exclusively:
// - Pipeline taking pending txs, running through the TxProcessor and
// marking selected txs as forging
// - Coordinator deleting pending txs that have been marked with
// `external_delete`.
// Without this mutex, the coordinator could delete a pending tx that
// has just been selected by the TxProcessor in the pipeline.
mutexL2DBUpdateDelete sync.Mutex
pipeline *Pipeline
lastNonFailedBatchNum common.BatchNum
purger *Purger purger *Purger
txManager *TxManager txManager *TxManager
@@ -110,8 +185,8 @@ func NewCoordinator(cfg Config,
batchBuilder *batchbuilder.BatchBuilder, batchBuilder *batchbuilder.BatchBuilder,
serverProofs []prover.Client, serverProofs []prover.Client,
ethClient eth.ClientInterface, ethClient eth.ClientInterface,
scConsts *synchronizer.SCConsts, scConsts *common.SCConsts,
initSCVars *synchronizer.SCVariables, initSCVars *common.SCVariables,
) (*Coordinator, error) { ) (*Coordinator, error) {
// nolint reason: hardcoded `1.0`, by design the percentage can't be over 100% // nolint reason: hardcoded `1.0`, by design the percentage can't be over 100%
if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd if cfg.L1BatchTimeoutPerc >= 1.0 { //nolint:gomnd
@@ -139,10 +214,15 @@ func NewCoordinator(cfg Config,
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
c := Coordinator{ c := Coordinator{
pipelineBatchNum: -1, pipelineNum: 0,
provers: serverProofs, pipelineFromBatch: fromBatch{
consts: *scConsts, BatchNum: 0,
vars: *initSCVars, ForgerAddr: ethCommon.Address{},
StateRoot: big.NewInt(0),
},
provers: serverProofs,
consts: *scConsts,
vars: *initSCVars,
cfg: cfg, cfg: cfg,
@@ -183,8 +263,10 @@ func (c *Coordinator) BatchBuilder() *batchbuilder.BatchBuilder {
} }
func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) { func (c *Coordinator) newPipeline(ctx context.Context) (*Pipeline, error) {
return NewPipeline(ctx, c.cfg, c.historyDB, c.l2DB, c.txSelector, c.pipelineNum++
c.batchBuilder, c.purger, c.txManager, c.provers, &c.consts) return NewPipeline(ctx, c.cfg, c.pipelineNum, c.historyDB, c.l2DB, c.txSelector,
c.batchBuilder, &c.mutexL2DBUpdateDelete, c.purger, c, c.txManager,
c.provers, &c.consts)
} }
// MsgSyncBlock indicates an update to the Synchronizer stats // MsgSyncBlock indicates an update to the Synchronizer stats
@@ -193,18 +275,21 @@ type MsgSyncBlock struct {
Batches []common.BatchData Batches []common.BatchData
// Vars contains each Smart Contract variables if they are updated, or // Vars contains each Smart Contract variables if they are updated, or
// nil if they haven't changed. // nil if they haven't changed.
Vars synchronizer.SCVariablesPtr Vars common.SCVariablesPtr
} }
// MsgSyncReorg indicates a reorg // MsgSyncReorg indicates a reorg
type MsgSyncReorg struct { type MsgSyncReorg struct {
Stats synchronizer.Stats Stats synchronizer.Stats
Vars synchronizer.SCVariablesPtr Vars common.SCVariablesPtr
} }
// MsgStopPipeline indicates a signal to reset the pipeline // MsgStopPipeline indicates a signal to reset the pipeline
type MsgStopPipeline struct { type MsgStopPipeline struct {
Reason string Reason string
// FailedBatchNum indicates the first batchNum that failed in the
// pipeline. If FailedBatchNum is 0, it should be ignored.
FailedBatchNum common.BatchNum
} }
// SendMsg is a thread safe method to pass a message to the Coordinator // SendMsg is a thread safe method to pass a message to the Coordinator
@@ -215,27 +300,36 @@ func (c *Coordinator) SendMsg(ctx context.Context, msg interface{}) {
} }
} }
func (c *Coordinator) syncSCVars(vars synchronizer.SCVariablesPtr) { func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
if vars.Rollup != nil { if update.Rollup != nil {
c.vars.Rollup = *vars.Rollup vars.Rollup = *update.Rollup
} }
if vars.Auction != nil { if update.Auction != nil {
c.vars.Auction = *vars.Auction vars.Auction = *update.Auction
} }
if vars.WDelayer != nil { if update.WDelayer != nil {
c.vars.WDelayer = *vars.WDelayer vars.WDelayer = *update.WDelayer
} }
} }
func (c *Coordinator) syncSCVars(vars common.SCVariablesPtr) {
updateSCVars(&c.vars, vars)
}
func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables, func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.AuctionVariables,
currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool { currentSlot *common.Slot, nextSlot *common.Slot, addr ethCommon.Address, blockNum int64) bool {
if blockNum < auctionConstants.GenesisBlockNum {
log.Infow("canForge: requested blockNum is < genesis", "blockNum", blockNum,
"genesis", auctionConstants.GenesisBlockNum)
return false
}
var slot *common.Slot var slot *common.Slot
if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock { if currentSlot.StartBlock <= blockNum && blockNum <= currentSlot.EndBlock {
slot = currentSlot slot = currentSlot
} else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock { } else if nextSlot.StartBlock <= blockNum && blockNum <= nextSlot.EndBlock {
slot = nextSlot slot = nextSlot
} else { } else {
log.Warnw("Coordinator: requested blockNum for canForge is outside slot", log.Warnw("canForge: requested blockNum is outside current and next slot",
"blockNum", blockNum, "currentSlot", currentSlot, "blockNum", blockNum, "currentSlot", currentSlot,
"nextSlot", nextSlot, "nextSlot", nextSlot,
) )
@@ -244,16 +338,23 @@ func canForge(auctionConstants *common.AuctionConstants, auctionVars *common.Auc
anyoneForge := false anyoneForge := false
if !slot.ForgerCommitment && if !slot.ForgerCommitment &&
auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) { auctionConstants.RelativeBlock(blockNum) >= int64(auctionVars.SlotDeadline) {
log.Debugw("Coordinator: anyone can forge in the current slot (slotDeadline passed)", log.Debugw("canForge: anyone can forge in the current slot (slotDeadline passed)",
"block", blockNum) "block", blockNum)
anyoneForge = true anyoneForge = true
} }
if slot.Forger == addr || anyoneForge { if slot.Forger == addr || anyoneForge {
return true return true
} }
log.Debugw("canForge: can't forge", "slot.Forger", slot.Forger)
return false return false
} }
func (c *Coordinator) canForgeAt(blockNum int64) bool {
return canForge(&c.consts.Auction, &c.vars.Auction,
&c.stats.Sync.Auction.CurrentSlot, &c.stats.Sync.Auction.NextSlot,
c.cfg.ForgerAddress, blockNum)
}
func (c *Coordinator) canForge() bool { func (c *Coordinator) canForge() bool {
blockNum := c.stats.Eth.LastBlock.Num + 1 blockNum := c.stats.Eth.LastBlock.Num + 1
return canForge(&c.consts.Auction, &c.vars.Auction, return canForge(&c.consts.Auction, &c.vars.Auction,
@@ -262,21 +363,51 @@ func (c *Coordinator) canForge() bool {
} }
func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error { func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats) error {
canForge := c.canForge() nextBlock := c.stats.Eth.LastBlock.Num + 1
canForge := c.canForgeAt(nextBlock)
if c.cfg.ScheduleBatchBlocksAheadCheck != 0 && canForge {
canForge = c.canForgeAt(nextBlock + c.cfg.ScheduleBatchBlocksAheadCheck)
}
if c.pipeline == nil { if c.pipeline == nil {
if canForge { relativeBlock := c.consts.Auction.RelativeBlock(nextBlock)
if canForge && relativeBlock < c.cfg.StartSlotBlocksDelay {
log.Debugf("Coordinator: delaying pipeline start due to "+
"relativeBlock (%v) < cfg.StartSlotBlocksDelay (%v)",
relativeBlock, c.cfg.StartSlotBlocksDelay)
} else if canForge {
log.Infow("Coordinator: forging state begin", "block", log.Infow("Coordinator: forging state begin", "block",
stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch) stats.Eth.LastBlock.Num+1, "batch", stats.Sync.LastBatch.BatchNum)
batchNum := common.BatchNum(stats.Sync.LastBatch) fromBatch := fromBatch{
BatchNum: stats.Sync.LastBatch.BatchNum,
ForgerAddr: stats.Sync.LastBatch.ForgerAddr,
StateRoot: stats.Sync.LastBatch.StateRoot,
}
if c.lastNonFailedBatchNum > fromBatch.BatchNum {
fromBatch.BatchNum = c.lastNonFailedBatchNum
fromBatch.ForgerAddr = c.cfg.ForgerAddress
fromBatch.StateRoot = big.NewInt(0)
}
// Before starting the pipeline make sure we reset any
// l2tx from the pool that was forged in a batch that
// didn't end up being mined. We are already doing
// this in handleStopPipeline, but we do it again as a
// failsafe in case the last synced batchnum is
// different than in the previous call to l2DB.Reorg,
// or in case the node was restarted when there was a
// started batch that included l2txs but was not mined.
if err := c.l2DB.Reorg(fromBatch.BatchNum); err != nil {
return tracerr.Wrap(err)
}
var err error var err error
if c.pipeline, err = c.newPipeline(ctx); err != nil { if c.pipeline, err = c.newPipeline(ctx); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if err := c.pipeline.Start(batchNum, stats, &c.vars); err != nil { c.pipelineFromBatch = fromBatch
// Start the pipeline
if err := c.pipeline.Start(fromBatch.BatchNum, stats, &c.vars); err != nil {
c.pipeline = nil c.pipeline = nil
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
c.pipelineBatchNum = batchNum
} }
} else { } else {
if !canForge { if !canForge {
@@ -286,25 +417,12 @@ func (c *Coordinator) syncStats(ctx context.Context, stats *synchronizer.Stats)
} }
} }
if c.pipeline == nil { if c.pipeline == nil {
// Mark invalid in Pool due to forged L2Txs if _, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
// for _, batch := range batches { stats.Sync.LastBlock.Num, int64(stats.Sync.LastBatch.BatchNum)); err != nil {
// if err := c.l2DB.InvalidateOldNonces(
// idxsNonceFromL2Txs(batch.L2Txs), batch.Batch.BatchNum); err != nil {
// return err
// }
// }
if c.purger.CanInvalidate(stats.Sync.LastBlock.Num, stats.Sync.LastBatch) {
if err := c.txSelector.Reset(common.BatchNum(stats.Sync.LastBatch)); err != nil {
return tracerr.Wrap(err)
}
}
_, err := c.purger.InvalidateMaybe(c.l2DB, c.txSelector.LocalAccountsDB(),
stats.Sync.LastBlock.Num, stats.Sync.LastBatch)
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
_, err = c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num, stats.Sync.LastBatch) if _, err := c.purger.PurgeMaybe(c.l2DB, stats.Sync.LastBlock.Num,
if err != nil { int64(stats.Sync.LastBatch.BatchNum)); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -331,33 +449,43 @@ func (c *Coordinator) handleReorg(ctx context.Context, msg *MsgSyncReorg) error
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars) c.pipeline.SetSyncStatsVars(ctx, &msg.Stats, &msg.Vars)
} }
if common.BatchNum(c.stats.Sync.LastBatch) < c.pipelineBatchNum { if c.stats.Sync.LastBatch.ForgerAddr != c.cfg.ForgerAddress &&
// There's been a reorg and the batch from which the pipeline (c.stats.Sync.LastBatch.StateRoot == nil || c.pipelineFromBatch.StateRoot == nil ||
// was started was in a block that was discarded. The batch c.stats.Sync.LastBatch.StateRoot.Cmp(c.pipelineFromBatch.StateRoot) != 0) {
// may not be in the main chain, so we stop the pipeline as a // There's been a reorg and the batch state root from which the
// precaution (it will be started again once the node is in // pipeline was started has changed (probably because it was in
// sync). // a block that was discarded), and it was sent by a different
log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch < c.pipelineBatchNum", // coordinator than us. That batch may never be in the main
"sync.LastBatch", c.stats.Sync.LastBatch, // chain, so we stop the pipeline (it will be started again
"c.pipelineBatchNum", c.pipelineBatchNum) // once the node is in sync).
if err := c.handleStopPipeline(ctx, "reorg"); err != nil { log.Infow("Coordinator.handleReorg StopPipeline sync.LastBatch.ForgerAddr != cfg.ForgerAddr "+
"& sync.LastBatch.StateRoot != pipelineFromBatch.StateRoot",
"sync.LastBatch.StateRoot", c.stats.Sync.LastBatch.StateRoot,
"pipelineFromBatch.StateRoot", c.pipelineFromBatch.StateRoot)
c.txManager.DiscardPipeline(ctx, c.pipelineNum)
if err := c.handleStopPipeline(ctx, "reorg", 0); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string) error { // handleStopPipeline handles stopping the pipeline. If failedBatchNum is 0,
// the next pipeline will start from the last state of the synchronizer,
// otherwise, it will start from failedBatchNum-1.
func (c *Coordinator) handleStopPipeline(ctx context.Context, reason string, failedBatchNum common.BatchNum) error {
batchNum := c.stats.Sync.LastBatch.BatchNum
if failedBatchNum != 0 {
batchNum = failedBatchNum - 1
}
if c.pipeline != nil { if c.pipeline != nil {
c.pipeline.Stop(c.ctx) c.pipeline.Stop(c.ctx)
c.pipeline = nil c.pipeline = nil
} }
if err := c.l2DB.Reorg(common.BatchNum(c.stats.Sync.LastBatch)); err != nil { if err := c.l2DB.Reorg(batchNum); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if strings.Contains(reason, common.AuctionErrMsgCannotForge) { //nolint:staticcheck c.lastNonFailedBatchNum = batchNum
// TODO: Check that we are in a slot in which we can't forge
}
return nil return nil
} }
@@ -373,7 +501,7 @@ func (c *Coordinator) handleMsg(ctx context.Context, msg interface{}) error {
} }
case MsgStopPipeline: case MsgStopPipeline:
log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason) log.Infow("Coordinator received MsgStopPipeline", "reason", msg.Reason)
if err := c.handleStopPipeline(ctx, msg.Reason); err != nil { if err := c.handleStopPipeline(ctx, msg.Reason, msg.FailedBatchNum); err != nil {
return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err)) return tracerr.Wrap(fmt.Errorf("Coordinator.handleStopPipeline: %w", err))
} }
default: default:
@@ -396,7 +524,7 @@ func (c *Coordinator) Start() {
	c.wg.Add(1)
	go func() {
-		waitDuration := longWaitDuration
		timer := time.NewTimer(longWaitDuration)
		for {
			select {
			case <-c.ctx.Done():
@@ -408,23 +536,45 @@ func (c *Coordinator) Start() {
					continue
				} else if err != nil {
					log.Errorw("Coordinator.handleMsg", "err", err)
-					waitDuration = c.cfg.SyncRetryInterval
					if !timer.Stop() {
						<-timer.C
					}
					timer.Reset(c.cfg.SyncRetryInterval)
					continue
				}
-				waitDuration = longWaitDuration
-			case <-time.After(waitDuration):
			case <-timer.C:
				timer.Reset(longWaitDuration)
				if !c.stats.Synced() {
-					waitDuration = longWaitDuration
					continue
				}
				if err := c.syncStats(c.ctx, &c.stats); c.ctx.Err() != nil {
					continue
				} else if err != nil {
					log.Errorw("Coordinator.syncStats", "err", err)
-					waitDuration = c.cfg.SyncRetryInterval
					if !timer.Stop() {
						<-timer.C
					}
					timer.Reset(c.cfg.SyncRetryInterval)
					continue
				}
-				waitDuration = longWaitDuration
			}
		}
	}()
	c.wg.Add(1)
	go func() {
		for {
			select {
			case <-c.ctx.Done():
				log.Info("Coordinator L2DB.PurgeByExternalDelete loop done")
				c.wg.Done()
				return
			case <-time.After(c.cfg.PurgeByExtDelInterval):
				c.mutexL2DBUpdateDelete.Lock()
				if err := c.l2DB.PurgeByExternalDelete(); err != nil {
					log.Errorw("L2DB.PurgeByExternalDelete", "err", err)
				}
				c.mutexL2DBUpdateDelete.Unlock()
			}
		}
	}()
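
A note on the pattern introduced in this hunk: `case <-time.After(waitDuration)` allocates a fresh timer on every loop iteration, and each one is only collected after it fires, so a long wait duration combined with frequent messages leaks timers. The replacement keeps a single time.Timer and rearms it with the Stop/drain/Reset sequence, which is the safe idiom when the channel may already hold a fired value. A minimal standalone sketch of that idiom (not code from this repo):

package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(50 * time.Millisecond)

	// Rearm safely: Stop reports false when the timer already fired,
	// in which case the value left in timer.C must be drained, or a
	// later <-timer.C would return immediately.
	if !timer.Stop() {
		<-timer.C
	}
	timer.Reset(100 * time.Millisecond)

	<-timer.C
	fmt.Println("rearmed timer fired")
}
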


@@ -105,8 +105,8 @@ func newTestModules(t *testing.T) modules {
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	test.WipeDB(db)
-	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
	l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
-	historyDB := historydb.NewHistoryDB(db, nil)
	historyDB := historydb.NewHistoryDB(db, db, nil)
	txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")
	require.NoError(t, err)
@@ -187,12 +187,12 @@ func newTestCoordinator(t *testing.T, forgerAddr ethCommon.Address, ethClient *t
		&prover.MockClient{Delay: 400 * time.Millisecond},
	}
-	scConsts := &synchronizer.SCConsts{
	scConsts := &common.SCConsts{
		Rollup:   *ethClientSetup.RollupConstants,
		Auction:  *ethClientSetup.AuctionConstants,
		WDelayer: *ethClientSetup.WDelayerConstants,
	}
-	initSCVars := &synchronizer.SCVariables{
	initSCVars := &common.SCVariables{
		Rollup:   *ethClientSetup.RollupVariables,
		Auction:  *ethClientSetup.AuctionVariables,
		WDelayer: *ethClientSetup.WDelayerVariables,
@@ -261,8 +261,8 @@ func TestCoordinatorFlow(t *testing.T) {
		var stats synchronizer.Stats
		stats.Eth.LastBlock = *ethClient.CtlLastBlock()
		stats.Sync.LastBlock = stats.Eth.LastBlock
-		stats.Eth.LastBatch = ethClient.CtlLastForgedBatch()
-		stats.Sync.LastBatch = stats.Eth.LastBatch
		stats.Eth.LastBatchNum = ethClient.CtlLastForgedBatch()
		stats.Sync.LastBatch.BatchNum = common.BatchNum(stats.Eth.LastBatchNum)
		canForge, err := ethClient.AuctionCanForge(forger, blockNum+1)
		require.NoError(t, err)
		var slot common.Slot
@@ -279,7 +279,7 @@ func TestCoordinatorFlow(t *testing.T) {
		// Copy stateDB to synchronizer if there was a new batch
		source := fmt.Sprintf("%v/BatchNum%v", batchBuilderDBPath, stats.Sync.LastBatch)
		dest := fmt.Sprintf("%v/BatchNum%v", syncDBPath, stats.Sync.LastBatch)
-		if stats.Sync.LastBatch != 0 {
		if stats.Sync.LastBatch.BatchNum != 0 {
			if _, err := os.Stat(dest); os.IsNotExist(err) {
				log.Infow("Making pebble checkpoint for sync",
					"source", source, "dest", dest)
@@ -517,7 +517,7 @@ func TestCoordinatorStress(t *testing.T) {
	wg.Add(1)
	go func() {
		for {
-			blockData, _, err := syn.Sync2(ctx, nil)
			blockData, _, err := syn.Sync(ctx, nil)
			if ctx.Err() != nil {
				wg.Done()
				return
@@ -528,7 +528,7 @@ func TestCoordinatorStress(t *testing.T) {
			coord.SendMsg(ctx, MsgSyncBlock{
				Stats:   *stats,
				Batches: blockData.Rollup.Batches,
-				Vars: synchronizer.SCVariablesPtr{
				Vars: common.SCVariablesPtr{
					Rollup:   blockData.Rollup.Vars,
					Auction:  blockData.Auction.Vars,
					WDelayer: blockData.WDelayer.Vars,


@@ -2,6 +2,7 @@ package coordinator
import (
	"context"
	"database/sql"
	"fmt"
	"math/big"
	"sync"
@@ -21,31 +22,42 @@ import (
type statsVars struct {
	Stats synchronizer.Stats
-	Vars  synchronizer.SCVariablesPtr
	Vars  common.SCVariablesPtr
}

type state struct {
	batchNum                     common.BatchNum
	lastScheduledL1BatchBlockNum int64
	lastForgeL1TxsNum            int64
	lastSlotForged               int64
}

// Pipeline manages the forging of batches with parallel server proofs
type Pipeline struct {
	num    int
	cfg    Config
-	consts synchronizer.SCConsts
	consts common.SCConsts

	// state
-	batchNum                     common.BatchNum
-	lastScheduledL1BatchBlockNum int64
-	lastForgeL1TxsNum            int64
-	started                      bool
	state         state
	started       bool
	rw            sync.RWMutex
	errAtBatchNum common.BatchNum
	lastForgeTime time.Time

	proversPool           *ProversPool
	provers               []prover.Client
	coord                 *Coordinator
	txManager             *TxManager
	historyDB             *historydb.HistoryDB
	l2DB                  *l2db.L2DB
	txSelector            *txselector.TxSelector
	batchBuilder          *batchbuilder.BatchBuilder
	mutexL2DBUpdateDelete *sync.Mutex
	purger                *Purger

	stats synchronizer.Stats
-	vars  synchronizer.SCVariables
	vars  common.SCVariables
	statsVarsCh chan statsVars

	ctx    context.Context
@@ -53,17 +65,32 @@ type Pipeline struct {
	cancel context.CancelFunc
}
func (p *Pipeline) setErrAtBatchNum(batchNum common.BatchNum) {
	p.rw.Lock()
	defer p.rw.Unlock()
	p.errAtBatchNum = batchNum
}

func (p *Pipeline) getErrAtBatchNum() common.BatchNum {
	p.rw.RLock()
	defer p.rw.RUnlock()
	return p.errAtBatchNum
}
// NewPipeline creates a new Pipeline
func NewPipeline(ctx context.Context,
	cfg Config,
	num int, // Pipeline sequential number
	historyDB *historydb.HistoryDB,
	l2DB *l2db.L2DB,
	txSelector *txselector.TxSelector,
	batchBuilder *batchbuilder.BatchBuilder,
	mutexL2DBUpdateDelete *sync.Mutex,
	purger *Purger,
	coord *Coordinator,
	txManager *TxManager,
	provers []prover.Client,
-	scConsts *synchronizer.SCConsts,
	scConsts *common.SCConsts,
) (*Pipeline, error) {
	proversPool := NewProversPool(len(provers))
	proversPoolSize := 0
@@ -79,22 +106,25 @@ func NewPipeline(ctx context.Context,
		return nil, tracerr.Wrap(fmt.Errorf("no provers in the pool"))
	}
	return &Pipeline{
		num:                   num,
		cfg:                   cfg,
		historyDB:             historyDB,
		l2DB:                  l2DB,
		txSelector:            txSelector,
		batchBuilder:          batchBuilder,
		provers:               provers,
		proversPool:           proversPool,
		mutexL2DBUpdateDelete: mutexL2DBUpdateDelete,
		purger:                purger,
		coord:                 coord,
		txManager:             txManager,
		consts:                *scConsts,
		statsVarsCh:           make(chan statsVars, queueLen),
	}, nil
}
// SetSyncStatsVars is a thread safe method to set the synchronizer Stats
-func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
	select {
	case p.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
	case <-ctx.Done():
@@ -103,54 +133,77 @@ func (p *Pipeline) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Sta
// reset pipeline state
func (p *Pipeline) reset(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
-	p.batchNum = batchNum
-	p.lastForgeL1TxsNum = stats.Sync.LastForgeL1TxsNum
	stats *synchronizer.Stats, vars *common.SCVariables) error {
	p.state = state{
		batchNum:                     batchNum,
		lastForgeL1TxsNum:            stats.Sync.LastForgeL1TxsNum,
		lastScheduledL1BatchBlockNum: 0,
		lastSlotForged:               -1,
	}
	p.stats = *stats
	p.vars = *vars
-	p.lastScheduledL1BatchBlockNum = 0
	// Reset the StateDB in TxSelector and BatchBuilder from the
	// synchronizer only if the checkpoint we reset from either:
	// a. Doesn't exist in the TxSelector/BatchBuilder
	// b. The batch has already been synced by the synchronizer and has a
	//    different MTRoot than the BatchBuilder
	// Otherwise, reset from the local checkpoint.
	// First attempt to reset from local checkpoint if such checkpoint exists
-	err := p.txSelector.Reset(p.batchNum)
	existsTxSelector, err := p.txSelector.LocalAccountsDB().CheckpointExists(p.state.batchNum)
	if err != nil {
		return tracerr.Wrap(err)
	}
	fromSynchronizerTxSelector := !existsTxSelector
	if err := p.txSelector.Reset(p.state.batchNum, fromSynchronizerTxSelector); err != nil {
		return tracerr.Wrap(err)
	}
-	err = p.batchBuilder.Reset(p.batchNum, true)
	existsBatchBuilder, err := p.batchBuilder.LocalStateDB().CheckpointExists(p.state.batchNum)
	if err != nil {
		return tracerr.Wrap(err)
	}
	fromSynchronizerBatchBuilder := !existsBatchBuilder
	if err := p.batchBuilder.Reset(p.state.batchNum, fromSynchronizerBatchBuilder); err != nil {
		return tracerr.Wrap(err)
	}
	// After reset, check that if the batch exists in the historyDB, the
	// stateRoot matches with the local one, if not, force a reset from
	// synchronizer
	batch, err := p.historyDB.GetBatch(p.state.batchNum)
	if tracerr.Unwrap(err) == sql.ErrNoRows {
		// nothing to do
	} else if err != nil {
		return tracerr.Wrap(err)
	} else {
		localStateRoot := p.batchBuilder.LocalStateDB().MT.Root().BigInt()
		if batch.StateRoot.Cmp(localStateRoot) != 0 {
			log.Debugw("localStateRoot (%v) != historyDB stateRoot (%v). "+
				"Forcing reset from Synchronizer", localStateRoot, batch.StateRoot)
			// StateRoot from synchronizer doesn't match StateRoot
			// from batchBuilder, force a reset from synchronizer
			if err := p.txSelector.Reset(p.state.batchNum, true); err != nil {
				return tracerr.Wrap(err)
			}
			if err := p.batchBuilder.Reset(p.state.batchNum, true); err != nil {
				return tracerr.Wrap(err)
			}
		}
	}
	return nil
}
-func (p *Pipeline) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		p.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		p.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		p.vars.WDelayer = *vars.WDelayer
-	}
func (p *Pipeline) syncSCVars(vars common.SCVariablesPtr) {
	updateSCVars(&p.vars, vars)
}
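
`updateSCVars` itself is not part of this diff; both `Pipeline.syncSCVars` and `TxManager.syncSCVars` (below) now delegate to it. Judging from the three near-identical bodies it replaces, it is presumably a shared helper that merges the non-nil fields of a `common.SCVariablesPtr` into a `common.SCVariables`; a plausible sketch under that assumption:

// Assumed shape of the shared helper (defined elsewhere in the
// coordinator package): copy only the variables that the synchronizer
// reported as changed, i.e. the non-nil pointers.
func updateSCVars(vars *common.SCVariables, update common.SCVariablesPtr) {
	if update.Rollup != nil {
		vars.Rollup = *update.Rollup
	}
	if update.Auction != nil {
		vars.Auction = *update.Auction
	}
	if update.WDelayer != nil {
		vars.WDelayer = *update.WDelayer
	}
}
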
-// handleForgeBatch calls p.forgeBatch to forge the batch and get the zkInputs,
-// and then waits for an available proof server and sends the zkInputs to it so
-// that the proof computation begins.
-func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNum) (*BatchInfo, error) {
-	batchInfo, err := p.forgeBatch(batchNum)
-	if ctx.Err() != nil {
-		return nil, ctx.Err()
-	} else if err != nil {
-		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
-			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
-				"lastForgeL1TxsNum", p.lastForgeL1TxsNum,
-				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
-		} else {
-			log.Errorw("forgeBatch", "err", err)
-		}
-		return nil, err
-	}
-	// 6. Wait for an available server proof (blocking call)
// handleForgeBatch waits for an available proof server, calls p.forgeBatch to
// forge the batch and get the zkInputs, and then sends the zkInputs to the
// selected proof server so that the proof computation begins.
func (p *Pipeline) handleForgeBatch(ctx context.Context,
	batchNum common.BatchNum) (batchInfo *BatchInfo, err error) {
	// 1. Wait for an available serverProof (blocking call)
	serverProof, err := p.proversPool.Get(ctx)
	if ctx.Err() != nil {
		return nil, ctx.Err()
@@ -158,13 +211,43 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
		log.Errorw("proversPool.Get", "err", err)
		return nil, err
	}
	defer func() {
		// If we encounter any error (notice that this function returns
		// errors to notify that a batch is not forged not only because
		// of unexpected errors but also due to benign causes), add the
		// serverProof back to the pool
		if err != nil {
			p.proversPool.Add(ctx, serverProof)
		}
	}()
	// 2. Forge the batch internally (make a selection of txs and prepare
	// all the smart contract arguments)
	p.mutexL2DBUpdateDelete.Lock()
	batchInfo, err = p.forgeBatch(batchNum)
	p.mutexL2DBUpdateDelete.Unlock()
	if ctx.Err() != nil {
		return nil, ctx.Err()
	} else if err != nil {
		if tracerr.Unwrap(err) == errLastL1BatchNotSynced {
			log.Warnw("forgeBatch: scheduled L1Batch too early", "err", err,
				"lastForgeL1TxsNum", p.state.lastForgeL1TxsNum,
				"syncLastForgeL1TxsNum", p.stats.Sync.LastForgeL1TxsNum)
		} else if tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
			tracerr.Unwrap(err) == errForgeBeforeDelay {
			// no log
		} else {
			log.Errorw("forgeBatch", "err", err)
		}
		return nil, err
	}
	// 3. Send the ZKInputs to the proof server
	batchInfo.ServerProof = serverProof
	if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
		return nil, ctx.Err()
	} else if err != nil {
		log.Errorw("sendServerProof", "err", err)
-		batchInfo.ServerProof = nil
-		p.proversPool.Add(ctx, serverProof)
		return nil, err
	}
	return batchInfo, nil
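
The rewrite above also changes who owns the prover on failure: the serverProof is acquired first, and a deferred check on the named error return sends it back to the pool on every early return, instead of hand-placing proversPool.Add on each failure branch. Reduced to a standalone sketch (hypothetical pool type, not the repo's API):

package main

import "fmt"

type pool struct{ free chan int }

func (p *pool) get() int  { return <-p.free }
func (p *pool) put(r int) { p.free <- r }

// doWork acquires a resource; because err is a named return, a single
// deferred check releases the resource on every error path.
func doWork(p *pool, fail bool) (err error) {
	r := p.get()
	defer func() {
		if err != nil {
			p.put(r) // released on any failure, kept on success
		}
	}()
	if fail {
		return fmt.Errorf("work failed")
	}
	// On success the caller owns r and returns it later.
	return nil
}

func main() {
	p := &pool{free: make(chan int, 1)}
	p.free <- 42
	fmt.Println(doWork(p, true), len(p.free)) // "work failed 1": resource is back
}
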
@@ -172,7 +255,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context, batchNum common.BatchNu
// Start the forging pipeline
func (p *Pipeline) Start(batchNum common.BatchNum,
-	stats *synchronizer.Stats, vars *synchronizer.SCVariables) error {
	stats *synchronizer.Stats, vars *common.SCVariables) error {
	if p.started {
		log.Fatal("Pipeline already started")
	}
@@ -188,7 +271,7 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
	p.wg.Add(1)
	go func() {
-		waitDuration := zeroDuration
		timer := time.NewTimer(zeroDuration)
		for {
			select {
			case <-p.ctx.Done():
@@ -198,20 +281,42 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
			case statsVars := <-p.statsVarsCh:
				p.stats = statsVars.Stats
				p.syncSCVars(statsVars.Vars)
-			case <-time.After(waitDuration):
-				batchNum = p.batchNum + 1
			case <-timer.C:
				timer.Reset(p.cfg.ForgeRetryInterval)
				// Once errAtBatchNum != 0, we stop forging
				// batches because there's been an error and we
				// wait for the pipeline to be stopped.
				if p.getErrAtBatchNum() != 0 {
					continue
				}
				batchNum = p.state.batchNum + 1
				batchInfo, err := p.handleForgeBatch(p.ctx, batchNum)
				if p.ctx.Err() != nil {
					continue
				} else if tracerr.Unwrap(err) == errLastL1BatchNotSynced ||
					tracerr.Unwrap(err) == errForgeNoTxsBeforeDelay ||
					tracerr.Unwrap(err) == errForgeBeforeDelay {
					continue
				} else if err != nil {
-					waitDuration = p.cfg.SyncRetryInterval
					p.setErrAtBatchNum(batchNum)
					p.coord.SendMsg(p.ctx, MsgStopPipeline{
						Reason: fmt.Sprintf(
							"Pipeline.handleForgeBatch: %v", err),
						FailedBatchNum: batchNum,
					})
					continue
				}
-				p.batchNum = batchNum
				p.lastForgeTime = time.Now()
				p.state.batchNum = batchNum
				select {
				case batchChSentServerProof <- batchInfo:
				case <-p.ctx.Done():
				}
				if !timer.Stop() {
					<-timer.C
				}
				timer.Reset(zeroDuration)
			}
		}
	}()
@@ -225,16 +330,27 @@ func (p *Pipeline) Start(batchNum common.BatchNum,
				p.wg.Done()
				return
			case batchInfo := <-batchChSentServerProof:
				// Once errAtBatchNum != 0, we stop forging
				// batches because there's been an error and we
				// wait for the pipeline to be stopped.
				if p.getErrAtBatchNum() != 0 {
					continue
				}
				err := p.waitServerProof(p.ctx, batchInfo)
				// We are done with this serverProof, add it back to the pool
				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
				batchInfo.ServerProof = nil
				if p.ctx.Err() != nil {
					continue
				} else if err != nil {
					log.Errorw("waitServerProof", "err", err)
					p.setErrAtBatchNum(batchInfo.BatchNum)
					p.coord.SendMsg(p.ctx, MsgStopPipeline{
						Reason: fmt.Sprintf(
							"Pipeline.waitServerProof: %v", err),
						FailedBatchNum: batchInfo.BatchNum,
					})
					continue
				}
-				// We are done with this serverProof, add it back to the pool
-				p.proversPool.Add(p.ctx, batchInfo.ServerProof)
				p.txManager.AddBatch(p.ctx, batchInfo)
			}
		}
@@ -284,9 +400,10 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
-	batchInfo = &BatchInfo{BatchNum: batchNum} // to accumulate metadata of the batch
-	batchInfo.Debug.StartTimestamp = time.Now()
	// Structure to accumulate data and metadata of the batch
	now := time.Now()
	batchInfo = &BatchInfo{PipelineNum: p.num, BatchNum: batchNum}
	batchInfo.Debug.StartTimestamp = now
	batchInfo.Debug.StartBlockNum = p.stats.Eth.LastBlock.Num + 1

	selectionCfg := &txselector.SelectionConfig{
@@ -300,22 +417,26 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
	var auths [][]byte
	var coordIdxs []common.Idx

	// Check if the slot is not yet fulfilled
	slotCommitted := false
	if p.stats.Sync.Auction.CurrentSlot.ForgerCommitment ||
		p.stats.Sync.Auction.CurrentSlot.SlotNum == p.state.lastSlotForged {
		slotCommitted = true
	}
	// If we haven't reached the ForgeDelay, skip forging the batch
	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeDelay {
		return nil, errForgeBeforeDelay
	}

	// 1. Decide if we forge L2Tx or L1+L2Tx
	if p.shouldL1L2Batch(batchInfo) {
		batchInfo.L1Batch = true
-		defer func() {
-			// If there's no error, update the parameters related
-			// to the last L1Batch forged
-			if err == nil {
-				p.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
-				p.lastForgeL1TxsNum++
-			}
-		}()
-		if p.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
		if p.state.lastForgeL1TxsNum != p.stats.Sync.LastForgeL1TxsNum {
			return nil, tracerr.Wrap(errLastL1BatchNotSynced)
		}
		// 2a: L1+L2 txs
-		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.lastForgeL1TxsNum + 1)
		l1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(p.state.lastForgeL1TxsNum + 1)
		if err != nil {
			return nil, tracerr.Wrap(err)
		}
@@ -334,6 +455,43 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
		l1UserTxsExtra = nil
	}

	// If there are no txs to forge, no l1UserTxs in the open queue to
	// freeze, and we haven't reached the ForgeNoTxsDelay, skip forging the
	// batch.
	if slotCommitted && now.Sub(p.lastForgeTime) < p.cfg.ForgeNoTxsDelay {
		noTxs := false
		if len(l1UserTxsExtra) == 0 && len(l1CoordTxs) == 0 && len(poolL2Txs) == 0 {
			if batchInfo.L1Batch {
				// Query the L1UserTxs in the queue following
				// the one we are trying to forge.
				nextL1UserTxs, err := p.historyDB.GetUnforgedL1UserTxs(
					p.state.lastForgeL1TxsNum + 1)
				if err != nil {
					return nil, tracerr.Wrap(err)
				}
				// If there are future L1UserTxs, we forge a
				// batch to advance the queues and forge the
				// L1UserTxs in the future. Otherwise, skip.
				if len(nextL1UserTxs) == 0 {
					noTxs = true
				}
			} else {
				noTxs = true
			}
		}
		if noTxs {
			if err := p.txSelector.Reset(batchInfo.BatchNum-1, false); err != nil {
				return nil, tracerr.Wrap(err)
			}
			return nil, errForgeNoTxsBeforeDelay
		}
	}
	if batchInfo.L1Batch {
		p.state.lastScheduledL1BatchBlockNum = p.stats.Eth.LastBlock.Num + 1
		p.state.lastForgeL1TxsNum++
	}

	// 3. Save metadata from TxSelector output for BatchNum
	batchInfo.L1UserTxsExtra = l1UserTxsExtra
	batchInfo.L1CoordTxs = l1CoordTxs
@@ -378,6 +536,8 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo, e
	p.cfg.debugBatchStore(batchInfo)
	log.Infow("Pipeline: batch forged internally", "batch", batchInfo.BatchNum)

	p.state.lastSlotForged = p.stats.Sync.Auction.CurrentSlot.SlotNum

	return batchInfo, nil
}
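
The two early returns added to forgeBatch implement the forge throttling: once the current slot already has a committed forge (or this pipeline already forged in it), a batch is delayed by ForgeDelay, and a batch with no txs and no L1 queue to advance is delayed by the (typically longer) ForgeNoTxsDelay. The decision, isolated as a pure function (a sketch of the logic, not the repo's API):

package main

import (
	"fmt"
	"time"
)

// shouldSkipForge mirrors the gating above: nothing is ever delayed while
// the slot still needs a commitment; afterwards, every batch waits for
// forgeDelay, and an empty batch additionally waits for noTxsDelay.
func shouldSkipForge(slotCommitted, haveTxs bool,
	sinceLastForge, forgeDelay, noTxsDelay time.Duration) bool {
	if !slotCommitted {
		return false
	}
	if sinceLastForge < forgeDelay {
		return true
	}
	return !haveTxs && sinceLastForge < noTxsDelay
}

func main() {
	fmt.Println(shouldSkipForge(true, true, 5*time.Second, 10*time.Second, time.Minute))   // true: within ForgeDelay
	fmt.Println(shouldSkipForge(true, false, 30*time.Second, 10*time.Second, time.Minute)) // true: empty, within ForgeNoTxsDelay
	fmt.Println(shouldSkipForge(false, false, 0, 10*time.Second, time.Minute))             // false: slot not committed yet
}
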
@@ -399,12 +559,12 @@ func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) er
func (p *Pipeline) shouldL1L2Batch(batchInfo *BatchInfo) bool {
	// Take the lastL1BatchBlockNum as the biggest between the last
	// scheduled one, and the synchronized one.
-	lastL1BatchBlockNum := p.lastScheduledL1BatchBlockNum
	lastL1BatchBlockNum := p.state.lastScheduledL1BatchBlockNum
	if p.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
		lastL1BatchBlockNum = p.stats.Sync.LastL1BatchBlock
	}
	// Set Debug information
-	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.lastScheduledL1BatchBlockNum
	batchInfo.Debug.LastScheduledL1BatchBlockNum = p.state.lastScheduledL1BatchBlockNum
	batchInfo.Debug.LastL1BatchBlock = p.stats.Sync.LastL1BatchBlock
	batchInfo.Debug.LastL1BatchBlockDelta = p.stats.Eth.LastBlock.Num + 1 - lastL1BatchBlockNum
	batchInfo.Debug.L1BatchBlockScheduleDeadline =


@@ -25,6 +25,14 @@ import (
	"github.com/stretchr/testify/require"
)

func newBigInt(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic(fmt.Errorf("Can't set big.Int from %s", s))
	}
	return v
}

func TestPipelineShouldL1L2Batch(t *testing.T) {
	ethClientSetup := test.NewClientSetupExample()
	ethClientSetup.ChainID = big.NewInt(int64(chainID))
@@ -77,7 +85,7 @@ func TestPipelineShouldL1L2Batch(t *testing.T) {
	//
	// Scheduled L1Batch
	//
-	pipeline.lastScheduledL1BatchBlockNum = startBlock
	pipeline.state.lastScheduledL1BatchBlockNum = startBlock
	stats.Sync.LastL1BatchBlock = startBlock - 10

	// We are one block before the timeout range * 0.5
@@ -128,6 +136,11 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
	blocks, err := tc.GenerateBlocksFromInstructions(set)
	require.NoError(t, err)
	require.NotNil(t, blocks)
	// Set StateRoots for batches manually (til doesn't set it)
	blocks[0].Rollup.Batches[0].Batch.StateRoot =
		newBigInt("0")
	blocks[0].Rollup.Batches[1].Batch.StateRoot =
		newBigInt("10941365282189107056349764238909072001483688090878331371699519307087372995595")

	ethAddTokens(blocks, ethClient)
	err = ethClient.CtlAddBlocks(blocks)
@@ -135,7 +148,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
	ctx := context.Background()
	for {
-		syncBlock, discards, err := sync.Sync2(ctx, nil)
		syncBlock, discards, err := sync.Sync(ctx, nil)
		require.NoError(t, err)
		require.Nil(t, discards)
		if syncBlock == nil {
@@ -172,7 +185,7 @@ func TestPipelineForgeBatchWithTxs(t *testing.T) {
	// users with positive balances
	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
	syncStats := sync.Stats()
-	batchNum := common.BatchNum(syncStats.Sync.LastBatch)
	batchNum := syncStats.Sync.LastBatch.BatchNum
	syncSCVars := sync.SCVars()

	pipeline, err := coord.newPipeline(ctx)
@@ -193,11 +206,7 @@ PoolTransfer(0) User2-User3: 300 (126)
		require.NoError(t, err)
	}

-	err = pipeline.reset(batchNum, syncStats, &synchronizer.SCVariables{
-		Rollup:   *syncSCVars.Rollup,
-		Auction:  *syncSCVars.Auction,
-		WDelayer: *syncSCVars.WDelayer,
-	})
	err = pipeline.reset(batchNum, syncStats, syncSCVars)
	require.NoError(t, err)
	// Sanity check
	sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()


@@ -13,13 +23,23 @@ import (
// PurgerCfg is the purger configuration
type PurgerCfg struct {
-	// PurgeBatchDelay is the delay between batches to purge outdated transactions
	// PurgeBatchDelay is the delay between batches to purge outdated
	// transactions. Outdated L2Txs are those that have been forged or
	// marked as invalid for longer than the SafetyPeriod and pending L2Txs
	// that have been in the pool for longer than TTL once there are
	// MaxTxs.
	PurgeBatchDelay int64
-	// InvalidateBatchDelay is the delay between batches to mark invalid transactions
	// InvalidateBatchDelay is the delay between batches to mark invalid
	// transactions due to nonce lower than the account nonce.
	InvalidateBatchDelay int64
-	// PurgeBlockDelay is the delay between blocks to purge outdated transactions
	// PurgeBlockDelay is the delay between blocks to purge outdated
	// transactions. Outdated L2Txs are those that have been forged or
	// marked as invalid for longer than the SafetyPeriod and pending L2Txs
	// that have been in the pool for longer than TTL once there are
	// MaxTxs.
	PurgeBlockDelay int64
-	// InvalidateBlockDelay is the delay between blocks to mark invalid transactions
	// InvalidateBlockDelay is the delay between blocks to mark invalid
	// transactions due to nonce lower than the account nonce.
	InvalidateBlockDelay int64
}
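
All four knobs express "do X at most once every N batches/blocks". As an illustration of how such a delay gate is typically checked (hypothetical helper, not the Purger's actual code):

// shouldRun reports whether an action that last ran at lastRun (a batch
// or block number) is due again under the configured delay.
func shouldRun(current, lastRun, delay int64) bool {
	return current-lastRun >= delay
}

// e.g. with PurgeBatchDelay = 10:
//   shouldRun(25, 20, 10) == false // only 5 batches since the last purge
//   shouldRun(30, 20, 10) == true
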


@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
	require.NoError(t, err)
	test.WipeDB(db)
-	return l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
	return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
}

func newStateDB(t *testing.T) *statedb.LocalStateDB {


@@ -4,11 +4,13 @@ import (
	"context"
	"fmt"
	"math/big"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/l2db"
@@ -29,23 +31,31 @@ type TxManager struct {
	batchCh chan *BatchInfo
	chainID *big.Int
	account accounts.Account
-	consts  synchronizer.SCConsts
	consts  common.SCConsts

	stats synchronizer.Stats
-	vars  synchronizer.SCVariables
	vars  common.SCVariables
	statsVarsCh chan statsVars

-	queue []*BatchInfo
	discardPipelineCh chan int // int refers to the pipelineNum
	minPipelineNum    int
	queue             Queue
	// lastSuccessBatch stores the last BatchNum whose forge call was confirmed
	lastSuccessBatch common.BatchNum
-	lastPendingBatch common.BatchNum
-	lastSuccessNonce uint64
-	lastPendingNonce uint64
	// lastPendingBatch common.BatchNum
	// accNonce is the account nonce in the last mined block (due to mined txs)
	accNonce uint64
	// accNextNonce is the nonce that we should use to send the next tx.
	// In some cases this will be a reused nonce of an already pending tx.
	accNextNonce uint64

	lastSentL1BatchBlockNum int64
}

// NewTxManager creates a new TxManager
func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterface, l2DB *l2db.L2DB,
-	coord *Coordinator, scConsts *synchronizer.SCConsts, initSCVars *synchronizer.SCVariables) (*TxManager, error) {
	coord *Coordinator, scConsts *common.SCConsts, initSCVars *common.SCVariables) (*TxManager, error) {
	chainID, err := ethClient.EthChainID()
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -54,26 +64,19 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
-	lastSuccessNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
	accNonce, err := ethClient.EthNonceAt(ctx, *address, nil)
	if err != nil {
		return nil, err
	}
-	lastPendingNonce, err := ethClient.EthPendingNonceAt(ctx, *address)
-	if err != nil {
-		return nil, err
-	}
-	if lastSuccessNonce != lastPendingNonce {
-		return nil, tracerr.Wrap(fmt.Errorf("lastSuccessNonce (%v) != lastPendingNonce (%v)",
-			lastSuccessNonce, lastPendingNonce))
-	}
-	log.Infow("TxManager started", "nonce", lastSuccessNonce)
	log.Infow("TxManager started", "nonce", accNonce)
	return &TxManager{
		cfg:               *cfg,
		ethClient:         ethClient,
		l2DB:              l2DB,
		coord:             coord,
		batchCh:           make(chan *BatchInfo, queueLen),
		statsVarsCh:       make(chan statsVars, queueLen),
		discardPipelineCh: make(chan int, queueLen),
		account: accounts.Account{
			Address: *address,
		},
@@ -82,8 +85,10 @@ func NewTxManager(ctx context.Context, cfg *Config, ethClient eth.ClientInterfac
		vars: *initSCVars,
-		lastSuccessNonce: lastSuccessNonce,
-		lastPendingNonce: lastPendingNonce,
		minPipelineNum: 0,
		queue:          NewQueue(),
		accNonce:       accNonce,
		accNextNonce:   accNonce,
	}, nil
}
@@ -97,35 +102,40 @@ func (t *TxManager) AddBatch(ctx context.Context, batchInfo *BatchInfo) {
}

// SetSyncStatsVars is a thread safe method to set the synchronizer Stats
-func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *synchronizer.SCVariablesPtr) {
func (t *TxManager) SetSyncStatsVars(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr) {
	select {
	case t.statsVarsCh <- statsVars{Stats: *stats, Vars: *vars}:
	case <-ctx.Done():
	}
}

-func (t *TxManager) syncSCVars(vars synchronizer.SCVariablesPtr) {
-	if vars.Rollup != nil {
-		t.vars.Rollup = *vars.Rollup
-	}
-	if vars.Auction != nil {
-		t.vars.Auction = *vars.Auction
-	}
-	if vars.WDelayer != nil {
-		t.vars.WDelayer = *vars.WDelayer
-	}
-}
// DiscardPipeline is a thread safe method to notify about a discarded pipeline
// due to a reorg
func (t *TxManager) DiscardPipeline(ctx context.Context, pipelineNum int) {
	select {
	case t.discardPipelineCh <- pipelineNum:
	case <-ctx.Done():
	}
}

func (t *TxManager) syncSCVars(vars common.SCVariablesPtr) {
	updateSCVars(&t.vars, vars)
}

// NewAuth generates a new auth object for an ethereum transaction
-func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
func (t *TxManager) NewAuth(ctx context.Context, batchInfo *BatchInfo) (*bind.TransactOpts, error) {
	gasPrice, err := t.ethClient.EthSuggestGasPrice(ctx)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
-	inc := new(big.Int).Set(gasPrice)
-	const gasPriceDiv = 100
-	inc.Div(inc, new(big.Int).SetUint64(gasPriceDiv))
-	gasPrice.Add(gasPrice, inc)
	if t.cfg.GasPriceIncPerc != 0 {
		inc := new(big.Int).Set(gasPrice)
		inc.Mul(inc, new(big.Int).SetInt64(t.cfg.GasPriceIncPerc))
		// nolint reason: to calculate percentages we use 100
		inc.Div(inc, new(big.Int).SetUint64(100)) //nolint:gomnd
		gasPrice.Add(gasPrice, inc)
	}
	// log.Debugw("TxManager: transaction metadata", "gasPrice", gasPrice)

	auth, err := bind.NewKeyStoreTransactorWithChainID(t.ethClient.EthKeyStore(), t.account, t.chainID)
@@ -133,42 +143,95 @@ func (t *TxManager) NewAuth(ctx context.Context) (*bind.TransactOpts, error) {
		return nil, tracerr.Wrap(err)
	}
	auth.Value = big.NewInt(0) // in wei
-	// TODO: Calculate GasLimit based on the contents of the ForgeBatchArgs
-	auth.GasLimit = 1000000
	gasLimit := t.cfg.ForgeBatchGasCost.Fixed +
		uint64(len(batchInfo.L1UserTxsExtra))*t.cfg.ForgeBatchGasCost.L1UserTx +
		uint64(len(batchInfo.L1CoordTxs))*t.cfg.ForgeBatchGasCost.L1CoordTx +
		uint64(len(batchInfo.L2Txs))*t.cfg.ForgeBatchGasCost.L2Tx
	auth.GasLimit = gasLimit
	auth.GasPrice = gasPrice
	auth.Nonce = nil

	return auth, nil
}
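
The hardcoded 1000000 gas limit becomes a linear estimate over the batch contents. For a feel of the formula, take hypothetical costs (made-up numbers, not the repo's defaults) Fixed = 300000, L1UserTx = 12000, L1CoordTx = 10000, L2Tx = 6000; a batch with 3 L1 user txs, 1 L1 coordinator tx and 20 L2 txs then gets:

package main

import "fmt"

func main() {
	const (
		fixed     uint64 = 300000 // hypothetical ForgeBatchGasCost values
		l1UserTx  uint64 = 12000
		l1CoordTx uint64 = 10000
		l2Tx      uint64 = 6000
	)
	gasLimit := fixed + 3*l1UserTx + 1*l1CoordTx + 20*l2Tx
	fmt.Println(gasLimit) // 466000 = 300000 + 36000 + 10000 + 120000
}
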
-func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo) error {
-	// TODO: Check if we can forge in the next blockNum, abort if we can't
-	batchInfo.Debug.Status = StatusSent
-	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
-	batchInfo.Debug.SendTimestamp = time.Now()
-	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
-		batchInfo.Debug.StartTimestamp).Seconds()
func (t *TxManager) shouldSendRollupForgeBatch(batchInfo *BatchInfo) error {
	nextBlock := t.stats.Eth.LastBlock.Num + 1
	if !t.canForgeAt(nextBlock) {
		return tracerr.Wrap(fmt.Errorf("can't forge in the next block: %v", nextBlock))
	}
	if t.mustL1L2Batch(nextBlock) && !batchInfo.L1Batch {
		return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch in the next block: %v", nextBlock))
	}
	margin := t.cfg.SendBatchBlocksMarginCheck
	if margin != 0 {
		if !t.canForgeAt(nextBlock + margin) {
			return tracerr.Wrap(fmt.Errorf("can't forge after %v blocks: %v",
				margin, nextBlock))
		}
		if t.mustL1L2Batch(nextBlock+margin) && !batchInfo.L1Batch {
			return tracerr.Wrap(fmt.Errorf("can't forge non-L1Batch after %v blocks: %v",
				margin, nextBlock))
		}
	}
	return nil
}

func addPerc(v *big.Int, p int64) *big.Int {
	r := new(big.Int).Set(v)
	r.Mul(r, big.NewInt(p))
	// nolint reason: to calculate percentages we divide by 100
	r.Div(r, big.NewInt(100)) //nolint:gomnd
	return r.Add(v, r)
}
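
Note that addPerc works in integer arithmetic, so the increment truncates toward zero before being added back: a 15% bump on 10 yields 11 (150/100 truncates to 1), and a 10% bump on any value below 10 is a no-op. For gas prices in wei the truncation is negligible. Illustration (mirrors TestAddPerc further down):

fmt.Println(addPerc(big.NewInt(10), 15)) // 11: 10*15/100 truncates to 1
fmt.Println(addPerc(big.NewInt(9), 10))  // 9:  9*10/100 truncates to 0
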
func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchInfo, resend bool) error {
	var ethTx *types.Transaction
	var err error
-	auth, err := t.NewAuth(ctx)
	auth, err := t.NewAuth(ctx, batchInfo)
	if err != nil {
		return tracerr.Wrap(err)
	}
-	auth.Nonce = big.NewInt(int64(t.lastPendingNonce))
-	t.lastPendingNonce++
	auth.Nonce = big.NewInt(int64(t.accNextNonce))
	if resend {
		auth.Nonce = big.NewInt(int64(batchInfo.EthTx.Nonce()))
	}
	for attempt := 0; attempt < t.cfg.EthClientAttempts; attempt++ {
		if auth.GasPrice.Cmp(t.cfg.MaxGasPrice) > 0 {
			return tracerr.Wrap(fmt.Errorf("calculated gasPrice (%v) > maxGasPrice (%v)",
				auth.GasPrice, t.cfg.MaxGasPrice))
		}
		// RollupForgeBatch() calls ethclient.SendTransaction()
		ethTx, err = t.ethClient.RollupForgeBatch(batchInfo.ForgeBatchArgs, auth)
-		if err != nil {
-			// if strings.Contains(err.Error(), common.AuctionErrMsgCannotForge) {
-			// 	log.Errorw("TxManager ethClient.RollupForgeBatch", "err", err,
-			// 		"block", t.stats.Eth.LastBlock.Num+1)
-			// 	return tracerr.Wrap(err)
-			// }
		// We check the errors via strings because we match the
		// definition of the error from geth with the string returned
		// via RPC by the client.
		if err == nil {
			break
		} else if strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) {
			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing nonce",
				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
			auth.Nonce.Add(auth.Nonce, big.NewInt(1))
			attempt--
		} else if strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) {
			log.Warnw("TxManager ethClient.RollupForgeBatch decrementing nonce",
				"err", err, "nonce", auth.Nonce, "batchNum", batchInfo.BatchNum)
			auth.Nonce.Sub(auth.Nonce, big.NewInt(1))
			attempt--
		} else if strings.Contains(err.Error(), core.ErrReplaceUnderpriced.Error()) {
			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
			auth.GasPrice = addPerc(auth.GasPrice, 10)
			attempt--
		} else if strings.Contains(err.Error(), core.ErrUnderpriced.Error()) {
			log.Warnw("TxManager ethClient.RollupForgeBatch incrementing gasPrice",
				"err", err, "gasPrice", auth.GasPrice, "batchNum", batchInfo.BatchNum)
			auth.GasPrice = addPerc(auth.GasPrice, 10)
			attempt--
		} else {
			log.Errorw("TxManager ethClient.RollupForgeBatch",
				"attempt", attempt, "err", err, "block", t.stats.Eth.LastBlock.Num+1,
				"batchNum", batchInfo.BatchNum)
		}
		select {
		case <-ctx.Done():
@@ -179,10 +242,29 @@ func (t *TxManager) sendRollupForgeBatch(ctx context.Context, batchInfo *BatchIn
	if err != nil {
		return tracerr.Wrap(fmt.Errorf("reached max attempts for ethClient.RollupForgeBatch: %w", err))
	}
	if !resend {
		t.accNextNonce = auth.Nonce.Uint64() + 1
	}
	batchInfo.EthTx = ethTx
-	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash().Hex())
	log.Infow("TxManager ethClient.RollupForgeBatch", "batch", batchInfo.BatchNum, "tx", ethTx.Hash())
	now := time.Now()
	batchInfo.SendTimestamp = now
	if resend {
		batchInfo.Debug.ResendNum++
	}
	batchInfo.Debug.Status = StatusSent
	batchInfo.Debug.SendBlockNum = t.stats.Eth.LastBlock.Num + 1
	batchInfo.Debug.SendTimestamp = batchInfo.SendTimestamp
	batchInfo.Debug.StartToSendDelay = batchInfo.Debug.SendTimestamp.Sub(
		batchInfo.Debug.StartTimestamp).Seconds()
	t.cfg.debugBatchStore(batchInfo)
-	t.lastPendingBatch = batchInfo.BatchNum
	if !resend {
		if batchInfo.L1Batch {
			t.lastSentL1BatchBlockNum = t.stats.Eth.LastBlock.Num + 1
		}
	}
	if err := t.l2DB.DoneForging(common.TxIDsFromL2Txs(batchInfo.L2Txs), batchInfo.BatchNum); err != nil {
		return tracerr.Wrap(err)
	}
@@ -225,13 +307,20 @@ func (t *TxManager) checkEthTransactionReceipt(ctx context.Context, batchInfo *B
func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*int64, error) {
	receipt := batchInfo.Receipt
	if receipt != nil {
		if batchInfo.EthTx.Nonce()+1 > t.accNonce {
			t.accNonce = batchInfo.EthTx.Nonce() + 1
		}
		if receipt.Status == types.ReceiptStatusFailed {
			batchInfo.Debug.Status = StatusFailed
-			t.cfg.debugBatchStore(batchInfo)
			_, err := t.ethClient.EthCall(ctx, batchInfo.EthTx, receipt.BlockNumber)
-			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash.Hex(),
			log.Warnw("TxManager receipt status is failed", "tx", receipt.TxHash,
				"batch", batchInfo.BatchNum, "block", receipt.BlockNumber.Int64(),
				"err", err)
			batchInfo.EthTxErr = err
			if batchInfo.BatchNum <= t.lastSuccessBatch {
				t.lastSuccessBatch = batchInfo.BatchNum - 1
			}
			t.cfg.debugBatchStore(batchInfo)
			return nil, tracerr.Wrap(fmt.Errorf(
				"ethereum transaction receipt status is failed: %w", err))
		} else if receipt.Status == types.ReceiptStatusSuccessful {
@@ -239,6 +328,17 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
			batchInfo.Debug.MineBlockNum = receipt.BlockNumber.Int64()
			batchInfo.Debug.StartToMineBlocksDelay = batchInfo.Debug.MineBlockNum -
				batchInfo.Debug.StartBlockNum
			if batchInfo.Debug.StartToMineDelay == 0 {
				if block, err := t.ethClient.EthBlockByNumber(ctx,
					receipt.BlockNumber.Int64()); err != nil {
					log.Warnw("TxManager: ethClient.EthBlockByNumber", "err", err)
				} else {
					batchInfo.Debug.SendToMineDelay = block.Timestamp.Sub(
						batchInfo.Debug.SendTimestamp).Seconds()
					batchInfo.Debug.StartToMineDelay = block.Timestamp.Sub(
						batchInfo.Debug.StartTimestamp).Seconds()
				}
			}
			t.cfg.debugBatchStore(batchInfo)
			if batchInfo.BatchNum > t.lastSuccessBatch {
				t.lastSuccessBatch = batchInfo.BatchNum
@@ -250,11 +350,72 @@ func (t *TxManager) handleReceipt(ctx context.Context, batchInfo *BatchInfo) (*i
	return nil, nil
}
// TODO:
// - After sending a message: CancelPipeline, stop all consecutive pending Batches (transactions)

// Queue of BatchInfos
type Queue struct {
	list []*BatchInfo
	// nonceByBatchNum map[common.BatchNum]uint64
	next int
}

// NewQueue returns a new queue
func NewQueue() Queue {
	return Queue{
		list: make([]*BatchInfo, 0),
		// nonceByBatchNum: make(map[common.BatchNum]uint64),
		next: 0,
	}
}

// Len is the length of the queue
func (q *Queue) Len() int {
	return len(q.list)
}

// At returns the BatchInfo at position (or nil if position is out of bounds)
func (q *Queue) At(position int) *BatchInfo {
	if position >= len(q.list) {
		return nil
	}
	return q.list[position]
}

// Next returns the next BatchInfo (or nil if the queue is empty)
func (q *Queue) Next() (int, *BatchInfo) {
	if len(q.list) == 0 {
		return 0, nil
	}
	defer func() { q.next = (q.next + 1) % len(q.list) }()
	return q.next, q.list[q.next]
}

// Remove removes the BatchInfo at position
func (q *Queue) Remove(position int) {
	// batchInfo := q.list[position]
	// delete(q.nonceByBatchNum, batchInfo.BatchNum)
	q.list = append(q.list[:position], q.list[position+1:]...)
	if len(q.list) == 0 {
		q.next = 0
	} else {
		q.next = position % len(q.list)
	}
}

// Push adds a new BatchInfo
func (q *Queue) Push(batchInfo *BatchInfo) {
	q.list = append(q.list, batchInfo)
	// q.nonceByBatchNum[batchInfo.BatchNum] = batchInfo.EthTx.Nonce()
}

// func (q *Queue) NonceByBatchNum(batchNum common.BatchNum) (uint64, bool) {
// 	nonce, ok := q.nonceByBatchNum[batchNum]
// 	return nonce, ok
// }
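
The Queue replaces the old slice-plus-next bookkeeping in Run below: Next cycles round-robin over the pending batches, and Remove re-mods the cursor against the shrunken list so it stays in bounds. Hypothetical usage showing the round-robin behavior (BatchInfo fields elided):

q := NewQueue()
q.Push(&BatchInfo{BatchNum: 1})
q.Push(&BatchInfo{BatchNum: 2})
q.Push(&BatchInfo{BatchNum: 3})

pos, b := q.Next() // pos 0, BatchNum 1
_, b = q.Next()    // BatchNum 2
_, b = q.Next()    // BatchNum 3
_, b = q.Next()    // wraps around: BatchNum 1 again
_ = b

q.Remove(pos) // drop BatchNum 1; the cursor is clamped to the new length
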
// Run the TxManager
func (t *TxManager) Run(ctx context.Context) {
-	next := 0
-	waitDuration := longWaitDuration
	var statsVars statsVars
	select {
	case statsVars = <-t.statsVarsCh:
@@ -263,8 +424,9 @@ func (t *TxManager) Run(ctx context.Context) {
	t.stats = statsVars.Stats
	t.syncSCVars(statsVars.Vars)
	log.Infow("TxManager: received initial statsVars",
-		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatch)
		"block", t.stats.Eth.LastBlock.Num, "batch", t.stats.Eth.LastBatchNum)
	timer := time.NewTimer(longWaitDuration)
	for {
		select {
		case <-ctx.Done():
@@ -273,8 +435,27 @@ func (t *TxManager) Run(ctx context.Context) {
		case statsVars := <-t.statsVarsCh:
			t.stats = statsVars.Stats
			t.syncSCVars(statsVars.Vars)
		case pipelineNum := <-t.discardPipelineCh:
			t.minPipelineNum = pipelineNum + 1
			if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
				continue
			} else if err != nil {
				log.Errorw("TxManager: removeBadBatchInfos", "err", err)
				continue
			}
		case batchInfo := <-t.batchCh:
			if batchInfo.PipelineNum < t.minPipelineNum {
				log.Warnw("TxManager: batchInfo received pipelineNum < minPipelineNum",
					"num", batchInfo.PipelineNum, "minNum", t.minPipelineNum)
			}
			if err := t.shouldSendRollupForgeBatch(batchInfo); err != nil {
				log.Warnw("TxManager: shouldSend", "err", err,
					"batch", batchInfo.BatchNum)
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch shouldSend: %v", err)})
				continue
			}
-			if err := t.sendRollupForgeBatch(ctx, batchInfo); ctx.Err() != nil {
			if err := t.sendRollupForgeBatch(ctx, batchInfo, false); ctx.Err() != nil {
				continue
			} else if err != nil {
@@ -282,19 +463,24 @@ func (t *TxManager) Run(ctx context.Context) {
				// If we reach here it's because our ethNode has
				// been unable to send the transaction to
				// ethereum. This could be due to the ethNode
				// failure, or an invalid transaction (that
				// can't be mined)
				log.Warnw("TxManager: forgeBatch send failed", "err", err,
					"batch", batchInfo.BatchNum)
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch send: %v", err)})
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch send: %v", err)})
				continue
			}
-			t.queue = append(t.queue, batchInfo)
-			waitDuration = t.cfg.TxManagerCheckInterval
			t.queue.Push(batchInfo)
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(t.cfg.TxManagerCheckInterval)
-		case <-time.After(waitDuration):
-			if len(t.queue) == 0 {
-				waitDuration = longWaitDuration
		case <-timer.C:
			queuePosition, batchInfo := t.queue.Next()
			if batchInfo == nil {
				timer.Reset(longWaitDuration)
				continue
			}
-			current := next
-			next = (current + 1) % len(t.queue)
-			batchInfo := t.queue[current]
			timer.Reset(t.cfg.TxManagerCheckInterval)
			if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
				continue
			} else if err != nil { //nolint:staticcheck
@@ -304,7 +490,8 @@ func (t *TxManager) Run(ctx context.Context) {
				// if it was not mined, mined and successful or
				// mined and failed. This could be due to the
				// ethNode failure.
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch receipt: %v", err)})
			}

			confirm, err := t.handleReceipt(ctx, batchInfo)
@@ -312,32 +499,106 @@ func (t *TxManager) Run(ctx context.Context) {
				continue
			} else if err != nil { //nolint:staticcheck
				// Transaction was rejected
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
-				}
-				t.coord.SendMsg(ctx, MsgStopPipeline{Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
				if err := t.removeBadBatchInfos(ctx); ctx.Err() != nil {
					continue
				} else if err != nil {
					log.Errorw("TxManager: removeBadBatchInfos", "err", err)
					continue
				}
				t.coord.SendMsg(ctx, MsgStopPipeline{
					Reason: fmt.Sprintf("forgeBatch reject: %v", err)})
				continue
			}
			now := time.Now()
			if !t.cfg.EthNoReuseNonce && confirm == nil &&
				now.Sub(batchInfo.SendTimestamp) > t.cfg.EthTxResendTimeout {
				log.Infow("TxManager: forgeBatch tx not mined before timeout, resending",
					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
				if err := t.sendRollupForgeBatch(ctx, batchInfo, true); ctx.Err() != nil {
					continue
				} else if err != nil {
					// If we reach here it's because our ethNode has
					// been unable to send the transaction to
					// ethereum. This could be due to the ethNode
					// failure, or an invalid transaction (that
					// can't be mined)
					log.Warnw("TxManager: forgeBatch resend failed", "err", err,
						"batch", batchInfo.BatchNum)
					t.coord.SendMsg(ctx, MsgStopPipeline{
						Reason: fmt.Sprintf("forgeBatch resend: %v", err)})
					continue
				}
			}
-			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
-				log.Debugw("TxManager tx for RollupForgeBatch confirmed",
-					"batch", batchInfo.BatchNum)
-				t.queue = append(t.queue[:current], t.queue[current+1:]...)
-				if len(t.queue) == 0 {
-					next = 0
-				} else {
-					next = current % len(t.queue)
-				}
-			}
			if confirm != nil && *confirm >= t.cfg.ConfirmBlocks {
				log.Debugw("TxManager: forgeBatch tx confirmed",
					"tx", batchInfo.EthTx.Hash(), "batch", batchInfo.BatchNum)
				t.queue.Remove(queuePosition)
			}
		}
	}
}
-// nolint reason: this function will be used in the future
-//nolint:unused
-func (t *TxManager) canForge(stats *synchronizer.Stats, blockNum int64) bool {
func (t *TxManager) removeBadBatchInfos(ctx context.Context) error {
	next := 0
	for {
		batchInfo := t.queue.At(next)
		if batchInfo == nil {
			break
		}
		if err := t.checkEthTransactionReceipt(ctx, batchInfo); ctx.Err() != nil {
			return nil
		} else if err != nil {
			// Our ethNode is giving an error different
			// than "not found" when getting the receipt
			// for the transaction, so we can't figure out
			// if it was not mined, mined and successful or
			// mined and failed. This could be due to the
			// ethNode failure.
			next++
			continue
		}
		confirm, err := t.handleReceipt(ctx, batchInfo)
		if ctx.Err() != nil {
			return nil
		} else if err != nil {
			// Transaction was rejected
			if t.minPipelineNum <= batchInfo.PipelineNum {
				t.minPipelineNum = batchInfo.PipelineNum + 1
			}
			t.queue.Remove(next)
			continue
		}
		// If tx is pending but is from a cancelled pipeline, remove it
		// from the queue
		if confirm == nil {
			if batchInfo.PipelineNum < t.minPipelineNum {
				t.queue.Remove(next)
				continue
			}
		}
		next++
	}
	accNonce, err := t.ethClient.EthNonceAt(ctx, t.account.Address, nil)
	if err != nil {
		return err
	}
	if !t.cfg.EthNoReuseNonce {
		t.accNextNonce = accNonce
	}
	return nil
}

func (t *TxManager) canForgeAt(blockNum int64) bool {
	return canForge(&t.consts.Auction, &t.vars.Auction,
-		&stats.Sync.Auction.CurrentSlot, &stats.Sync.Auction.NextSlot,
		&t.stats.Sync.Auction.CurrentSlot, &t.stats.Sync.Auction.NextSlot,
		t.cfg.ForgerAddress, blockNum)
}
func (t *TxManager) mustL1L2Batch(blockNum int64) bool {
	lastL1BatchBlockNum := t.lastSentL1BatchBlockNum
	if t.stats.Sync.LastL1BatchBlock > lastL1BatchBlockNum {
		lastL1BatchBlockNum = t.stats.Sync.LastL1BatchBlock
	}
	return blockNum-lastL1BatchBlockNum >= t.vars.Rollup.ForgeL1L2BatchTimeout-1
}
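
mustL1L2Batch is the TxManager-side counterpart of the pipeline's shouldL1L2Batch deadline: taking the freshest of the last synced and last sent L1-batch block, it flags that the protocol's ForgeL1L2BatchTimeout is about to expire, so the next batch sent must be an L1+L2 batch. A worked instance (numbers invented):

// blockNum - lastL1BatchBlockNum >= ForgeL1L2BatchTimeout - 1
// with lastL1BatchBlockNum = 100 and ForgeL1L2BatchTimeout = 10,
// any blockNum >= 109 returns true: one block of margin is kept
// before the contract itself would force the L1 batch.
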


@@ -0,0 +1,15 @@
package coordinator

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAddPerc(t *testing.T) {
	assert.Equal(t, "110", addPerc(big.NewInt(100), 10).String())
	assert.Equal(t, "101", addPerc(big.NewInt(100), 1).String())
	assert.Equal(t, "12", addPerc(big.NewInt(10), 20).String())
	assert.Equal(t, "1500", addPerc(big.NewInt(1000), 50).String())
}


@@ -1,8 +1,11 @@
package historydb

import (
	"database/sql"
	"errors"
	"fmt"
	"math/big"
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
@@ -32,9 +35,18 @@ func (hdb *HistoryDB) GetBatchAPI(batchNum common.BatchNum) (*BatchAPI, error) {
		return nil, tracerr.Wrap(err)
	}
	defer hdb.apiConnCon.Release()
	return hdb.getBatchAPI(hdb.dbRead, batchNum)
}

// GetBatchInternalAPI returns the batch with the given batchNum
func (hdb *HistoryDB) GetBatchInternalAPI(batchNum common.BatchNum) (*BatchAPI, error) {
	return hdb.getBatchAPI(hdb.dbRead, batchNum)
}

func (hdb *HistoryDB) getBatchAPI(d meddler.DB, batchNum common.BatchNum) (*BatchAPI, error) {
	batch := &BatchAPI{}
	return batch, tracerr.Wrap(meddler.QueryRow(
-		hdb.db, batch,
		d, batch,
		`SELECT batch.item_id, batch.batch_num, batch.eth_block_num,
		batch.forger_addr, batch.fees_collected, batch.total_fees_usd, batch.state_root,
		batch.num_accounts, batch.exit_root, batch.forge_l1_txs_num, batch.slot_num,
@@ -133,10 +145,10 @@ func (hdb *HistoryDB) GetBatchesAPI(
		queryStr += " DESC "
	}
	queryStr += fmt.Sprintf("LIMIT %d;", *limit)
-	query = hdb.db.Rebind(queryStr)
	query = hdb.dbRead.Rebind(queryStr)
	// log.Debug(query)
	batchPtrs := []*BatchAPI{}
-	if err := meddler.QueryAll(hdb.db, &batchPtrs, query, args...); err != nil {
	if err := meddler.QueryAll(hdb.dbRead, &batchPtrs, query, args...); err != nil {
		return nil, 0, tracerr.Wrap(err)
	}
	batches := db.SlicePtrsToSlice(batchPtrs).([]BatchAPI)
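
All the hdb.db → hdb.dbRead rewrites in this file belong to the same change as NewHistoryDB(db, db, nil) in the tests above: HistoryDB and L2DB now take separate read and write connections, so API queries can be served from a read replica while the node keeps a dedicated write connection. The tests simply pass the same handle twice. A deployment-style sketch (host names invented, and the (read, write) argument order is an assumption based on the usage in this diff):

// Hypothetical wiring with a primary and a replica:
dbWrite, err := dbUtils.InitSQLDB(5432, "db-primary", "hermez", pass, "hermez")
if err != nil {
	log.Fatal(err)
}
dbRead, err := dbUtils.InitSQLDB(5432, "db-replica", "hermez", pass, "hermez")
if err != nil {
	log.Fatal(err)
}
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, nil)
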
@@ -156,7 +168,7 @@ func (hdb *HistoryDB) GetBestBidAPI(slotNum *int64) (BidAPI, error) {
}
defer hdb.apiConnCon.Release()
err = meddler.QueryRow(
- hdb.db, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
+ hdb.dbRead, bid, `SELECT bid.*, block.timestamp, coordinator.forger_addr, coordinator.url
FROM bid INNER JOIN block ON bid.eth_block_num = block.eth_block_num
INNER JOIN (
SELECT bidder_addr, MAX(item_id) AS item_id FROM coordinator
@@ -180,6 +192,14 @@ func (hdb *HistoryDB) GetBestBidsAPI(
return nil, 0, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
+ return hdb.getBestBidsAPI(hdb.dbRead, minSlotNum, maxSlotNum, bidderAddr, limit, order)
+ }
+ func (hdb *HistoryDB) getBestBidsAPI(
+ d meddler.DB,
+ minSlotNum, maxSlotNum *int64,
+ bidderAddr *ethCommon.Address,
+ limit *uint, order string,
+ ) ([]BidAPI, uint64, error) {
var query string
var args []interface{}
// JOIN the best bid of each slot with the latest update of each coordinator
@@ -212,9 +232,9 @@ func (hdb *HistoryDB) GetBestBidsAPI(
if limit != nil {
queryStr += fmt.Sprintf("LIMIT %d;", *limit)
}
- query = hdb.db.Rebind(queryStr)
+ query = hdb.dbRead.Rebind(queryStr)
bidPtrs := []*BidAPI{}
- if err := meddler.QueryAll(hdb.db, &bidPtrs, query, args...); err != nil {
+ if err := meddler.QueryAll(d, &bidPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
// log.Debug(query)
@@ -296,9 +316,9 @@ func (hdb *HistoryDB) GetBidsAPI(
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
- query = hdb.db.Rebind(query)
+ query = hdb.dbRead.Rebind(query)
bids := []*BidAPI{}
- if err := meddler.QueryAll(hdb.db, &bids, query, argsQ...); err != nil {
+ if err := meddler.QueryAll(hdb.dbRead, &bids, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(bids) == 0 {
@@ -384,9 +404,9 @@ func (hdb *HistoryDB) GetTokensAPI(
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
- query = hdb.db.Rebind(query)
+ query = hdb.dbRead.Rebind(query)
tokens := []*TokenWithUSD{}
- if err := meddler.QueryAll(hdb.db, &tokens, query, argsQ...); err != nil {
+ if err := meddler.QueryAll(hdb.dbRead, &tokens, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(tokens) == 0 {
@@ -408,7 +428,7 @@ func (hdb *HistoryDB) GetTxAPI(txID common.TxID) (*TxAPI, error) {
defer hdb.apiConnCon.Release()
tx := &TxAPI{}
err = meddler.QueryRow(
- hdb.db, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
+ hdb.dbRead, tx, `SELECT tx.item_id, tx.is_l1, tx.id, tx.type, tx.position,
hez_idx(tx.effective_from_idx, token.symbol) AS from_idx, tx.from_eth_addr, tx.from_bjj,
hez_idx(tx.to_idx, token.symbol) AS to_idx, tx.to_eth_addr, tx.to_bjj,
tx.amount, tx.amount_success, tx.token_id, tx.amount_usd,
@@ -541,10 +561,10 @@ func (hdb *HistoryDB) GetTxsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr) query = hdb.dbRead.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
txsPtrs := []*TxAPI{} txsPtrs := []*TxAPI{}
if err := meddler.QueryAll(hdb.db, &txsPtrs, query, args...); err != nil { if err := meddler.QueryAll(hdb.dbRead, &txsPtrs, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI) txs := db.SlicePtrsToSlice(txsPtrs).([]TxAPI)
@@ -564,7 +584,7 @@ func (hdb *HistoryDB) GetExitAPI(batchNum *uint, idx *common.Idx) (*ExitAPI, err
defer hdb.apiConnCon.Release()
exit := &ExitAPI{}
err = meddler.QueryRow(
- hdb.db, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
+ hdb.dbRead, exit, `SELECT exit_tree.item_id, exit_tree.batch_num,
hez_idx(exit_tree.account_idx, token.symbol) AS account_idx,
account.bjj, account.eth_addr,
exit_tree.merkle_proof, exit_tree.balance, exit_tree.instant_withdrawn,
@@ -685,10 +705,10 @@ func (hdb *HistoryDB) GetExitsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr) query = hdb.dbRead.Rebind(queryStr)
// log.Debug(query) // log.Debug(query)
exits := []*ExitAPI{} exits := []*ExitAPI{}
if err := meddler.QueryAll(hdb.db, &exits, query, args...); err != nil { if err := meddler.QueryAll(hdb.dbRead, &exits, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(exits) == 0 { if len(exits) == 0 {
@@ -697,25 +717,6 @@ func (hdb *HistoryDB) GetExitsAPI(
return db.SlicePtrsToSlice(exits).([]ExitAPI), exits[0].TotalItems - uint64(len(exits)), nil
}
- // GetBucketUpdatesAPI retrieves latest values for each bucket
- func (hdb *HistoryDB) GetBucketUpdatesAPI() ([]BucketUpdateAPI, error) {
- cancel, err := hdb.apiConnCon.Acquire()
- defer cancel()
- if err != nil {
- return nil, tracerr.Wrap(err)
- }
- defer hdb.apiConnCon.Release()
- var bucketUpdates []*BucketUpdateAPI
- err = meddler.QueryAll(
- hdb.db, &bucketUpdates,
- `SELECT num_bucket, withdrawals FROM bucket_update
- WHERE item_id in(SELECT max(item_id) FROM bucket_update
- group by num_bucket)
- ORDER BY num_bucket ASC;`,
- )
- return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
- }
// GetCoordinatorsAPI returns a list of coordinators from the DB and pagination info
func (hdb *HistoryDB) GetCoordinatorsAPI(
bidderAddr, forgerAddr *ethCommon.Address,
@@ -772,10 +773,10 @@ func (hdb *HistoryDB) GetCoordinatorsAPI(
queryStr += " DESC " queryStr += " DESC "
} }
queryStr += fmt.Sprintf("LIMIT %d;", *limit) queryStr += fmt.Sprintf("LIMIT %d;", *limit)
query = hdb.db.Rebind(queryStr) query = hdb.dbRead.Rebind(queryStr)
coordinators := []*CoordinatorAPI{} coordinators := []*CoordinatorAPI{}
if err := meddler.QueryAll(hdb.db, &coordinators, query, args...); err != nil { if err := meddler.QueryAll(hdb.dbRead, &coordinators, query, args...); err != nil {
return nil, 0, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if len(coordinators) == 0 { if len(coordinators) == 0 {
@@ -795,34 +796,11 @@ func (hdb *HistoryDB) GetAuctionVarsAPI() (*common.AuctionVariables, error) {
defer hdb.apiConnCon.Release()
auctionVars := &common.AuctionVariables{}
err = meddler.QueryRow(
- hdb.db, auctionVars, `SELECT * FROM auction_vars;`,
+ hdb.dbRead, auctionVars, `SELECT * FROM auction_vars;`,
)
return auctionVars, tracerr.Wrap(err)
}
- // GetAuctionVarsUntilSetSlotNumAPI returns all the updates of the auction vars
- // from the last entry in which DefaultSlotSetBidSlotNum <= slotNum
- func (hdb *HistoryDB) GetAuctionVarsUntilSetSlotNumAPI(slotNum int64, maxItems int) ([]MinBidInfo, error) {
- cancel, err := hdb.apiConnCon.Acquire()
- defer cancel()
- if err != nil {
- return nil, tracerr.Wrap(err)
- }
- defer hdb.apiConnCon.Release()
- auctionVars := []*MinBidInfo{}
- query := `
- SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
- WHERE default_slot_set_bid_slot_num < $1
- ORDER BY default_slot_set_bid_slot_num DESC
- LIMIT $2;
- `
- err = meddler.QueryAll(hdb.db, &auctionVars, query, slotNum, maxItems)
- if err != nil {
- return nil, tracerr.Wrap(err)
- }
- return db.SlicePtrsToSlice(auctionVars).([]MinBidInfo), nil
- }
// GetAccountAPI returns an account by its index
func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
@@ -832,11 +810,19 @@ func (hdb *HistoryDB) GetAccountAPI(idx common.Idx) (*AccountAPI, error) {
}
defer hdb.apiConnCon.Release()
account := &AccountAPI{}
- err = meddler.QueryRow(hdb.db, account, `SELECT account.item_id, hez_idx(account.idx,
+ err = meddler.QueryRow(hdb.dbRead, account, `SELECT account.item_id, hez_idx(account.idx,
token.symbol) as idx, account.batch_num, account.bjj, account.eth_addr,
token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
- token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update
- FROM account INNER JOIN token ON account.token_id = token.token_id WHERE idx = $1;`, idx)
+ token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd,
+ token.usd_update, account_update.nonce, account_update.balance
+ FROM account inner JOIN (
+ SELECT idx, nonce, balance
+ FROM account_update
+ WHERE idx = $1
+ ORDER BY item_id DESC LIMIT 1
+ ) AS account_update ON account_update.idx = account.idx
+ INNER JOIN token ON account.token_id = token.token_id
+ WHERE account.idx = $1;`, idx)
if err != nil {
return nil, tracerr.Wrap(err)
@@ -864,8 +850,13 @@ func (hdb *HistoryDB) GetAccountsAPI(
queryStr := `SELECT account.item_id, hez_idx(account.idx, token.symbol) as idx, account.batch_num,
account.bjj, account.eth_addr, token.token_id, token.item_id AS token_item_id, token.eth_block_num AS token_block,
token.eth_addr as token_eth_addr, token.name, token.symbol, token.decimals, token.usd, token.usd_update,
- COUNT(*) OVER() AS total_items
- FROM account INNER JOIN token ON account.token_id = token.token_id `
+ account_update.nonce, account_update.balance, COUNT(*) OVER() AS total_items
+ FROM account inner JOIN (
+ SELECT DISTINCT idx,
+ first_value(nonce) over(partition by idx ORDER BY item_id DESC) as nonce,
+ first_value(balance) over(partition by idx ORDER BY item_id DESC) as balance
+ FROM account_update
+ ) AS account_update ON account_update.idx = account.idx INNER JOIN token ON account.token_id = token.token_id `
// Apply filters
nextIsAnd := false
// ethAddr filter
@@ -914,10 +905,10 @@ func (hdb *HistoryDB) GetAccountsAPI(
if err != nil {
return nil, 0, tracerr.Wrap(err)
}
- query = hdb.db.Rebind(query)
+ query = hdb.dbRead.Rebind(query)
accounts := []*AccountAPI{}
- if err := meddler.QueryAll(hdb.db, &accounts, query, argsQ...); err != nil {
+ if err := meddler.QueryAll(hdb.dbRead, &accounts, query, argsQ...); err != nil {
return nil, 0, tracerr.Wrap(err)
}
if len(accounts) == 0 {
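Both account queries now join account against its latest account_update row so the API can return the current nonce and balance. The single-account lookup uses ORDER BY item_id DESC LIMIT 1; the paginated listing needs the newest row per idx, hence DISTINCT plus first_value window functions. The "latest row per key" idiom in isolation (table and column names as in this diff; the constant name is illustrative):

// latestPerIdx yields one row per idx carrying the nonce and balance of the
// most recent account_update: first_value(...) OVER (PARTITION BY idx ORDER BY
// item_id DESC) picks the newest value inside each idx partition, and DISTINCT
// collapses the partition to a single row.
const latestPerIdx = `
	SELECT DISTINCT idx,
		first_value(nonce) OVER (PARTITION BY idx ORDER BY item_id DESC) AS nonce,
		first_value(balance) OVER (PARTITION BY idx ORDER BY item_id DESC) AS balance
	FROM account_update;`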
@@ -928,99 +919,267 @@ func (hdb *HistoryDB) GetAccountsAPI(
accounts[0].TotalItems - uint64(len(accounts)), nil
}
- // GetMetricsAPI returns metrics
- func (hdb *HistoryDB) GetMetricsAPI(lastBatchNum common.BatchNum) (*Metrics, error) {
+ // GetCommonAccountAPI returns the account associated to an account idx
+ func (hdb *HistoryDB) GetCommonAccountAPI(idx common.Idx) (*common.Account, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
- metricsTotals := &MetricsTotals{}
- metrics := &Metrics{}
+ account := &common.Account{}
err = meddler.QueryRow(
- hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
- COALESCE (MIN(tx.batch_num), 0) as batch_num, COALESCE (MIN(block.timestamp),
- NOW()) AS min_timestamp, COALESCE (MAX(block.timestamp), NOW()) AS max_timestamp
- FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
- WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`)
+ hdb.dbRead, account, `SELECT * FROM account WHERE idx = $1;`, idx,
+ )
+ return account, tracerr.Wrap(err)
+ }
+ // GetCoordinatorAPI returns a coordinator by its bidderAddr
+ func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
+ cancel, err := hdb.apiConnCon.Acquire()
+ defer cancel()
if err != nil {
return nil, tracerr.Wrap(err)
}
+ defer hdb.apiConnCon.Release()
+ return hdb.getCoordinatorAPI(hdb.dbRead, bidderAddr)
+ }
- seconds := metricsTotals.MaxTimestamp.Sub(metricsTotals.MinTimestamp).Seconds()
- // Avoid dividing by 0
- if seconds == 0 {
+ func (hdb *HistoryDB) getCoordinatorAPI(d meddler.DB, bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
+ coordinator := &CoordinatorAPI{}
+ err := meddler.QueryRow(
+ d, coordinator,
+ "SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
+ bidderAddr,
+ )
+ return coordinator, tracerr.Wrap(err)
+ }
+ // GetNodeInfoAPI returns the NodeInfo
+ func (hdb *HistoryDB) GetNodeInfoAPI() (*NodeInfo, error) {
+ cancel, err := hdb.apiConnCon.Acquire()
+ defer cancel()
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ defer hdb.apiConnCon.Release()
+ return hdb.GetNodeInfo()
+ }
+ // GetBucketUpdatesInternalAPI returns the latest bucket updates
+ func (hdb *HistoryDB) GetBucketUpdatesInternalAPI() ([]BucketUpdateAPI, error) {
+ var bucketUpdates []*BucketUpdateAPI
+ err := meddler.QueryAll(
+ hdb.dbRead, &bucketUpdates,
+ `SELECT num_bucket, withdrawals FROM bucket_update
+ WHERE item_id in(SELECT max(item_id) FROM bucket_update
+ group by num_bucket)
+ ORDER BY num_bucket ASC;`,
+ )
+ return db.SlicePtrsToSlice(bucketUpdates).([]BucketUpdateAPI), tracerr.Wrap(err)
+ }
+ // GetNextForgersInternalAPI returns next forgers
+ func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVariables,
+ auctionConsts *common.AuctionConstants,
+ lastBlock common.Block, currentSlot, lastClosedSlot int64) ([]NextForgerAPI, error) {
+ secondsPerBlock := int64(15) //nolint:gomnd
+ // currentSlot and lastClosedSlot included
+ limit := uint(lastClosedSlot - currentSlot + 1)
+ bids, _, err := hdb.getBestBidsAPI(hdb.dbRead, &currentSlot, &lastClosedSlot, nil, &limit, "ASC")
+ if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
+ return nil, tracerr.Wrap(err)
+ }
+ nextForgers := []NextForgerAPI{}
+ // Get min bid info
+ var minBidInfo []MinBidInfo
+ if currentSlot >= auctionVars.DefaultSlotSetBidSlotNum {
+ // All min bids can be calculated with the last update of AuctionVariables
+ minBidInfo = []MinBidInfo{{
+ DefaultSlotSetBid: auctionVars.DefaultSlotSetBid,
+ DefaultSlotSetBidSlotNum: auctionVars.DefaultSlotSetBidSlotNum,
+ }}
+ } else {
+ // Get all the relevant updates from the DB
+ minBidInfo, err = hdb.getMinBidInfo(hdb.dbRead, currentSlot, lastClosedSlot)
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ }
+ // Create nextForger for each slot
+ for i := currentSlot; i <= lastClosedSlot; i++ {
+ fromBlock := i*int64(auctionConsts.BlocksPerSlot) +
+ auctionConsts.GenesisBlockNum
+ toBlock := (i+1)*int64(auctionConsts.BlocksPerSlot) +
+ auctionConsts.GenesisBlockNum - 1
+ nextForger := NextForgerAPI{
+ Period: Period{
+ SlotNum: i,
+ FromBlock: fromBlock,
+ ToBlock: toBlock,
+ FromTimestamp: lastBlock.Timestamp.Add(time.Second *
+ time.Duration(secondsPerBlock*(fromBlock-lastBlock.Num))),
+ ToTimestamp: lastBlock.Timestamp.Add(time.Second *
+ time.Duration(secondsPerBlock*(toBlock-lastBlock.Num))),
+ },
+ }
+ foundForger := false
+ // If there is a bid for a slot, get forger (coordinator)
+ for j := range bids {
+ slotNum := bids[j].SlotNum
+ if slotNum == i {
+ // There's a bid for the slot
+ // Check if the bid is greater than the minimum required
+ for i := 0; i < len(minBidInfo); i++ {
+ // Find the most recent update
+ if slotNum >= minBidInfo[i].DefaultSlotSetBidSlotNum {
+ // Get min bid
+ minBidSelector := slotNum % int64(len(auctionVars.DefaultSlotSetBid))
+ minBid := minBidInfo[i].DefaultSlotSetBid[minBidSelector]
+ // Check if the bid has beaten the minimum
+ bid, ok := new(big.Int).SetString(string(bids[j].BidValue), 10)
+ if !ok {
+ return nil, tracerr.New("Wrong bid value, error parsing it as big.Int")
+ }
+ if minBid.Cmp(bid) == 1 {
+ // Min bid is greater than bid, the slot will be forged by boot coordinator
+ break
+ }
+ foundForger = true
+ break
+ }
+ }
+ if !foundForger { // There is no bid or it's smaller than the minimum
+ break
+ }
+ coordinator, err := hdb.getCoordinatorAPI(hdb.dbRead, bids[j].Bidder)
+ if err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ nextForger.Coordinator = *coordinator
+ break
+ }
+ }
+ // If there is no bid, the coordinator that will forge is boot coordinator
+ if !foundForger {
+ nextForger.Coordinator = CoordinatorAPI{
+ Forger: auctionVars.BootCoordinator,
+ URL: auctionVars.BootCoordinatorURL,
+ }
+ }
+ nextForgers = append(nextForgers, nextForger)
+ }
+ return nextForgers, nil
+ }
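The Period fields above are plain block arithmetic: slot i spans blocks i*BlocksPerSlot+GenesisBlockNum through (i+1)*BlocksPerSlot+GenesisBlockNum-1, and timestamps are extrapolated from the last synced block at the hardcoded 15 seconds per block. A worked example with invented constants:

// Illustrative numbers only: with BlocksPerSlot=40 and GenesisBlockNum=1000,
// slot 3 covers blocks 1120..1159; if the last synced block is 1085, the slot
// starts (1120-1085)*15s = 525s after that block's timestamp.
const blocksPerSlot, genesisBlockNum, secondsPerBlock = 40, 1000, 15

func slotRange(slot int64) (fromBlock, toBlock int64) {
	fromBlock = slot*blocksPerSlot + genesisBlockNum
	toBlock = (slot+1)*blocksPerSlot + genesisBlockNum - 1
	return fromBlock, toBlock
}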
+ // GetMetricsInternalAPI returns the MetricsAPI
+ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) {
+ var metrics MetricsAPI
+ // Get the first and last batch of the last 24h and their timestamps
+ // if u.state.Network.LastBatch == nil {
+ // return &metrics, nil
+ // }
+ type period struct {
+ FromBatchNum common.BatchNum `meddler:"from_batch_num"`
+ FromTimestamp time.Time `meddler:"from_timestamp"`
+ ToBatchNum common.BatchNum `meddler:"-"`
+ ToTimestamp time.Time `meddler:"to_timestamp"`
+ }
+ p := &period{
+ ToBatchNum: lastBatchNum,
+ }
+ if err := meddler.QueryRow(
+ hdb.dbRead, p, `SELECT
+ COALESCE (MIN(batch.batch_num), 0) as from_batch_num,
+ COALESCE (MIN(block.timestamp), NOW()) AS from_timestamp,
+ COALESCE (MAX(block.timestamp), NOW()) AS to_timestamp
+ FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
+ WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
+ ); err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ // Get the amount of txs of that period
+ row := hdb.dbRead.QueryRow(
+ `SELECT COUNT(*) as total_txs FROM tx WHERE tx.batch_num between $1 AND $2;`,
+ p.FromBatchNum, p.ToBatchNum,
+ )
+ var nTxs int
+ if err := row.Scan(&nTxs); err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ // Set txs/s
+ seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
+ if seconds == 0 { // Avoid dividing by 0
seconds++
}
- metrics.TransactionsPerSecond = float64(metricsTotals.TotalTransactions) / seconds
- if (lastBatchNum - metricsTotals.FirstBatchNum) > 0 {
- metrics.TransactionsPerBatch = float64(metricsTotals.TotalTransactions) /
- float64(lastBatchNum-metricsTotals.FirstBatchNum+1)
- } else {
- metrics.TransactionsPerBatch = float64(0)
- }
- err = meddler.QueryRow(
- hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
- COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
- WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
- if err != nil {
+ metrics.TransactionsPerSecond = float64(nTxs) / seconds
+ // Set txs/batch
+ nBatches := p.ToBatchNum - p.FromBatchNum + 1
+ if nBatches == 0 { // Avoid dividing by 0
+ nBatches++
+ }
+ if (p.ToBatchNum - p.FromBatchNum) > 0 {
+ fmt.Printf("DBG ntxs: %v, nBatches: %v\n", nTxs, nBatches)
+ metrics.TransactionsPerBatch = float64(nTxs) /
+ float64(nBatches)
+ } else {
+ metrics.TransactionsPerBatch = 0
+ }
+ // Get total fee of that period
+ row = hdb.dbRead.QueryRow(
+ `SELECT COALESCE (SUM(total_fees_usd), 0) FROM batch WHERE batch_num between $1 AND $2;`,
+ p.FromBatchNum, p.ToBatchNum,
+ )
+ var totalFee float64
+ if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err)
}
- if metricsTotals.TotalBatches > 0 {
- metrics.BatchFrequency = seconds / float64(metricsTotals.TotalBatches)
- } else {
- metrics.BatchFrequency = 0
- }
- if metricsTotals.TotalTransactions > 0 {
- metrics.AvgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
+ // Set batch frequency
+ metrics.BatchFrequency = seconds / float64(nBatches)
+ if nTxs > 0 {
+ metrics.AvgTransactionFee = totalFee / float64(nTxs)
} else {
metrics.AvgTransactionFee = 0
}
- err = meddler.QueryRow(
- hdb.db, metrics,
- `SELECT COUNT(*) AS total_bjjs, COUNT(DISTINCT(bjj)) AS total_accounts FROM account;`)
- if err != nil {
+ // Get and set amount of registered accounts
+ type registeredAccounts struct {
+ TotalIdx int64 `meddler:"total_idx"`
+ TotalBJJ int64 `meddler:"total_bjj"`
+ }
+ ra := &registeredAccounts{}
+ if err := meddler.QueryRow(
+ hdb.dbRead, ra,
+ `SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`,
+ ); err != nil {
return nil, tracerr.Wrap(err)
}
- return metrics, nil
+ metrics.TotalAccounts = ra.TotalIdx
+ metrics.TotalBJJs = ra.TotalBJJ
+ // Get and set estimated time to forge L1 tx
+ row = hdb.dbRead.QueryRow(
+ `SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
+ INNER JOIN block AS added ON tx.eth_block_num = added.eth_block_num
+ INNER JOIN batch AS forged_batch ON tx.batch_num = forged_batch.batch_num
+ INNER JOIN block AS forged ON forged_batch.eth_block_num = forged.eth_block_num
+ WHERE tx.batch_num between $1 and $2 AND tx.is_l1 AND tx.user_origin;`,
+ p.FromBatchNum, p.ToBatchNum,
+ )
+ var timeToForgeL1 float64
+ if err := row.Scan(&timeToForgeL1); err != nil {
+ return nil, tracerr.Wrap(err)
+ }
+ metrics.EstimatedTimeToForgeL1 = timeToForgeL1
+ return &metrics, nil
}
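Summing up the new derivations over the trailing 24h window: txs/s = nTxs/seconds, txs/batch = nTxs/nBatches, batch frequency = seconds/nBatches, and average fee = totalFee/nTxs, each guarded against division by zero. A quick numeric sanity check with invented figures:

// 8640 txs in 288 batches over 86400s with $4320 in fees gives:
// 0.1 txs/s, 30 txs/batch, one batch every 300s, and $0.50 average fee.
nTxs, nBatches, seconds, totalFee := 8640.0, 288.0, 86400.0, 4320.0
fmt.Println(nTxs/seconds, nTxs/nBatches, seconds/nBatches, totalFee/nTxs)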
- // GetAvgTxFeeAPI returns average transaction fee of the last 1h
- func (hdb *HistoryDB) GetAvgTxFeeAPI() (float64, error) {
+ // GetStateAPI returns the StateAPI
+ func (hdb *HistoryDB) GetStateAPI() (*StateAPI, error) {
cancel, err := hdb.apiConnCon.Acquire()
defer cancel()
if err != nil {
- return 0, tracerr.Wrap(err)
+ return nil, tracerr.Wrap(err)
}
defer hdb.apiConnCon.Release()
- metricsTotals := &MetricsTotals{}
- err = meddler.QueryRow(
- hdb.db, metricsTotals, `SELECT COUNT(tx.*) as total_txs,
- COALESCE (MIN(tx.batch_num), 0) as batch_num
- FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
- WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`)
- if err != nil {
- return 0, tracerr.Wrap(err)
- }
- err = meddler.QueryRow(
- hdb.db, metricsTotals, `SELECT COUNT(*) AS total_batches,
- COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
- WHERE batch_num > $1;`, metricsTotals.FirstBatchNum)
- if err != nil {
- return 0, tracerr.Wrap(err)
- }
- var avgTransactionFee float64
- if metricsTotals.TotalTransactions > 0 {
- avgTransactionFee = metricsTotals.TotalFeesUSD / float64(metricsTotals.TotalTransactions)
- } else {
- avgTransactionFee = 0
- }
- return avgTransactionFee, nil
+ return hdb.getStateAPI(hdb.dbRead)
}

View File

@@ -27,30 +27,35 @@ const (
// HistoryDB persist the historic of the rollup
type HistoryDB struct {
- db *sqlx.DB
+ dbRead *sqlx.DB
+ dbWrite *sqlx.DB
apiConnCon *db.APIConnectionController
}
// NewHistoryDB initialize the DB
- func NewHistoryDB(db *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
- return &HistoryDB{db: db, apiConnCon: apiConnCon}
+ func NewHistoryDB(dbRead, dbWrite *sqlx.DB, apiConnCon *db.APIConnectionController) *HistoryDB {
+ return &HistoryDB{
+ dbRead: dbRead,
+ dbWrite: dbWrite,
+ apiConnCon: apiConnCon,
+ }
}
// DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes.
func (hdb *HistoryDB) DB() *sqlx.DB {
- return hdb.db
+ return hdb.dbWrite
}
// AddBlock insert a block into the DB
- func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.db, block) }
+ func (hdb *HistoryDB) AddBlock(block *common.Block) error { return hdb.addBlock(hdb.dbWrite, block) }
func (hdb *HistoryDB) addBlock(d meddler.DB, block *common.Block) error {
return tracerr.Wrap(meddler.Insert(d, "block", block))
}
// AddBlocks inserts blocks into the DB
func (hdb *HistoryDB) AddBlocks(blocks []common.Block) error {
- return tracerr.Wrap(hdb.addBlocks(hdb.db, blocks))
+ return tracerr.Wrap(hdb.addBlocks(hdb.dbWrite, blocks))
}
func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
@@ -61,7 +66,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
timestamp,
hash
) VALUES %s;`,
- blocks[:],
+ blocks,
))
}
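Splitting the handle into dbRead and dbWrite lets API queries go to a read replica while the synchronizer keeps writing to the primary; callers without a replica can simply pass the same connection twice. A wiring sketch (connection strings and variable names are illustrative, not from this diff):

// Open the write (primary) and read (replica) connections, then build the DB.
dbWrite, err := sqlx.Connect("postgres", "postgres://hermez@primary:5432/hermez")
if err != nil {
	log.Fatal(err)
}
dbRead, err := sqlx.Connect("postgres", "postgres://hermez@replica:5432/hermez")
if err != nil {
	log.Fatal(err)
}
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)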
@@ -69,7 +74,7 @@ func (hdb *HistoryDB) addBlocks(d meddler.DB, blocks []common.Block) error {
func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
block := &common.Block{}
err := meddler.QueryRow(
- hdb.db, block,
+ hdb.dbRead, block,
"SELECT * FROM block WHERE eth_block_num = $1;", blockNum,
)
return block, tracerr.Wrap(err)
@@ -79,7 +84,7 @@ func (hdb *HistoryDB) GetBlock(blockNum int64) (*common.Block, error) {
func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
var blocks []*common.Block
err := meddler.QueryAll(
- hdb.db, &blocks,
+ hdb.dbRead, &blocks,
"SELECT * FROM block ORDER BY eth_block_num;",
)
return db.SlicePtrsToSlice(blocks).([]common.Block), tracerr.Wrap(err)
@@ -89,7 +94,7 @@ func (hdb *HistoryDB) GetAllBlocks() ([]common.Block, error) {
func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
var blocks []*common.Block
err := meddler.QueryAll(
- hdb.db, &blocks,
+ hdb.dbRead, &blocks,
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2 ORDER BY eth_block_num;",
from, to,
)
@@ -100,13 +105,13 @@ func (hdb *HistoryDB) getBlocks(from, to int64) ([]common.Block, error) {
func (hdb *HistoryDB) GetLastBlock() (*common.Block, error) {
block := &common.Block{}
err := meddler.QueryRow(
- hdb.db, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
+ hdb.dbRead, block, "SELECT * FROM block ORDER BY eth_block_num DESC LIMIT 1;",
)
return block, tracerr.Wrap(err)
}
// AddBatch insert a Batch into the DB
- func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.db, batch) }
+ func (hdb *HistoryDB) AddBatch(batch *common.Batch) error { return hdb.addBatch(hdb.dbWrite, batch) }
func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// Calculate total collected fees in USD
// Get IDs of collected tokens for fees
@@ -129,9 +134,9 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
if err != nil {
return tracerr.Wrap(err)
}
- query = hdb.db.Rebind(query)
+ query = hdb.dbWrite.Rebind(query)
if err := meddler.QueryAll(
- hdb.db, &tokenPrices, query, args...,
+ hdb.dbWrite, &tokenPrices, query, args...,
); err != nil {
return tracerr.Wrap(err)
}
@@ -153,7 +158,7 @@ func (hdb *HistoryDB) addBatch(d meddler.DB, batch *common.Batch) error {
// AddBatches insert Batches into the DB
func (hdb *HistoryDB) AddBatches(batches []common.Batch) error {
- return tracerr.Wrap(hdb.addBatches(hdb.db, batches))
+ return tracerr.Wrap(hdb.addBatches(hdb.dbWrite, batches))
}
func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
for i := 0; i < len(batches); i++ {
@@ -164,11 +169,24 @@ func (hdb *HistoryDB) addBatches(d meddler.DB, batches []common.Batch) error {
return nil
}
+ // GetBatch returns the batch with the given batchNum
+ func (hdb *HistoryDB) GetBatch(batchNum common.BatchNum) (*common.Batch, error) {
+ var batch common.Batch
+ err := meddler.QueryRow(
+ hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
+ batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
+ batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
+ batch.slot_num, batch.total_fees_usd FROM batch WHERE batch_num = $1;`,
+ batchNum,
+ )
+ return &batch, err
+ }
// GetAllBatches retrieve all batches from the DB
func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
var batches []*common.Batch
err := meddler.QueryAll(
- hdb.db, &batches,
+ hdb.dbRead, &batches,
`SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr, batch.fees_collected,
batch.fee_idxs_coordinator, batch.state_root, batch.num_accounts, batch.last_idx, batch.exit_root,
batch.forge_l1_txs_num, batch.slot_num, batch.total_fees_usd FROM batch
@@ -181,7 +199,7 @@ func (hdb *HistoryDB) GetAllBatches() ([]common.Batch, error) {
func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, error) {
var batches []*common.Batch
err := meddler.QueryAll(
- hdb.db, &batches,
+ hdb.dbRead, &batches,
`SELECT batch_num, eth_block_num, forger_addr, fees_collected, fee_idxs_coordinator,
state_root, num_accounts, last_idx, exit_root, forge_l1_txs_num, slot_num, total_fees_usd
FROM batch WHERE $1 <= batch_num AND batch_num < $2 ORDER BY batch_num;`,
@@ -193,7 +211,7 @@ func (hdb *HistoryDB) GetBatches(from, to common.BatchNum) ([]common.Batch, erro
// GetFirstBatchBlockNumBySlot returns the ethereum block number of the first
// batch within a slot
func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error) {
- row := hdb.db.QueryRow(
+ row := hdb.dbRead.QueryRow(
`SELECT eth_block_num FROM batch
WHERE slot_num = $1 ORDER BY batch_num ASC LIMIT 1;`, slotNum,
)
@@ -203,14 +221,26 @@ func (hdb *HistoryDB) GetFirstBatchBlockNumBySlot(slotNum int64) (int64, error)
// GetLastBatchNum returns the BatchNum of the latest forged batch
func (hdb *HistoryDB) GetLastBatchNum() (common.BatchNum, error) {
- row := hdb.db.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
+ row := hdb.dbRead.QueryRow("SELECT batch_num FROM batch ORDER BY batch_num DESC LIMIT 1;")
var batchNum common.BatchNum
return batchNum, tracerr.Wrap(row.Scan(&batchNum))
}
+ // GetLastBatch returns the last forged batch
+ func (hdb *HistoryDB) GetLastBatch() (*common.Batch, error) {
+ var batch common.Batch
+ err := meddler.QueryRow(
+ hdb.dbRead, &batch, `SELECT batch.batch_num, batch.eth_block_num, batch.forger_addr,
+ batch.fees_collected, batch.fee_idxs_coordinator, batch.state_root,
+ batch.num_accounts, batch.last_idx, batch.exit_root, batch.forge_l1_txs_num,
+ batch.slot_num, batch.total_fees_usd FROM batch ORDER BY batch_num DESC LIMIT 1;`,
+ )
+ return &batch, err
+ }
// GetLastL1BatchBlockNum returns the blockNum of the latest forged l1Batch
func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
- row := hdb.db.QueryRow(`SELECT eth_block_num FROM batch
+ row := hdb.dbRead.QueryRow(`SELECT eth_block_num FROM batch
WHERE forge_l1_txs_num IS NOT NULL
ORDER BY batch_num DESC LIMIT 1;`)
var blockNum int64
@@ -220,7 +250,7 @@ func (hdb *HistoryDB) GetLastL1BatchBlockNum() (int64, error) {
// GetLastL1TxsNum returns the greatest ForgeL1TxsNum in the DB from forged
// batches. If there's no batch in the DB (nil, nil) is returned.
func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
- row := hdb.db.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
+ row := hdb.dbRead.QueryRow("SELECT MAX(forge_l1_txs_num) FROM batch;")
lastL1TxsNum := new(int64)
return lastL1TxsNum, tracerr.Wrap(row.Scan(&lastL1TxsNum))
}
@@ -231,15 +261,15 @@ func (hdb *HistoryDB) GetLastL1TxsNum() (*int64, error) {
func (hdb *HistoryDB) Reorg(lastValidBlock int64) error {
var err error
if lastValidBlock < 0 {
- _, err = hdb.db.Exec("DELETE FROM block;")
+ _, err = hdb.dbWrite.Exec("DELETE FROM block;")
} else {
- _, err = hdb.db.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
+ _, err = hdb.dbWrite.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
}
return tracerr.Wrap(err)
}
// AddBids insert Bids into the DB
- func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.db, bids) }
+ func (hdb *HistoryDB) AddBids(bids []common.Bid) error { return hdb.addBids(hdb.dbWrite, bids) }
func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
if len(bids) == 0 {
return nil
@@ -248,7 +278,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
return tracerr.Wrap(db.BulkInsert(
d,
"INSERT INTO bid (slot_num, bid_value, eth_block_num, bidder_addr) VALUES %s;",
- bids[:],
+ bids,
))
}
@@ -256,7 +286,7 @@ func (hdb *HistoryDB) addBids(d meddler.DB, bids []common.Bid) error {
func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
var bids []*common.Bid
err := meddler.QueryAll(
- hdb.db, &bids,
+ hdb.dbRead, &bids,
`SELECT bid.slot_num, bid.bid_value, bid.eth_block_num, bid.bidder_addr FROM bid
ORDER BY item_id;`,
)
@@ -267,7 +297,7 @@ func (hdb *HistoryDB) GetAllBids() ([]common.Bid, error) {
func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinator, error) {
bidCoord := &common.BidCoordinator{}
err := meddler.QueryRow(
- hdb.db, bidCoord,
+ hdb.dbRead, bidCoord,
`SELECT (
SELECT default_slot_set_bid
FROM auction_vars
@@ -290,7 +320,7 @@ func (hdb *HistoryDB) GetBestBidCoordinator(slotNum int64) (*common.BidCoordinat
// AddCoordinators insert Coordinators into the DB
func (hdb *HistoryDB) AddCoordinators(coordinators []common.Coordinator) error {
- return tracerr.Wrap(hdb.addCoordinators(hdb.db, coordinators))
+ return tracerr.Wrap(hdb.addCoordinators(hdb.dbWrite, coordinators))
}
func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordinator) error {
if len(coordinators) == 0 {
@@ -299,13 +329,13 @@ func (hdb *HistoryDB) addCoordinators(d meddler.DB, coordinators []common.Coordi
return tracerr.Wrap(db.BulkInsert(
d,
"INSERT INTO coordinator (bidder_addr, forger_addr, eth_block_num, url) VALUES %s;",
- coordinators[:],
+ coordinators,
))
}
// AddExitTree insert Exit tree into the DB
func (hdb *HistoryDB) AddExitTree(exitTree []common.ExitInfo) error {
- return tracerr.Wrap(hdb.addExitTree(hdb.db, exitTree))
+ return tracerr.Wrap(hdb.addExitTree(hdb.dbWrite, exitTree))
}
func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) error {
if len(exitTree) == 0 {
@@ -315,7 +345,7 @@ func (hdb *HistoryDB) addExitTree(d meddler.DB, exitTree []common.ExitInfo) erro
d,
"INSERT INTO exit_tree (batch_num, account_idx, merkle_proof, balance, "+
"instant_withdrawn, delayed_withdraw_request, delayed_withdrawn) VALUES %s;",
- exitTree[:],
+ exitTree,
))
}
@@ -393,11 +423,13 @@ func (hdb *HistoryDB) updateExitTree(d sqlx.Ext, blockNum int64,
// AddToken insert a token into the DB
func (hdb *HistoryDB) AddToken(token *common.Token) error {
- return tracerr.Wrap(meddler.Insert(hdb.db, "token", token))
+ return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "token", token))
}
// AddTokens insert tokens into the DB
- func (hdb *HistoryDB) AddTokens(tokens []common.Token) error { return hdb.addTokens(hdb.db, tokens) }
+ func (hdb *HistoryDB) AddTokens(tokens []common.Token) error {
+ return hdb.addTokens(hdb.dbWrite, tokens)
+ }
func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
if len(tokens) == 0 {
return nil
@@ -418,16 +450,17 @@ func (hdb *HistoryDB) addTokens(d meddler.DB, tokens []common.Token) error {
symbol,
decimals
) VALUES %s;`,
- tokens[:],
+ tokens,
))
}
- // UpdateTokenValue updates the USD value of a token
+ // UpdateTokenValue updates the USD value of a token. Value is the price in
+ // USD of a normalized token (1 token = 10^decimals units)
func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error {
// Sanitize symbol
tokenSymbol = strings.ToValidUTF8(tokenSymbol, " ")
- _, err := hdb.db.Exec(
+ _, err := hdb.dbWrite.Exec(
"UPDATE token SET usd = $1 WHERE symbol = $2;",
value, tokenSymbol,
)
@@ -438,7 +471,7 @@ func (hdb *HistoryDB) UpdateTokenValue(tokenSymbol string, value float64) error
func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
token := &TokenWithUSD{}
err := meddler.QueryRow(
- hdb.db, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
+ hdb.dbRead, token, `SELECT * FROM token WHERE token_id = $1;`, tokenID,
)
return token, tracerr.Wrap(err)
}
@@ -447,7 +480,7 @@ func (hdb *HistoryDB) GetToken(tokenID common.TokenID) (*TokenWithUSD, error) {
func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
var tokens []*TokenWithUSD
err := meddler.QueryAll(
- hdb.db, &tokens,
+ hdb.dbRead, &tokens,
"SELECT * FROM token ORDER BY token_id;",
)
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), tracerr.Wrap(err)
@@ -456,7 +489,7 @@ func (hdb *HistoryDB) GetAllTokens() ([]TokenWithUSD, error) {
// GetTokenSymbols returns all the token symbols from the DB
func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
var tokenSymbols []string
- rows, err := hdb.db.Query("SELECT symbol FROM token;")
+ rows, err := hdb.dbRead.Query("SELECT symbol FROM token;")
if err != nil {
return nil, tracerr.Wrap(err)
}
@@ -474,7 +507,7 @@ func (hdb *HistoryDB) GetTokenSymbols() ([]string, error) {
// AddAccounts insert accounts into the DB
func (hdb *HistoryDB) AddAccounts(accounts []common.Account) error {
- return tracerr.Wrap(hdb.addAccounts(hdb.db, accounts))
+ return tracerr.Wrap(hdb.addAccounts(hdb.dbWrite, accounts))
}
func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error {
if len(accounts) == 0 {
@@ -489,7 +522,7 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
bjj,
eth_addr
) VALUES %s;`,
- accounts[:],
+ accounts,
))
}
@@ -497,18 +530,49 @@ func (hdb *HistoryDB) addAccounts(d meddler.DB, accounts []common.Account) error
func (hdb *HistoryDB) GetAllAccounts() ([]common.Account, error) {
var accs []*common.Account
err := meddler.QueryAll(
- hdb.db, &accs,
+ hdb.dbRead, &accs,
"SELECT idx, token_id, batch_num, bjj, eth_addr FROM account ORDER BY idx;",
)
return db.SlicePtrsToSlice(accs).([]common.Account), tracerr.Wrap(err)
}
+ // AddAccountUpdates inserts accUpdates into the DB
+ func (hdb *HistoryDB) AddAccountUpdates(accUpdates []common.AccountUpdate) error {
+ return tracerr.Wrap(hdb.addAccountUpdates(hdb.dbWrite, accUpdates))
+ }
+ func (hdb *HistoryDB) addAccountUpdates(d meddler.DB, accUpdates []common.AccountUpdate) error {
+ if len(accUpdates) == 0 {
+ return nil
+ }
+ return tracerr.Wrap(db.BulkInsert(
+ d,
+ `INSERT INTO account_update (
+ eth_block_num,
+ batch_num,
+ idx,
+ nonce,
+ balance
+ ) VALUES %s;`,
+ accUpdates,
+ ))
+ }
+ // GetAllAccountUpdates returns all the AccountUpdate from the DB
+ func (hdb *HistoryDB) GetAllAccountUpdates() ([]common.AccountUpdate, error) {
+ var accUpdates []*common.AccountUpdate
+ err := meddler.QueryAll(
+ hdb.dbRead, &accUpdates,
+ "SELECT eth_block_num, batch_num, idx, nonce, balance FROM account_update ORDER BY idx;",
+ )
+ return db.SlicePtrsToSlice(accUpdates).([]common.AccountUpdate), tracerr.Wrap(err)
+ }
// AddL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
// If the tx is originated by a coordinator, BatchNum must be provided. If it's originated by a user,
// BatchNum should be null, and the value will be set by a trigger when a batch forges the tx.
// EffectiveAmount and EffectiveDepositAmount are set with default values by the DB.
func (hdb *HistoryDB) AddL1Txs(l1txs []common.L1Tx) error {
- return tracerr.Wrap(hdb.addL1Txs(hdb.db, l1txs))
+ return tracerr.Wrap(hdb.addL1Txs(hdb.dbWrite, l1txs))
}
// addL1Txs inserts L1 txs to the DB. USD and DepositAmountUSD will be set automatically before storing the tx.
@@ -562,7 +626,7 @@ func (hdb *HistoryDB) addL1Txs(d meddler.DB, l1txs []common.L1Tx) error {
// AddL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
func (hdb *HistoryDB) AddL2Txs(l2txs []common.L2Tx) error {
- return tracerr.Wrap(hdb.addL2Txs(hdb.db, l2txs))
+ return tracerr.Wrap(hdb.addL2Txs(hdb.dbWrite, l2txs))
}
// addL2Txs inserts L2 txs to the DB. TokenID, USD and FeeUSD will be set automatically before storing the tx.
@@ -621,7 +685,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
fee,
nonce
) VALUES %s;`,
- txs[:],
+ txs,
))
}
@@ -629,7 +693,7 @@ func (hdb *HistoryDB) addTxs(d meddler.DB, txs []txWrite) error {
func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
var exits []*common.ExitInfo
err := meddler.QueryAll(
- hdb.db, &exits,
+ hdb.dbRead, &exits,
`SELECT exit_tree.batch_num, exit_tree.account_idx, exit_tree.merkle_proof,
exit_tree.balance, exit_tree.instant_withdrawn, exit_tree.delayed_withdraw_request,
exit_tree.delayed_withdrawn FROM exit_tree ORDER BY item_id;`,
@@ -641,7 +705,7 @@ func (hdb *HistoryDB) GetAllExits() ([]common.ExitInfo, error) {
func (hdb *HistoryDB) GetAllL1UserTxs() ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
- hdb.db, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
+ hdb.dbRead, &txs, // Note that '\x' gets parsed as a big.Int with value = 0
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, (CASE WHEN tx.batch_num IS NULL THEN NULL WHEN tx.amount_success THEN tx.amount ELSE '\x' END) AS effective_amount,
@@ -658,7 +722,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
// Since the query specifies that only coordinator txs are returned, it's safe to assume
// that returned txs will always have effective amounts
err := meddler.QueryAll(
- hdb.db, &txs,
+ hdb.dbRead, &txs,
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.effective_from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, tx.amount AS effective_amount,
@@ -673,7 +737,7 @@ func (hdb *HistoryDB) GetAllL1CoordinatorTxs() ([]common.L1Tx, error) {
func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
var txs []*common.L2Tx
err := meddler.QueryAll(
- hdb.db, &txs,
+ hdb.dbRead, &txs,
`SELECT tx.id, tx.batch_num, tx.position,
tx.from_idx, tx.to_idx, tx.amount, tx.token_id,
tx.fee, tx.nonce, tx.type, tx.eth_block_num
@@ -686,7 +750,7 @@ func (hdb *HistoryDB) GetAllL2Txs() ([]common.L2Tx, error) {
func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
- hdb.db, &txs, // only L1 user txs can have batch_num set to null
+ hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, NULL AS effective_amount,
@@ -703,7 +767,7 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
// GetLastTxsPosition for a given to_forge_l1_txs_num
func (hdb *HistoryDB) GetLastTxsPosition(toForgeL1TxsNum int64) (int, error) {
- row := hdb.db.QueryRow(
+ row := hdb.dbRead.QueryRow(
"SELECT position FROM tx WHERE to_forge_l1_txs_num = $1 ORDER BY position DESC;",
toForgeL1TxsNum,
)
@@ -717,15 +781,15 @@ func (hdb *HistoryDB) GetSCVars() (*common.RollupVariables, *common.AuctionVaria
var rollup common.RollupVariables
var auction common.AuctionVariables
var wDelayer common.WDelayerVariables
- if err := meddler.QueryRow(hdb.db, &rollup,
+ if err := meddler.QueryRow(hdb.dbRead, &rollup,
"SELECT * FROM rollup_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err)
}
- if err := meddler.QueryRow(hdb.db, &auction,
+ if err := meddler.QueryRow(hdb.dbRead, &auction,
"SELECT * FROM auction_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err)
}
- if err := meddler.QueryRow(hdb.db, &wDelayer,
+ if err := meddler.QueryRow(hdb.dbRead, &wDelayer,
"SELECT * FROM wdelayer_vars ORDER BY eth_block_num DESC LIMIT 1;"); err != nil {
return nil, nil, nil, tracerr.Wrap(err)
}
@@ -756,7 +820,7 @@ func (hdb *HistoryDB) addBucketUpdates(d meddler.DB, bucketUpdates []common.Buck
block_stamp,
withdrawals
) VALUES %s;`,
- bucketUpdates[:],
+ bucketUpdates,
))
}
@@ -770,13 +834,25 @@ func (hdb *HistoryDB) AddBucketUpdatesTest(d meddler.DB, bucketUpdates []common.
func (hdb *HistoryDB) GetAllBucketUpdates() ([]common.BucketUpdate, error) {
var bucketUpdates []*common.BucketUpdate
err := meddler.QueryAll(
- hdb.db, &bucketUpdates,
+ hdb.dbRead, &bucketUpdates,
`SELECT eth_block_num, num_bucket, block_stamp, withdrawals
FROM bucket_update ORDER BY item_id;`,
)
return db.SlicePtrsToSlice(bucketUpdates).([]common.BucketUpdate), tracerr.Wrap(err)
}
+ func (hdb *HistoryDB) getMinBidInfo(d meddler.DB,
+ currentSlot, lastClosedSlot int64) ([]MinBidInfo, error) {
+ minBidInfo := []*MinBidInfo{}
+ query := `
+ SELECT DISTINCT default_slot_set_bid, default_slot_set_bid_slot_num FROM auction_vars
+ WHERE default_slot_set_bid_slot_num < $1
+ ORDER BY default_slot_set_bid_slot_num DESC
+ LIMIT $2;`
+ err := meddler.QueryAll(d, &minBidInfo, query, lastClosedSlot, int(lastClosedSlot-currentSlot)+1)
+ return db.SlicePtrsToSlice(minBidInfo).([]MinBidInfo), tracerr.Wrap(err)
+ }
func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.TokenExchange) error {
if len(tokenExchanges) == 0 {
return nil
@@ -788,7 +864,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
eth_addr,
value_usd
) VALUES %s;`,
- tokenExchanges[:],
+ tokenExchanges,
))
}
@@ -796,7 +872,7 @@ func (hdb *HistoryDB) addTokenExchanges(d meddler.DB, tokenExchanges []common.To
func (hdb *HistoryDB) GetAllTokenExchanges() ([]common.TokenExchange, error) {
var tokenExchanges []*common.TokenExchange
err := meddler.QueryAll(
- hdb.db, &tokenExchanges,
+ hdb.dbRead, &tokenExchanges,
"SELECT eth_block_num, eth_addr, value_usd FROM token_exchange ORDER BY item_id;",
)
return db.SlicePtrsToSlice(tokenExchanges).([]common.TokenExchange), tracerr.Wrap(err)
@@ -816,7 +892,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
token_addr, token_addr,
amount amount
) VALUES %s;`, ) VALUES %s;`,
escapeHatchWithdrawals[:], escapeHatchWithdrawals,
)) ))
} }
@@ -824,7 +900,7 @@ func (hdb *HistoryDB) addEscapeHatchWithdrawals(d meddler.DB,
func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) { func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHatchWithdrawal, error) {
var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal var escapeHatchWithdrawals []*common.WDelayerEscapeHatchWithdrawal
err := meddler.QueryAll( err := meddler.QueryAll(
hdb.db, &escapeHatchWithdrawals, hdb.dbRead, &escapeHatchWithdrawals,
"SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;", "SELECT eth_block_num, who_addr, to_addr, token_addr, amount FROM escape_hatch_withdrawal ORDER BY item_id;",
) )
return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal), return db.SlicePtrsToSlice(escapeHatchWithdrawals).([]common.WDelayerEscapeHatchWithdrawal),
@@ -837,7 +913,7 @@ func (hdb *HistoryDB) GetAllEscapeHatchWithdrawals() ([]common.WDelayerEscapeHat
// exist in the smart contracts. // exist in the smart contracts.
func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables, func (hdb *HistoryDB) SetInitialSCVars(rollup *common.RollupVariables,
auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error { auction *common.AuctionVariables, wDelayer *common.WDelayerVariables) error {
txn, err := hdb.db.Beginx() txn, err := hdb.dbWrite.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -921,7 +997,7 @@ func (hdb *HistoryDB) setExtraInfoForgedL1UserTxs(d sqlx.Ext, txs []common.L1Tx)
// the pagination system of the API/DB depends on this. Within blocks, all // the pagination system of the API/DB depends on this. Within blocks, all
// items should also be in the correct order (Accounts, Tokens, Txs, etc.) // items should also be in the correct order (Accounts, Tokens, Txs, etc.)
func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) { func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
txn, err := hdb.db.Beginx() txn, err := hdb.dbWrite.Beginx()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -993,6 +1069,11 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// Add account balance updates, if there are any
if err := hdb.addAccountUpdates(txn, batch.UpdatedAccounts); err != nil {
return tracerr.Wrap(err)
}
// Set the EffectiveAmount and EffectiveDepositAmount of all the // Set the EffectiveAmount and EffectiveDepositAmount of all the
// L1UserTxs that have been forged in this batch // L1UserTxs that have been forged in this batch
if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil { if err = hdb.setExtraInfoForgedL1UserTxs(txn, batch.L1UserTxs); err != nil {
@@ -1070,27 +1151,16 @@ func (hdb *HistoryDB) AddBlockSCData(blockData *common.BlockData) (err error) {
return tracerr.Wrap(txn.Commit()) return tracerr.Wrap(txn.Commit())
} }
// GetCoordinatorAPI returns a coordinator by its bidderAddr
func (hdb *HistoryDB) GetCoordinatorAPI(bidderAddr ethCommon.Address) (*CoordinatorAPI, error) {
coordinator := &CoordinatorAPI{}
err := meddler.QueryRow(
hdb.db, coordinator,
"SELECT * FROM coordinator WHERE bidder_addr = $1 ORDER BY item_id DESC LIMIT 1;",
bidderAddr,
)
return coordinator, tracerr.Wrap(err)
}
// AddAuctionVars insert auction vars into the DB // AddAuctionVars insert auction vars into the DB
func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error { func (hdb *HistoryDB) AddAuctionVars(auctionVars *common.AuctionVariables) error {
return tracerr.Wrap(meddler.Insert(hdb.db, "auction_vars", auctionVars)) return tracerr.Wrap(meddler.Insert(hdb.dbWrite, "auction_vars", auctionVars))
} }
// GetTokensTest used to get tokens in a testing context // GetTokensTest used to get tokens in a testing context
func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) { func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
tokens := []*TokenWithUSD{} tokens := []*TokenWithUSD{}
if err := meddler.QueryAll( if err := meddler.QueryAll(
hdb.db, &tokens, hdb.dbRead, &tokens,
"SELECT * FROM TOKEN", "SELECT * FROM TOKEN",
); err != nil { ); err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -1100,3 +1170,49 @@ func (hdb *HistoryDB) GetTokensTest() ([]TokenWithUSD, error) {
} }
return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil return db.SlicePtrsToSlice(tokens).([]TokenWithUSD), nil
} }
// GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee
// Get the total number of txs and the batch of the first tx within the last hour
type totalTxsSinceBatchNum struct {
TotalTxs int `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
}
ttsbn := &totalTxsSinceBatchNum{}
if err := meddler.QueryRow(
hdb.dbRead, ttsbn, `SELECT COUNT(tx.*) as total_txs,
COALESCE (MIN(tx.batch_num), 0) as batch_num
FROM tx INNER JOIN block ON tx.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '1 HOURS';`,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Get the number of batches and the accumulated fees for the last hour
type totalBatchesAndFee struct {
TotalBatches int `meddler:"total_batches"`
TotalFees float64 `meddler:"total_fees"`
}
tbf := &totalBatchesAndFee{}
if err := meddler.QueryRow(
hdb.dbRead, tbf, `SELECT COUNT(*) AS total_batches,
COALESCE (SUM(total_fees_usd), 0) AS total_fees FROM batch
WHERE batch_num > $1;`, ttsbn.FirstBatchNum,
); err != nil {
return nil, tracerr.Wrap(err)
}
// Compute the average transaction fee
var avgTransactionFee float64
if ttsbn.TotalTxs > 0 {
avgTransactionFee = tbf.TotalFees / float64(ttsbn.TotalTxs)
} else {
avgTransactionFee = 0
}
recommendedFee.ExistingAccount =
math.Max(avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccount =
math.Max(createAccountExtraFeePercentage*avgTransactionFee, minFeeUSD)
recommendedFee.CreatesAccountAndRegister =
math.Max(createAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD)
return &recommendedFee, nil
}
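The scaling in GetRecommendedFee is easiest to follow with concrete numbers. A worked example, assuming 200 txs paying a total of 30 USD in fees during the last hour and a 0.10 USD minimum fee (all figures illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	avg := 30.0 / 200.0 // totalFees / totalTxs = 0.15 USD per tx
	minFeeUSD := 0.10
	// The multipliers are createAccountExtraFeePercentage (2) and
	// createAccountInternalExtraFeePercentage (2.5) from nodeinfo.go.
	fmt.Println(math.Max(avg, minFeeUSD))     // ExistingAccount: 0.15
	fmt.Println(math.Max(2*avg, minFeeUSD))   // CreatesAccount: 0.3
	fmt.Println(math.Max(2.5*avg, minFeeUSD)) // CreatesAccountAndRegister: 0.375
}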


@@ -39,12 +39,12 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
historyDB = NewHistoryDB(db, nil) historyDB = NewHistoryDB(db, db, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
historyDBWithACC = NewHistoryDB(db, apiConnCon) historyDBWithACC = NewHistoryDB(db, db, apiConnCon)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -203,6 +203,10 @@ func TestBatches(t *testing.T) {
fetchedLastBatchNum, err := historyDB.GetLastBatchNum() fetchedLastBatchNum, err := historyDB.GetLastBatchNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum) assert.Equal(t, batches[len(batches)-1].BatchNum, fetchedLastBatchNum)
// Test GetLastBatch
fetchedLastBatch, err := historyDB.GetLastBatch()
assert.NoError(t, err)
assert.Equal(t, &batches[len(batches)-1], fetchedLastBatch)
// Test GetLastL1TxsNum // Test GetLastL1TxsNum
fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum() fetchedLastL1TxsNum, err := historyDB.GetLastL1TxsNum()
assert.NoError(t, err) assert.NoError(t, err)
@@ -211,6 +215,12 @@ func TestBatches(t *testing.T) {
fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum() fetchedLastL1BatchBlockNum, err := historyDB.GetLastL1BatchBlockNum()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum) assert.Equal(t, lastL1BatchBlockNum, fetchedLastL1BatchBlockNum)
// Test GetBatch
fetchedBatch, err := historyDB.GetBatch(1)
require.NoError(t, err)
assert.Equal(t, &batches[0], fetchedBatch)
_, err = historyDB.GetBatch(common.BatchNum(len(batches) + 1))
assert.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
} }
func TestBids(t *testing.T) { func TestBids(t *testing.T) {
@@ -367,6 +377,22 @@ func TestAccounts(t *testing.T) {
accs[i].Balance = nil accs[i].Balance = nil
assert.Equal(t, accs[i], acc) assert.Equal(t, accs[i], acc)
} }
// Test AccountBalances
accUpdates := make([]common.AccountUpdate, len(accs))
for i, acc := range accs {
accUpdates[i] = common.AccountUpdate{
EthBlockNum: batches[acc.BatchNum-1].EthBlockNum,
BatchNum: acc.BatchNum,
Idx: acc.Idx,
Nonce: common.Nonce(i),
Balance: big.NewInt(int64(i)),
}
}
err = historyDB.AddAccountUpdates(accUpdates)
require.NoError(t, err)
fetchedAccBalances, err := historyDB.GetAllAccountUpdates()
require.NoError(t, err)
assert.Equal(t, accUpdates, fetchedAccBalances)
} }
func TestTxs(t *testing.T) { func TestTxs(t *testing.T) {
@@ -791,11 +817,11 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
} }
// Add second batch to trigger the update of the batch_num, // Add second batch to trigger the update of the batch_num,
// while avoiding the implicit call of setExtraInfoForgedL1UserTxs // while avoiding the implicit call of setExtraInfoForgedL1UserTxs
err = historyDB.addBlock(historyDB.db, &blocks[1].Block) err = historyDB.addBlock(historyDB.dbWrite, &blocks[1].Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addBatch(historyDB.db, &blocks[1].Rollup.Batches[0].Batch) err = historyDB.addBatch(historyDB.dbWrite, &blocks[1].Rollup.Batches[0].Batch)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.addAccounts(historyDB.db, blocks[1].Rollup.Batches[0].CreatedAccounts) err = historyDB.addAccounts(historyDB.dbWrite, blocks[1].Rollup.Batches[0].CreatedAccounts)
require.NoError(t, err) require.NoError(t, err)
// Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block // Set the Effective{Amount,DepositAmount} of the L1UserTxs that are forged in the second block
@@ -805,7 +831,7 @@ func TestSetExtraInfoForgedL1UserTxs(t *testing.T) {
l1Txs[1].EffectiveAmount = big.NewInt(0) l1Txs[1].EffectiveAmount = big.NewInt(0)
l1Txs[2].EffectiveDepositAmount = big.NewInt(0) l1Txs[2].EffectiveDepositAmount = big.NewInt(0)
l1Txs[2].EffectiveAmount = big.NewInt(0) l1Txs[2].EffectiveAmount = big.NewInt(0)
err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.db, l1Txs) err = historyDB.setExtraInfoForgedL1UserTxs(historyDB.dbWrite, l1Txs)
require.NoError(t, err) require.NoError(t, err)
dbL1Txs, err := historyDB.GetAllL1UserTxs() dbL1Txs, err := historyDB.GetAllL1UserTxs()
@@ -892,10 +918,10 @@ func TestUpdateExitTree(t *testing.T) {
common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false, common.WithdrawInfo{Idx: 259, NumExitRoot: 3, InstantWithdraw: false,
Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr}, Owner: tc.UsersByIdx[259].Addr, Token: tokenAddr},
) )
err = historyDB.addBlock(historyDB.db, &block.Block) err = historyDB.addBlock(historyDB.dbWrite, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.db, block.Block.Num, err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -925,10 +951,10 @@ func TestUpdateExitTree(t *testing.T) {
Token: tokenAddr, Token: tokenAddr,
Amount: big.NewInt(80), Amount: big.NewInt(80),
}) })
err = historyDB.addBlock(historyDB.db, &block.Block) err = historyDB.addBlock(historyDB.dbWrite, &block.Block)
require.NoError(t, err) require.NoError(t, err)
err = historyDB.updateExitTree(historyDB.db, block.Block.Num, err = historyDB.updateExitTree(historyDB.dbWrite, block.Block.Num,
block.Rollup.Withdrawals, block.WDelayer.Withdrawals) block.Rollup.Withdrawals, block.WDelayer.Withdrawals)
require.NoError(t, err) require.NoError(t, err)
@@ -971,7 +997,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
URL: "bar", URL: "bar",
}, },
} }
err = historyDB.addCoordinators(historyDB.db, coords) err = historyDB.addCoordinators(historyDB.dbWrite, coords)
require.NoError(t, err) require.NoError(t, err)
bids := []common.Bid{ bids := []common.Bid{
@@ -989,7 +1015,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
}, },
} }
err = historyDB.addBids(historyDB.db, bids) err = historyDB.addBids(historyDB.dbWrite, bids)
require.NoError(t, err) require.NoError(t, err)
forger10, err := historyDB.GetBestBidCoordinator(10) forger10, err := historyDB.GetBestBidCoordinator(10)
@@ -1027,7 +1053,7 @@ func TestAddBucketUpdates(t *testing.T) {
Withdrawals: big.NewInt(42), Withdrawals: big.NewInt(42),
}, },
} }
err := historyDB.addBucketUpdates(historyDB.db, bucketUpdates) err := historyDB.addBucketUpdates(historyDB.dbWrite, bucketUpdates)
require.NoError(t, err) require.NoError(t, err)
dbBucketUpdates, err := historyDB.GetAllBucketUpdates() dbBucketUpdates, err := historyDB.GetAllBucketUpdates()
require.NoError(t, err) require.NoError(t, err)
@@ -1052,7 +1078,7 @@ func TestAddTokenExchanges(t *testing.T) {
ValueUSD: 67890, ValueUSD: 67890,
}, },
} }
err := historyDB.addTokenExchanges(historyDB.db, tokenExchanges) err := historyDB.addTokenExchanges(historyDB.dbWrite, tokenExchanges)
require.NoError(t, err) require.NoError(t, err)
dbTokenExchanges, err := historyDB.GetAllTokenExchanges() dbTokenExchanges, err := historyDB.GetAllTokenExchanges()
require.NoError(t, err) require.NoError(t, err)
@@ -1081,7 +1107,7 @@ func TestAddEscapeHatchWithdrawals(t *testing.T) {
Amount: big.NewInt(20003), Amount: big.NewInt(20003),
}, },
} }
err := historyDB.addEscapeHatchWithdrawals(historyDB.db, escapeHatchWithdrawals) err := historyDB.addEscapeHatchWithdrawals(historyDB.dbWrite, escapeHatchWithdrawals)
require.NoError(t, err) require.NoError(t, err)
dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals() dbEscapeHatchWithdrawals, err := historyDB.GetAllEscapeHatchWithdrawals()
require.NoError(t, err) require.NoError(t, err)
@@ -1146,19 +1172,15 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches-1), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
// Frequency is not exactly the desired one, some decimals may appear // Frequency is not exactly the desired one, some decimals may appear
assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency)) // There is a -2 as time for first and last batch is not taken into account
assert.Less(t, res.BatchFrequency, float64(frequency+1)) assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
// Truncate frequency into an int to do an exact check assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
@@ -1185,7 +1207,8 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
set = append(set, til.Instruction{Typ: til.TypeNewBlock}) set = append(set, til.Instruction{Typ: til.TypeNewBlock})
// Transfers // Transfers
for x := 0; x < 6000; x++ { const numBlocks int = 30
for x := 0; x < numBlocks; x++ {
set = append(set, til.Instruction{ set = append(set, til.Instruction{
Typ: common.TxTypeTransfer, Typ: common.TxTypeTransfer,
TokenID: common.TokenID(0), TokenID: common.TokenID(0),
@@ -1209,36 +1232,31 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
err = tc.FillBlocksExtra(blocks, &tilCfgExtra) err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err) require.NoError(t, err)
const numBatches int = 6002 const numBatches int = 2 + numBlocks
const numTx int = 6003 const blockNum = 4 + numBlocks
const blockNum = 6005 - 1
// Sanity check // Sanity check
require.Equal(t, blockNum, len(blocks)) require.Equal(t, blockNum, len(blocks))
// Adding one batch per block // Adding one batch per block
// batch frequency can be chosen // batch frequency can be chosen
const frequency int = 15 const blockTime time.Duration = 3600 * time.Second
now := time.Now()
require.NoError(t, err)
for i := range blocks { for i := range blocks {
blocks[i].Block.Timestamp = time.Now().Add(-time.Second * time.Duration(frequency*(len(blocks)-i))) blocks[i].Block.Timestamp = now.Add(-time.Duration(len(blocks)-1-i) * blockTime)
err = historyDB.AddBlockSCData(&blocks[i]) err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsAPI(common.BatchNum(numBatches)) res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, math.Trunc((float64(numTx)/float64(numBatches-1))/0.001)*0.001, math.Trunc(res.TransactionsPerBatch/0.001)*0.001) assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
// Frequency is not exactly the desired one, some decimals may appear assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
assert.GreaterOrEqual(t, res.BatchFrequency, float64(frequency)) assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
assert.Less(t, res.BatchFrequency, float64(frequency+1))
// Truncate frequency into an int to do an exact check
assert.Equal(t, frequency, int(res.BatchFrequency))
// This may also be different in some decimals
// Truncate it to the third decimal to compare
assert.Equal(t, math.Trunc((float64(numTx)/float64(frequency*blockNum-frequency))/0.001)*0.001, math.Trunc(res.TransactionsPerSecond/0.001)*0.001)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TotalAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.TotalBJJs)
// Til does not set fees // Til does not set fees
@@ -1247,13 +1265,7 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
func TestGetMetricsAPIEmpty(t *testing.T) { func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsAPI(0) _, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err)
}
func TestGetAvgTxFeeEmpty(t *testing.T) {
test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetAvgTxFeeAPI()
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1442,3 +1454,65 @@ func setTestBlocks(from, to int64) []common.Block {
} }
return blocks return blocks
} }
func TestNodeInfo(t *testing.T) {
test.WipeDB(historyDB.DB())
err := historyDB.SetStateInternalAPI(&StateAPI{})
require.NoError(t, err)
clientSetup := test.NewClientSetupExample()
constants := &Constants{
SCConsts: common.SCConsts{
Rollup: *clientSetup.RollupConstants,
Auction: *clientSetup.AuctionConstants,
WDelayer: *clientSetup.WDelayerConstants,
},
ChainID: 42,
HermezAddress: clientSetup.AuctionConstants.HermezRollup,
}
err = historyDB.SetConstants(constants)
require.NoError(t, err)
// Test parameters
stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{
ForgeDelay: 3.1,
},
Network: NetworkAPI{
LastEthBlock: 12,
LastSyncBlock: 34,
},
Metrics: MetricsAPI{
TransactionsPerBatch: 1.1,
TotalAccounts: 42,
},
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),
WithdrawalDelayer: *clientSetup.WDelayerVariables,
RecommendedFee: common.RecommendedFee{
ExistingAccount: 0.15,
},
}
err = historyDB.SetStateInternalAPI(stateAPI)
require.NoError(t, err)
nodeConfig := &NodeConfig{
MaxPoolTxs: 123,
MinFeeUSD: 0.5,
}
err = historyDB.SetNodeConfig(nodeConfig)
require.NoError(t, err)
dbConstants, err := historyDB.GetConstants()
require.NoError(t, err)
assert.Equal(t, constants, dbConstants)
dbNodeConfig, err := historyDB.GetNodeConfig()
require.NoError(t, err)
assert.Equal(t, nodeConfig, dbNodeConfig)
dbStateAPI, err := historyDB.getStateAPI(historyDB.dbRead)
require.NoError(t, err)
assert.Equal(t, stateAPI, dbStateAPI)
}

db/historydb/nodeinfo.go (new file)

@@ -0,0 +1,169 @@
package historydb
import (
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr"
"github.com/russross/meddler"
)
const (
createAccountExtraFeePercentage float64 = 2
createAccountInternalExtraFeePercentage float64 = 2.5
)
// Period represents a time period in Ethereum
type Period struct {
SlotNum int64 `json:"slotNum"`
FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"`
FromTimestamp time.Time `json:"fromTimestamp"`
ToTimestamp time.Time `json:"toTimestamp"`
}
// NextForgerAPI represents the next forger exposed via the API
type NextForgerAPI struct {
Coordinator CoordinatorAPI `json:"coordinator"`
Period Period `json:"period"`
}
// NetworkAPI is the network state exposed via the API
type NetworkAPI struct {
LastEthBlock int64 `json:"lastEthereumBlock"`
LastSyncBlock int64 `json:"lastSynchedBlock"`
LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"`
}
// NodePublicConfig is the configuration of the node that is exposed via API
type NodePublicConfig struct {
// ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"`
}
// StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"`
Auction AuctionVariablesAPI `json:"auction"`
WithdrawalDelayer common.WDelayerVariables `json:"withdrawalDelayer"`
RecommendedFee common.RecommendedFee `json:"recommendedFee"`
}
// Constants contains network constants
type Constants struct {
common.SCConsts
ChainID uint16
HermezAddress ethCommon.Address
}
// NodeConfig contains the node config exposed in the API
type NodeConfig struct {
MaxPoolTxs uint32 `meddler:"max_pool_txs"`
MinFeeUSD float64 `meddler:"min_fee"`
}
// NodeInfo contains information about the node used when serving the API
type NodeInfo struct {
ItemID int `meddler:"item_id,pk"`
StateAPI *StateAPI `meddler:"state,json"`
NodeConfig *NodeConfig `meddler:"config,json"`
Constants *Constants `meddler:"constants,json"`
}
// GetNodeInfo returns the NodeInfo
func (hdb *HistoryDB) GetNodeInfo() (*NodeInfo, error) {
ni := &NodeInfo{}
err := meddler.QueryRow(
hdb.dbRead, ni, `SELECT * FROM node_info WHERE item_id = 1;`,
)
return ni, tracerr.Wrap(err)
}
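GetNodeInfo and the setters below all target a single node_info row. The table itself is not part of this diff, so the following is only a guess at the implied schema; the column types in particular are assumptions:

// Hypothetical schema for node_info: one row (item_id = 1) holding
// three independently-updatable JSON documents. Column types are a
// guess; only the column names are visible in this diff.
const nodeInfoTableSketch = `
CREATE TABLE node_info (
    item_id   SERIAL PRIMARY KEY,
    state     BYTEA, -- StateAPI as JSON (meddler tag "state,json")
    config    BYTEA, -- NodeConfig as JSON (meddler tag "config,json")
    constants BYTEA  -- Constants as JSON (meddler tag "constants,json")
);`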
// GetConstants returns the Constants
func (hdb *HistoryDB) GetConstants() (*Constants, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT constants FROM node_info WHERE item_id = 1;",
)
return nodeInfo.Constants, tracerr.Wrap(err)
}
// SetConstants sets the Constants
func (hdb *HistoryDB) SetConstants(constants *Constants) error {
_constants := struct {
Constants *Constants `meddler:"constants,json"`
}{constants}
values, err := meddler.Default.Values(&_constants, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET constants = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetStateInternalAPI returns the StateAPI
func (hdb *HistoryDB) GetStateInternalAPI() (*StateAPI, error) {
return hdb.getStateAPI(hdb.dbRead)
}
func (hdb *HistoryDB) getStateAPI(d meddler.DB) (*StateAPI, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
d, &nodeInfo,
"SELECT state FROM node_info WHERE item_id = 1;",
)
return nodeInfo.StateAPI, tracerr.Wrap(err)
}
// SetStateInternalAPI sets the StateAPI
func (hdb *HistoryDB) SetStateInternalAPI(stateAPI *StateAPI) error {
_stateAPI := struct {
StateAPI *StateAPI `meddler:"state,json"`
}{stateAPI}
values, err := meddler.Default.Values(&_stateAPI, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET state = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
// GetNodeConfig returns the NodeConfig
func (hdb *HistoryDB) GetNodeConfig() (*NodeConfig, error) {
var nodeInfo NodeInfo
err := meddler.QueryRow(
hdb.dbRead, &nodeInfo,
"SELECT config FROM node_info WHERE item_id = 1;",
)
return nodeInfo.NodeConfig, tracerr.Wrap(err)
}
// SetNodeConfig sets the NodeConfig
func (hdb *HistoryDB) SetNodeConfig(nodeConfig *NodeConfig) error {
_nodeConfig := struct {
NodeConfig *NodeConfig `meddler:"config,json"`
}{nodeConfig}
values, err := meddler.Default.Values(&_nodeConfig, false)
if err != nil {
return tracerr.Wrap(err)
}
_, err = hdb.dbWrite.Exec(
"UPDATE node_info SET config = $1 WHERE item_id = 1;",
values[0],
)
return tracerr.Wrap(err)
}
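SetConstants, SetStateInternalAPI and SetNodeConfig differ only in the column they update, so they could plausibly share a helper. A sketch using "encoding/json" and "fmt" directly (setJSONColumn is hypothetical; meddler's json tag also serializes via encoding/json, so the stored bytes would be equivalent):

func (hdb *HistoryDB) setJSONColumn(column string, v interface{}) error {
	blob, err := json.Marshal(v)
	if err != nil {
		return tracerr.Wrap(err)
	}
	// column must be one of "state", "config" or "constants";
	// interpolating anything user-controlled here would be SQL injection.
	_, err = hdb.dbWrite.Exec(
		fmt.Sprintf("UPDATE node_info SET %s = $1 WHERE item_id = 1;", column),
		blob,
	)
	return tracerr.Wrap(err)
}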


@@ -239,8 +239,8 @@ type AccountAPI struct {
BatchNum common.BatchNum `meddler:"batch_num"` BatchNum common.BatchNum `meddler:"batch_num"`
PublicKey apitypes.HezBJJ `meddler:"bjj"` PublicKey apitypes.HezBJJ `meddler:"bjj"`
EthAddr apitypes.HezEthAddr `meddler:"eth_addr"` EthAddr apitypes.HezEthAddr `meddler:"eth_addr"`
Nonce common.Nonce `meddler:"-"` // max of 40 bits used Nonce common.Nonce `meddler:"nonce"` // max of 40 bits used
Balance *apitypes.BigIntStr `meddler:"-"` // max of 192 bits used Balance *apitypes.BigIntStr `meddler:"balance"` // max of 192 bits used
TotalItems uint64 `meddler:"total_items"` TotalItems uint64 `meddler:"total_items"`
FirstItem uint64 `meddler:"first_item"` FirstItem uint64 `meddler:"first_item"`
LastItem uint64 `meddler:"last_item"` LastItem uint64 `meddler:"last_item"`
@@ -302,25 +302,15 @@ type BatchAPI struct {
LastItem uint64 `json:"-" meddler:"last_item"` LastItem uint64 `json:"-" meddler:"last_item"`
} }
// Metrics defines metrics of the network // MetricsAPI defines metrics of the network
type Metrics struct { type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"` TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"` BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"` TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"` TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"` TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"`
AvgTransactionFee float64 `json:"avgTransactionFee"` AvgTransactionFee float64 `json:"avgTransactionFee"`
} EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimatedTimeToForgeL1"`
// MetricsTotals is used to get temporal information from HistoryDB
// to calculate data to be stored into the Metrics struct
type MetricsTotals struct {
TotalTransactions uint64 `meddler:"total_txs"`
FirstBatchNum common.BatchNum `meddler:"batch_num"`
TotalBatches int64 `meddler:"total_batches"`
TotalFeesUSD float64 `meddler:"total_fees"`
MinTimestamp time.Time `meddler:"min_timestamp,utctime"`
MaxTimestamp time.Time `meddler:"max_timestamp,utctime"`
} }
// BidAPI is a representation of a bid with additional information // BidAPI is a representation of a bid with additional information
@@ -373,6 +363,27 @@ type RollupVariablesAPI struct {
SafeMode bool `json:"safeMode" meddler:"safe_mode"` SafeMode bool `json:"safeMode" meddler:"safe_mode"`
} }
// NewRollupVariablesAPI creates a RollupVariablesAPI from common.RollupVariables
func NewRollupVariablesAPI(rollupVariables *common.RollupVariables) *RollupVariablesAPI {
rollupVars := RollupVariablesAPI{
EthBlockNum: rollupVariables.EthBlockNum,
FeeAddToken: apitypes.NewBigIntStr(rollupVariables.FeeAddToken),
ForgeL1L2BatchTimeout: rollupVariables.ForgeL1L2BatchTimeout,
WithdrawalDelay: rollupVariables.WithdrawalDelay,
SafeMode: rollupVariables.SafeMode,
}
for i, bucket := range rollupVariables.Buckets {
rollupVars.Buckets[i] = BucketParamsAPI{
CeilUSD: apitypes.NewBigIntStr(bucket.CeilUSD),
Withdrawals: apitypes.NewBigIntStr(bucket.Withdrawals),
BlockWithdrawalRate: apitypes.NewBigIntStr(bucket.BlockWithdrawalRate),
MaxWithdrawals: apitypes.NewBigIntStr(bucket.MaxWithdrawals),
}
}
return &rollupVars
}
// AuctionVariablesAPI are the variables of the Auction Smart Contract // AuctionVariablesAPI are the variables of the Auction Smart Contract
type AuctionVariablesAPI struct { type AuctionVariablesAPI struct {
EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"` EthBlockNum int64 `json:"ethereumBlockNum" meddler:"eth_block_num"`
@@ -397,3 +408,28 @@ type AuctionVariablesAPI struct {
// SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before // SlotDeadline Number of blocks at the end of a slot in which any coordinator can forge if the winner has not forged one before
SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"` SlotDeadline uint8 `json:"slotDeadline" meddler:"slot_deadline" validate:"required"`
} }
// NewAuctionVariablesAPI creates an AuctionVariablesAPI from common.AuctionVariables
func NewAuctionVariablesAPI(auctionVariables *common.AuctionVariables) *AuctionVariablesAPI {
auctionVars := AuctionVariablesAPI{
EthBlockNum: auctionVariables.EthBlockNum,
DonationAddress: auctionVariables.DonationAddress,
BootCoordinator: auctionVariables.BootCoordinator,
BootCoordinatorURL: auctionVariables.BootCoordinatorURL,
DefaultSlotSetBidSlotNum: auctionVariables.DefaultSlotSetBidSlotNum,
ClosedAuctionSlots: auctionVariables.ClosedAuctionSlots,
OpenAuctionSlots: auctionVariables.OpenAuctionSlots,
Outbidding: auctionVariables.Outbidding,
SlotDeadline: auctionVariables.SlotDeadline,
}
for i, slot := range auctionVariables.DefaultSlotSetBid {
auctionVars.DefaultSlotSetBid[i] = apitypes.NewBigIntStr(slot)
}
for i, ratio := range auctionVariables.AllocationRatio {
auctionVars.AllocationRatio[i] = ratio
}
return &auctionVars
}


@@ -425,12 +425,13 @@ func (k *KVDB) MakeCheckpoint() error {
} }
// if a checkpoint for BatchNum already exists on disk, delete it // if a checkpoint for BatchNum already exists on disk, delete it
if _, err := os.Stat(checkpointPath); !os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(checkpointPath); err != nil { if err := os.RemoveAll(checkpointPath); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
// execute Checkpoint // execute Checkpoint
@@ -451,12 +452,25 @@ func (k *KVDB) MakeCheckpoint() error {
return nil return nil
} }
// CheckpointExists returns true if the checkpoint exists
func (k *KVDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
source := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(source); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
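The os.Stat handling added in this file repeatedly distinguishes three cases: the path is missing, Stat failed for another reason, or the path exists. A small helper capturing the idiom (pathExists is not part of this PR):

func pathExists(p string) (bool, error) {
	_, err := os.Stat(p)
	if os.IsNotExist(err) {
		return false, nil // definitely absent: not an error
	} else if err != nil {
		return false, err // Stat itself failed (permissions, I/O, ...)
	}
	return true, nil // path exists
}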
// DeleteCheckpoint removes the checkpoint of the given batchNum if it exists // DeleteCheckpoint removes the checkpoint of the given batchNum if it exists
func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error { func (k *KVDB) DeleteCheckpoint(batchNum common.BatchNum) error {
checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum)) checkpointPath := path.Join(k.cfg.Path, fmt.Sprintf("%s%d", PathBatchNum, batchNum))
if _, err := os.Stat(checkpointPath); os.IsNotExist(err) { if _, err := os.Stat(checkpointPath); os.IsNotExist(err) {
return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum)) return tracerr.Wrap(fmt.Errorf("Checkpoint with batchNum %d does not exist in DB", batchNum))
} else if err != nil {
return tracerr.Wrap(err)
} }
return os.RemoveAll(checkpointPath) return os.RemoveAll(checkpointPath)
@@ -520,6 +534,8 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
if _, err := os.Stat(source); os.IsNotExist(err) { if _, err := os.Stat(source); os.IsNotExist(err) {
// if the kvdb does not have a checkpoint at batchNum, return an error // if the kvdb does not have a checkpoint at batchNum, return an error
return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source)) return tracerr.Wrap(fmt.Errorf("Checkpoint \"%v\" does not exist", source))
} else if err != nil {
return tracerr.Wrap(err)
} }
// By locking we allow calling MakeCheckpointFromTo from multiple // By locking we allow calling MakeCheckpointFromTo from multiple
// places at the same time for the same stateDB. This allows the // places at the same time for the same stateDB. This allows the
@@ -533,12 +549,13 @@ func (k *KVDB) MakeCheckpointFromTo(fromBatchNum common.BatchNum, dest string) e
func pebbleMakeCheckpoint(source, dest string) error { func pebbleMakeCheckpoint(source, dest string) error {
// Remove dest folder (if it exists) before doing the checkpoint // Remove dest folder (if it exists) before doing the checkpoint
if _, err := os.Stat(dest); !os.IsNotExist(err) { if _, err := os.Stat(dest); os.IsNotExist(err) {
} else if err != nil {
return tracerr.Wrap(err)
} else {
if err := os.RemoveAll(dest); err != nil { if err := os.RemoveAll(dest); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} else if err != nil && !os.IsNotExist(err) {
return tracerr.Wrap(err)
} }
sto, err := pebble.NewPebbleStorage(source, false) sto, err := pebble.NewPebbleStorage(source, false)


@@ -1,12 +1,18 @@
package l2db package l2db
import ( import (
"fmt"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/russross/meddler" "github.com/russross/meddler"
) )
var (
errPoolFull = fmt.Errorf("the pool is at full capacity. More transactions are not accepted currently")
)
// AddAccountCreationAuthAPI inserts an account creation authorization into the DB // AddAccountCreationAuthAPI inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuthAPI(auth *common.AccountCreationAuth) error {
cancel, err := l2db.apiConnCon.Acquire() cancel, err := l2db.apiConnCon.Acquire()
@@ -28,7 +34,7 @@ func (l2db *L2DB) GetAccountCreationAuthAPI(addr ethCommon.Address) (*AccountCre
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
auth := new(AccountCreationAuthAPI) auth := new(AccountCreationAuthAPI)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth, l2db.dbRead, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
@@ -42,20 +48,54 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;", row := l2db.dbRead.QueryRow(`SELECT
common.PoolL2TxStatePending, ($1::NUMERIC * COALESCE(token.usd, 0) * fee_percentage($2::NUMERIC)) /
) (10.0 ^ token.decimals::NUMERIC)
var totalTxs uint32 FROM token WHERE token.token_id = $3;`,
if err := row.Scan(&totalTxs); err != nil { tx.AmountFloat, tx.Fee, tx.TokenID)
var feeUSD float64
if err := row.Scan(&feeUSD); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if totalTxs >= l2db.maxTxs { if feeUSD < l2db.minFeeUSD {
return tracerr.New( return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
"The pool is at full capacity. More transactions are not accepted currently", feeUSD, l2db.minFeeUSD))
)
} }
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
// Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false)
if err != nil {
return err
}
valuesPart, err := meddler.Default.PlaceholdersString(tx, false)
if err != nil {
return err
}
values, err := meddler.Default.Values(tx, false)
if err != nil {
return err
}
q := fmt.Sprintf(
`INSERT INTO tx_pool (%s)
SELECT %s
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $%v) < $%v;`,
namesPart, valuesPart,
len(values)+1, len(values)+2) //nolint:gomnd
values = append(values, common.PoolL2TxStatePending, l2db.maxTxs)
res, err := l2db.dbWrite.Exec(q, values...)
if err != nil {
return tracerr.Wrap(err)
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return tracerr.Wrap(err)
}
if rowsAffected == 0 {
return tracerr.Wrap(errPoolFull)
}
return nil
} }
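The rewritten AddTxAPI folds the capacity check into the INSERT itself: a row is only inserted while the pending count is below maxTxs, and zero affected rows signals a full pool. Reduced to two columns for brevity, the generated statement has this shape (sketch):

// Two concurrent AddTxAPI calls can no longer both pass a separate
// COUNT check and then both insert: the check and the insert run as
// one atomic statement.
const addTxSketch = `
INSERT INTO tx_pool (tx_id, state)
SELECT $1, $2
WHERE (SELECT COUNT(*) FROM tx_pool WHERE state = $3) < $4;`

// Exec(addTxSketch, txID, pending, pending, maxTxs) followed by
// RowsAffected() == 0 maps to errPoolFull.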
// selectPoolTxAPI select part of queries to get PoolL2TxRead // selectPoolTxAPI select part of queries to get PoolL2TxRead
@@ -78,7 +118,7 @@ func (l2db *L2DB) GetTxAPI(txID common.TxID) (*PoolTxAPI, error) {
defer l2db.apiConnCon.Release() defer l2db.apiConnCon.Release()
tx := new(PoolTxAPI) tx := new(PoolTxAPI)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx, l2db.dbRead, tx,
selectPoolTxAPI+"WHERE tx_id = $1;", selectPoolTxAPI+"WHERE tx_id = $1;",
txID, txID,
)) ))


@@ -21,10 +21,12 @@ import (
// L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant // L2DB stores L2 txs and authorization registers received by the coordinator and keeps them until they are no longer relevant
// due to them being forged or invalid after a safety period // due to them being forged or invalid after a safety period
type L2DB struct { type L2DB struct {
db *sqlx.DB dbRead *sqlx.DB
dbWrite *sqlx.DB
safetyPeriod common.BatchNum safetyPeriod common.BatchNum
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
@@ -32,17 +34,20 @@ type L2DB struct {
// To create it, a db connection is needed, along with the safety period expressed in batches, // To create it, a db connection is needed, along with the safety period expressed in batches,
// the maxTxs that the DB should hold and the TTL (time to live) for pending txs. // the maxTxs that the DB should hold and the TTL (time to live) for pending txs.
func NewL2DB( func NewL2DB(
db *sqlx.DB, dbRead, dbWrite *sqlx.DB,
safetyPeriod common.BatchNum, safetyPeriod common.BatchNum,
maxTxs uint32, maxTxs uint32,
minFeeUSD float64,
TTL time.Duration, TTL time.Duration,
apiConnCon *db.APIConnectionController, apiConnCon *db.APIConnectionController,
) *L2DB { ) *L2DB {
return &L2DB{ return &L2DB{
db: db, dbRead: dbRead,
dbWrite: dbWrite,
safetyPeriod: safetyPeriod, safetyPeriod: safetyPeriod,
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
minFeeUSD: minFeeUSD,
apiConnCon: apiConnCon, apiConnCon: apiConnCon,
} }
} }
@@ -50,12 +55,18 @@ func NewL2DB(
// DB returns a pointer to the L2DB.db. This method should be used only for // DB returns a pointer to the L2DB.db. This method should be used only for
// internal testing purposes. // internal testing purposes.
func (l2db *L2DB) DB() *sqlx.DB { func (l2db *L2DB) DB() *sqlx.DB {
return l2db.db return l2db.dbWrite
}
// MinFeeUSD returns the minimum fee in USD that is required to accept txs into
// the pool
func (l2db *L2DB) MinFeeUSD() float64 {
return l2db.minFeeUSD
} }
// AddAccountCreationAuth inserts an account creation authorization into the DB // AddAccountCreationAuth inserts an account creation authorization into the DB
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error { func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error {
_, err := l2db.db.Exec( _, err := l2db.dbWrite.Exec(
`INSERT INTO account_creation_auth (eth_addr, bjj, signature) `INSERT INTO account_creation_auth (eth_addr, bjj, signature)
VALUES ($1, $2, $3);`, VALUES ($1, $2, $3);`,
auth.EthAddr, auth.BJJ, auth.Signature, auth.EthAddr, auth.BJJ, auth.Signature,
@@ -67,30 +78,12 @@ func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error
func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) { func (l2db *L2DB) GetAccountCreationAuth(addr ethCommon.Address) (*common.AccountCreationAuth, error) {
auth := new(common.AccountCreationAuth) auth := new(common.AccountCreationAuth)
return auth, tracerr.Wrap(meddler.QueryRow( return auth, tracerr.Wrap(meddler.QueryRow(
l2db.db, auth, l2db.dbRead, auth,
"SELECT * FROM account_creation_auth WHERE eth_addr = $1;", "SELECT * FROM account_creation_auth WHERE eth_addr = $1;",
addr, addr,
)) ))
} }
// AddTx inserts a tx to the pool
func (l2db *L2DB) AddTx(tx *PoolL2TxWrite) error {
row := l2db.db.QueryRow(
"SELECT COUNT(*) FROM tx_pool WHERE state = $1;",
common.PoolL2TxStatePending,
)
var totalTxs uint32
if err := row.Scan(&totalTxs); err != nil {
return tracerr.Wrap(err)
}
if totalTxs >= l2db.maxTxs {
return tracerr.New(
"The pool is at full capacity. More transactions are not accepted currently",
)
}
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", tx))
}
// UpdateTxsInfo updates the parameter Info of the pool transactions // UpdateTxsInfo updates the parameter Info of the pool transactions
func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error { func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
if len(txs) == 0 { if len(txs) == 0 {
@@ -114,7 +107,7 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
WHERE tx_pool.tx_id = tx_update.id; WHERE tx_pool.tx_id = tx_update.id;
` `
if len(txUpdates) > 0 { if len(txUpdates) > 0 {
if _, err := sqlx.NamedExec(l2db.db, query, txUpdates); err != nil { if _, err := sqlx.NamedExec(l2db.dbWrite, query, txUpdates); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
} }
@@ -122,9 +115,8 @@ func (l2db *L2DB) UpdateTxsInfo(txs []common.PoolL2Tx) error {
return nil return nil
} }
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes, // NewPoolL2TxWriteFromPoolL2Tx creates a new PoolL2TxWrite from a PoolL2Tx
// but in production txs will only be inserted through the API func NewPoolL2TxWriteFromPoolL2Tx(tx *common.PoolL2Tx) *PoolL2TxWrite {
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
// transform tx from *common.PoolL2Tx to PoolL2TxWrite // transform tx from *common.PoolL2Tx to PoolL2TxWrite
insertTx := &PoolL2TxWrite{ insertTx := &PoolL2TxWrite{
TxID: tx.TxID, TxID: tx.TxID,
@@ -166,8 +158,15 @@ func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
f := new(big.Float).SetInt(tx.Amount) f := new(big.Float).SetInt(tx.Amount)
amountF, _ := f.Float64() amountF, _ := f.Float64()
insertTx.AmountFloat = amountF insertTx.AmountFloat = amountF
return insertTx
}
// AddTxTest inserts a tx into the L2DB. This is useful for test purposes,
// but in production txs will only be inserted through the API
func (l2db *L2DB) AddTxTest(tx *common.PoolL2Tx) error {
insertTx := NewPoolL2TxWriteFromPoolL2Tx(tx)
// insert tx // insert tx
return tracerr.Wrap(meddler.Insert(l2db.db, "tx_pool", insertTx)) return tracerr.Wrap(meddler.Insert(l2db.dbWrite, "tx_pool", insertTx))
} }
// selectPoolTxCommon select part of queries to get common.PoolL2Tx // selectPoolTxCommon select part of queries to get common.PoolL2Tx
@@ -176,14 +175,15 @@ tx_pool.to_bjj, tx_pool.token_id, tx_pool.amount, tx_pool.fee, tx_pool.nonce,
tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx, tx_pool.state, tx_pool.info, tx_pool.signature, tx_pool.timestamp, rq_from_idx,
rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount, rq_to_idx, tx_pool.rq_to_eth_addr, tx_pool.rq_to_bjj, tx_pool.rq_token_id, tx_pool.rq_amount,
tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type, tx_pool.rq_fee, tx_pool.rq_nonce, tx_pool.tx_type,
fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f AS fee_usd, token.usd_update (fee_percentage(tx_pool.fee::NUMERIC) * token.usd * tx_pool.amount_f) /
(10.0 ^ token.decimals::NUMERIC) AS fee_usd, token.usd_update
FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id ` FROM tx_pool INNER JOIN token ON tx_pool.token_id = token.token_id `
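The amended fee_usd expression divides by 10^decimals, so base-unit amounts are converted to whole tokens before being priced. With the fixtures used in the tests of this PR (decimals = 3, token price 1.0 USD, fee index 126 ≈ 10%), a 5000 base-unit transfer works out like this:

package main

import (
	"fmt"
	"math"
)

func main() {
	amountF := 5000.0 // tx_pool.amount_f, in token base units
	feePct := 0.10    // fee_percentage(126) is roughly 10% per the test comments
	usd := 1.0        // token.usd
	decimals := 3.0   // token.decimals
	feeUSD := feePct * usd * amountF / math.Pow(10, decimals)
	fmt.Println(feeUSD) // 0.5 USD; without the decimals division it would be 500
}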
// GetTx returns the specified Tx in common.PoolL2Tx format // GetTx returns the specified Tx in common.PoolL2Tx format
func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) { func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
tx := new(common.PoolL2Tx) tx := new(common.PoolL2Tx)
return tx, tracerr.Wrap(meddler.QueryRow( return tx, tracerr.Wrap(meddler.QueryRow(
l2db.db, tx, l2db.dbRead, tx,
selectPoolTxCommon+"WHERE tx_id = $1;", selectPoolTxCommon+"WHERE tx_id = $1;",
txID, txID,
)) ))
@@ -193,7 +193,7 @@ func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) { func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
var txs []*common.PoolL2Tx var txs []*common.PoolL2Tx
err := meddler.QueryAll( err := meddler.QueryAll(
l2db.db, &txs, l2db.dbRead, &txs,
selectPoolTxCommon+"WHERE state = $1", selectPoolTxCommon+"WHERE state = $1",
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
) )
@@ -218,8 +218,8 @@ func (l2db *L2DB) StartForging(txIDs []common.TxID, batchNum common.BatchNum) er
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.db.Rebind(query) query = l2db.dbWrite.Rebind(query)
_, err = l2db.db.Exec(query, args...) _, err = l2db.dbWrite.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -241,8 +241,8 @@ func (l2db *L2DB) DoneForging(txIDs []common.TxID, batchNum common.BatchNum) err
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.db.Rebind(query) query = l2db.dbWrite.Rebind(query)
_, err = l2db.db.Exec(query, args...) _, err = l2db.dbWrite.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -263,8 +263,8 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
query = l2db.db.Rebind(query) query = l2db.dbWrite.Rebind(query)
_, err = l2db.db.Exec(query, args...) _, err = l2db.dbWrite.Exec(query, args...)
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -272,7 +272,7 @@ func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID, batchNum common.BatchNum) e
// of unique FromIdx // of unique FromIdx
func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) { func (l2db *L2DB) GetPendingUniqueFromIdxs() ([]common.Idx, error) {
var idxs []common.Idx var idxs []common.Idx
rows, err := l2db.db.Query(`SELECT DISTINCT from_idx FROM tx_pool rows, err := l2db.dbRead.Query(`SELECT DISTINCT from_idx FROM tx_pool
WHERE state = $1;`, common.PoolL2TxStatePending) WHERE state = $1;`, common.PoolL2TxStatePending)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -313,7 +313,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// named query which works with slices, and doesn't handle an extra // named query which works with slices, and doesn't handle an extra
// individual argument. // individual argument.
query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum) query := fmt.Sprintf(invalidateOldNoncesQuery, batchNum)
if _, err := sqlx.NamedExec(l2db.db, query, updatedAccounts); err != nil { if _, err := sqlx.NamedExec(l2db.dbWrite, query, updatedAccounts); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
@@ -322,7 +322,7 @@ func (l2db *L2DB) InvalidateOldNonces(updatedAccounts []common.IdxNonce, batchNu
// Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg. // Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg.
// The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending // The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error { func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
_, err := l2db.db.Exec( _, err := l2db.dbWrite.Exec(
`UPDATE tx_pool SET batch_num = NULL, state = $1 `UPDATE tx_pool SET batch_num = NULL, state = $1
WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`, WHERE (state = $2 OR state = $3 OR state = $4) AND batch_num > $5`,
common.PoolL2TxStatePending, common.PoolL2TxStatePending,
@@ -338,7 +338,7 @@ func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
// it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded // it also deletes pending txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded
func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) { func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
now := time.Now().UTC().Unix() now := time.Now().UTC().Unix()
_, err = l2db.db.Exec( _, err = l2db.dbWrite.Exec(
`DELETE FROM tx_pool WHERE ( `DELETE FROM tx_pool WHERE (
batch_num < $1 AND (state = $2 OR state = $3) batch_num < $1 AND (state = $2 OR state = $3)
) OR ( ) OR (
@@ -354,3 +354,14 @@ func (l2db *L2DB) Purge(currentBatchNum common.BatchNum) (err error) {
) )
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// PurgeByExternalDelete deletes all pending transactions marked with true in
// the `external_delete` column. An external process can set this column to
// true to instruct the coordinator to delete the tx when possible.
func (l2db *L2DB) PurgeByExternalDelete() error {
_, err := l2db.dbWrite.Exec(
`DELETE from tx_pool WHERE (external_delete = true AND state = $1);`,
common.PoolL2TxStatePending,
)
return tracerr.Wrap(err)
}
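The external_delete column is meant to be written by a process outside the node. A hedged sketch of what that side might run, where externalDB and txID are placeholders:

// Flag a pending tx for removal; the coordinator deletes it on its
// next PurgeByExternalDelete pass.
_, err := externalDB.Exec(
	`UPDATE tx_pool SET external_delete = true WHERE tx_id = $1;`,
	txID,
)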


@@ -1,8 +1,8 @@
package l2db package l2db
import ( import (
"math" "database/sql"
"math/big" "fmt"
"os" "os"
"testing" "testing"
"time" "time"
@@ -20,12 +20,14 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var decimals = uint64(3)
var tokenValue = 1.0 // The price update gives a value of 1.0 USD to the token
var l2DB *L2DB var l2DB *L2DB
var l2DBWithACC *L2DB var l2DBWithACC *L2DB
var historyDB *historydb.HistoryDB var historyDB *historydb.HistoryDB
var tc *til.Context var tc *til.Context
var tokens map[common.TokenID]historydb.TokenWithUSD var tokens map[common.TokenID]historydb.TokenWithUSD
var tokensValue map[common.TokenID]float64
var accs map[common.Idx]common.Account var accs map[common.Idx]common.Account
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@@ -35,11 +37,11 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, 10, 1000, 24*time.Hour, nil) l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second) apiConnCon := dbUtils.NewAPICnnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, 10, 1000, 24*time.Hour, apiConnCon) l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, nil) historyDB = historydb.NewHistoryDB(db, db, nil)
// Run tests // Run tests
result := m.Run() result := m.Run()
// Close DB // Close DB
@@ -58,10 +60,10 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
AddToken(1) AddToken(1)
AddToken(2) AddToken(2)
CreateAccountDeposit(1) A: 2000 CreateAccountDeposit(1) A: 20000
CreateAccountDeposit(2) A: 2000 CreateAccountDeposit(2) A: 20000
CreateAccountDeposit(1) B: 1000 CreateAccountDeposit(1) B: 10000
CreateAccountDeposit(2) B: 1000 CreateAccountDeposit(2) B: 10000
> batchL1 > batchL1
> batchL1 > batchL1
> block > block
@@ -82,15 +84,23 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for i := range blocks {
block := &blocks[i]
for j := range block.Rollup.AddedTokens {
token := &block.Rollup.AddedTokens[j]
token.Name = fmt.Sprintf("Token %d", token.TokenID)
token.Symbol = fmt.Sprintf("TK%d", token.TokenID)
token.Decimals = decimals
}
}
tokens = make(map[common.TokenID]historydb.TokenWithUSD) tokens = make(map[common.TokenID]historydb.TokenWithUSD)
tokensValue = make(map[common.TokenID]float64) // tokensValue = make(map[common.TokenID]float64)
accs = make(map[common.Idx]common.Account) accs = make(map[common.Idx]common.Account)
value := 5 * 5.389329
now := time.Now().UTC() now := time.Now().UTC()
// Add all blocks except for the last one // Add all blocks except for the last one
for i := range blocks[:len(blocks)-1] { for i := range blocks[:len(blocks)-1] {
err = historyDB.AddBlockSCData(&blocks[i]) if err := historyDB.AddBlockSCData(&blocks[i]); err != nil {
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
for _, batch := range blocks[i].Rollup.Batches { for _, batch := range blocks[i].Rollup.Batches {
@@ -106,39 +116,38 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
Name: token.Name, Name: token.Name,
Symbol: token.Symbol, Symbol: token.Symbol,
Decimals: token.Decimals, Decimals: token.Decimals,
USD: &tokenValue,
USDUpdate: &now,
} }
tokensValue[token.TokenID] = value / math.Pow(10, float64(token.Decimals))
readToken.USDUpdate = &now
readToken.USD = &value
tokens[token.TokenID] = readToken tokens[token.TokenID] = readToken
} // Set value to the tokens
// Set value to the tokens (tokens have no symbol) err := historyDB.UpdateTokenValue(readToken.Symbol, *readToken.USD)
tokenSymbol := "" if err != nil {
err := historyDB.UpdateTokenValue(tokenSymbol, value) return tracerr.Wrap(err)
if err != nil { }
return tracerr.Wrap(err)
} }
} }
return nil return nil
} }
func generatePoolL2Txs() ([]common.PoolL2Tx, error) { func generatePoolL2Txs() ([]common.PoolL2Tx, error) {
// Fee = 126 corresponds to ~10%
setPool := ` setPool := `
Type: PoolL2 Type: PoolL2
PoolTransfer(1) A-B: 6 (4) PoolTransfer(1) A-B: 6000 (126)
PoolTransfer(2) A-B: 3 (1) PoolTransfer(2) A-B: 3000 (126)
PoolTransfer(1) B-A: 5 (2) PoolTransfer(1) B-A: 5000 (126)
PoolTransfer(2) B-A: 10 (3) PoolTransfer(2) B-A: 10000 (126)
PoolTransfer(1) A-B: 7 (2) PoolTransfer(1) A-B: 7000 (126)
PoolTransfer(2) A-B: 2 (1) PoolTransfer(2) A-B: 2000 (126)
PoolTransfer(1) B-A: 8 (2) PoolTransfer(1) B-A: 8000 (126)
PoolTransfer(2) B-A: 1 (1) PoolTransfer(2) B-A: 1000 (126)
PoolTransfer(1) A-B: 3 (1) PoolTransfer(1) A-B: 3000 (126)
PoolTransferToEthAddr(2) B-A: 5 (2) PoolTransferToEthAddr(2) B-A: 5000 (126)
PoolTransferToBJJ(2) B-A: 5 (2) PoolTransferToBJJ(2) B-A: 5000 (126)
PoolExit(1) A: 5 (2) PoolExit(1) A: 5000 (126)
PoolExit(2) B: 3 (1) PoolExit(2) B: 3000 (126)
` `
poolL2Txs, err := tc.GeneratePoolL2Txs(setPool) poolL2Txs, err := tc.GeneratePoolL2Txs(setPool)
if err != nil { if err != nil {
@@ -153,25 +162,74 @@ func TestAddTxTest(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
assert.NoError(t, err) require.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx) assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone() nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone) assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset) assert.Equal(t, 0, offset)
} }
} }
func TestAddTxAPI(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
oldMaxTxs := l2DBWithACC.maxTxs
// set max number of pending txs that can be kept in the pool to 5
l2DBWithACC.maxTxs = 5
poolL2Txs, err := generatePoolL2Txs()
txs := make([]*PoolL2TxWrite, len(poolL2Txs))
for i := range poolL2Txs {
txs[i] = NewPoolL2TxWriteFromPoolL2Tx(&poolL2Txs[i])
}
require.NoError(t, err)
require.GreaterOrEqual(t, len(poolL2Txs), 8)
for i := range txs[:5] {
err := l2DBWithACC.AddTxAPI(txs[i])
require.NoError(t, err)
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
require.NoError(t, err)
assertTx(t, &poolL2Txs[i], fetchedTx)
nameZone, offset := fetchedTx.Timestamp.Zone()
assert.Equal(t, "UTC", nameZone)
assert.Equal(t, 0, offset)
}
err = l2DBWithACC.AddTxAPI(txs[5])
assert.Equal(t, errPoolFull, tracerr.Unwrap(err))
// reset maxTxs to original value
l2DBWithACC.maxTxs = oldMaxTxs
// set minFeeUSD to a higher value than the tx feeUSD to test the error
// of inserting a tx with a fee lower than the minimum
oldMinFeeUSD := l2DBWithACC.minFeeUSD
tx := txs[5]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount, decimals, tokenValue)
// set minFeeUSD higher than the tx fee to trigger the error
l2DBWithACC.minFeeUSD = feeAmountUSD + 1
err = l2DBWithACC.AddTxAPI(tx)
require.Error(t, err)
assert.Regexp(t, "tx.feeUSD (.*) < minFeeUSD (.*)", err.Error())
// reset minFeeUSD to original value
l2DBWithACC.minFeeUSD = oldMinFeeUSD
}
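TestAddTxAPI pins down two rejection paths: once the pool already holds maxTxs pending transactions, inserts fail with errPoolFull, and a transaction whose fee converts to less than minFeeUSD is rejected with the tx.feeUSD < minFeeUSD error. A standalone sketch of those guards; the type poolGuards, the method check, and the error text are invented, and the real logic lives inside l2db.AddTxAPI:

package main

import (
	"errors"
	"fmt"
)

var errPoolFull = errors.New("pool is at full capacity") // message invented

type poolGuards struct {
	maxTxs    uint32  // maximum pending txs kept in the pool
	minFeeUSD float64 // minimum fee, in USD, for a tx to be accepted
}

func (p poolGuards) check(pendingTxs uint32, feeUSD float64) error {
	if pendingTxs >= p.maxTxs {
		return errPoolFull
	}
	if feeUSD < p.minFeeUSD {
		return fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)", feeUSD, p.minFeeUSD)
	}
	return nil
}

func main() {
	g := poolGuards{maxTxs: 5, minFeeUSD: 0.5}
	fmt.Println(g.check(5, 1.0)) // pool full
	fmt.Println(g.check(0, 0.1)) // fee below minimum
}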
func TestUpdateTxsInfo(t *testing.T) { func TestUpdateTxsInfo(t *testing.T) {
err := prepareHistoryDB(historyDB) err := prepareHistoryDB(historyDB)
if err != nil { if err != nil {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
require.NoError(t, err) require.NoError(t, err)
@@ -185,7 +243,7 @@ func TestUpdateTxsInfo(t *testing.T) {
for i := range poolL2Txs { for i := range poolL2Txs {
fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID) fetchedTx, err := l2DB.GetTx(poolL2Txs[i].TxID)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "test", fetchedTx.Info) assert.Equal(t, "test", fetchedTx.Info)
} }
} }
@@ -203,9 +261,8 @@ func assertTx(t *testing.T, expected, actual *common.PoolL2Tx) {
assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix()) assert.Less(t, token.USDUpdate.Unix()-3, actual.AbsoluteFeeUpdate.Unix())
expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate expected.AbsoluteFeeUpdate = actual.AbsoluteFeeUpdate
// Set expected fee // Set expected fee
f := new(big.Float).SetInt(expected.Amount) amountUSD := common.TokensToUSD(expected.Amount, token.Decimals, *token.USD)
amountF, _ := f.Float64() expected.AbsoluteFee = amountUSD * expected.Fee.Percentage()
expected.AbsoluteFee = *token.USD * amountF * expected.Fee.Percentage()
test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee) test.AssertUSD(t, &expected.AbsoluteFee, &actual.AbsoluteFee)
} }
assert.Equal(t, expected, actual) assert.Equal(t, expected, actual)
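The rewritten expectation prices the full amount in USD first and then applies the fee percentage, instead of mixing raw units and price. A worked instance under assumed numbers (3-decimal token at 26.946645 USD, 6000-unit transfer, ~10% fee selector):

package main

import "fmt"

func main() {
	// amountUSD = usdPrice * amount / 10^decimals
	amountUSD := 26.946645 * 6000 / 1000 // ≈ 161.68 USD
	feePct := 0.10                       // FeeSelector 126 ≈ 10%
	fmt.Printf("AbsoluteFee ≈ %.2f USD\n", amountUSD*feePct) // ≈ 16.17
}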
@@ -230,19 +287,28 @@ func TestGetPending(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
var pendingTxs []*common.PoolL2Tx var pendingTxs []*common.PoolL2Tx
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
pendingTxs = append(pendingTxs, &poolL2Txs[i]) pendingTxs = append(pendingTxs, &poolL2Txs[i])
} }
fetchedTxs, err := l2DB.GetPendingTxs() fetchedTxs, err := l2DB.GetPendingTxs()
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, len(pendingTxs), len(fetchedTxs)) assert.Equal(t, len(pendingTxs), len(fetchedTxs))
for i := range fetchedTxs { for i := range fetchedTxs {
assertTx(t, pendingTxs[i], &fetchedTxs[i]) assertTx(t, pendingTxs[i], &fetchedTxs[i])
} }
// Check AbsoluteFee amount
for i := range fetchedTxs {
tx := &fetchedTxs[i]
feeAmount, err := common.CalcFeeAmount(tx.Amount, tx.Fee)
require.NoError(t, err)
feeAmountUSD := common.TokensToUSD(feeAmount,
tokens[tx.TokenID].Decimals, *tokens[tx.TokenID].USD)
assert.InEpsilon(t, feeAmountUSD, tx.AbsoluteFee, 0.01)
}
} }
func TestStartForging(t *testing.T) { func TestStartForging(t *testing.T) {
@@ -253,13 +319,13 @@ func TestStartForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -267,11 +333,11 @@ func TestStartForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range startForgingTxIDs { for _, id := range startForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForging, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -285,13 +351,13 @@ func TestDoneForging(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
var startForgingTxIDs []common.TxID var startForgingTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
} }
@@ -299,7 +365,7 @@ func TestDoneForging(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum) err = l2DB.StartForging(startForgingTxIDs, fakeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -311,12 +377,12 @@ func TestDoneForging(t *testing.T) {
} }
// Done forging txs // Done forging txs
err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, fakeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range doneForgingTxIDs { for _, id := range doneForgingTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateForged, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -330,13 +396,13 @@ func TestInvalidate(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
var invalidTxIDs []common.TxID var invalidTxIDs []common.TxID
randomizer := 0 randomizer := 0
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 { if poolL2Txs[i].State != common.PoolL2TxStateInvalid && randomizer%2 == 0 {
randomizer++ randomizer++
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
@@ -344,11 +410,11 @@ func TestInvalidate(t *testing.T) {
} }
// Invalidate txs // Invalidate txs
err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, fakeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State) assert.Equal(t, common.PoolL2TxStateInvalid, fetchedTx.State)
assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum) assert.Equal(t, &fakeBatchNum, fetchedTx.BatchNum)
} }
@@ -362,7 +428,7 @@ func TestInvalidateOldNonces(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
// Update Accounts currentNonce // Update Accounts currentNonce
var updateAccounts []common.IdxNonce var updateAccounts []common.IdxNonce
var currentNonce = common.Nonce(1) var currentNonce = common.Nonce(1)
@@ -379,13 +445,13 @@ func TestInvalidateOldNonces(t *testing.T) {
invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID) invalidTxIDs = append(invalidTxIDs, poolL2Txs[i].TxID)
} }
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
} }
// sanity check // sanity check
require.Greater(t, len(invalidTxIDs), 0) require.Greater(t, len(invalidTxIDs), 0)
err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum) err = l2DB.InvalidateOldNonces(updateAccounts, fakeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Fetch txs and check that they've been updated correctly // Fetch txs and check that they've been updated correctly
for _, id := range invalidTxIDs { for _, id := range invalidTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
@@ -407,7 +473,7 @@ func TestReorg(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -418,7 +484,7 @@ func TestReorg(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -430,7 +496,7 @@ func TestReorg(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -455,22 +521,22 @@ func TestReorg(t *testing.T) {
// Invalidate txs BEFORE reorgBatch --> nonReorg // Invalidate txs BEFORE reorgBatch --> nonReorg
err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch) err = l2DB.InvalidateTxs(invalidTxIDs, lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
// Done forging txs in reorgBatch --> Reorg // Done forging txs in reorgBatch --> Reorg
err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch) err = l2DB.DoneForging(doneForgingTxIDs, reorgBatch)
assert.NoError(t, err) require.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -487,7 +553,7 @@ func TestReorg2(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
reorgedTxIDs := []common.TxID{} reorgedTxIDs := []common.TxID{}
nonReorgedTxIDs := []common.TxID{} nonReorgedTxIDs := []common.TxID{}
@@ -498,7 +564,7 @@ func TestReorg2(t *testing.T) {
// Add txs to DB // Add txs to DB
for i := range poolL2Txs { for i := range poolL2Txs {
err := l2DB.AddTxTest(&poolL2Txs[i]) err := l2DB.AddTxTest(&poolL2Txs[i])
assert.NoError(t, err) require.NoError(t, err)
if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 { if poolL2Txs[i].State == common.PoolL2TxStatePending && randomizer%2 == 0 {
startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID) startForgingTxIDs = append(startForgingTxIDs, poolL2Txs[i].TxID)
allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID) allTxRandomize = append(allTxRandomize, poolL2Txs[i].TxID)
@@ -510,7 +576,7 @@ func TestReorg2(t *testing.T) {
} }
// Start forging txs // Start forging txs
err = l2DB.StartForging(startForgingTxIDs, lastValidBatch) err = l2DB.StartForging(startForgingTxIDs, lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
var doneForgingTxIDs []common.TxID var doneForgingTxIDs []common.TxID
randomizer = 0 randomizer = 0
@@ -532,22 +598,22 @@ func TestReorg2(t *testing.T) {
} }
// Done forging txs BEFORE reorgBatch --> nonReorg // Done forging txs BEFORE reorgBatch --> nonReorg
err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch) err = l2DB.DoneForging(doneForgingTxIDs, lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
// Invalidate txs in reorgBatch --> Reorg // Invalidate txs in reorgBatch --> Reorg
err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch) err = l2DB.InvalidateTxs(invalidTxIDs, reorgBatch)
assert.NoError(t, err) require.NoError(t, err)
err = l2DB.Reorg(lastValidBatch) err = l2DB.Reorg(lastValidBatch)
assert.NoError(t, err) require.NoError(t, err)
for _, id := range reorgedTxIDs { for _, id := range reorgedTxIDs {
tx, err := l2DBWithACC.GetTxAPI(id) tx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Nil(t, tx.BatchNum) assert.Nil(t, tx.BatchNum)
assert.Equal(t, common.PoolL2TxStatePending, tx.State) assert.Equal(t, common.PoolL2TxStatePending, tx.State)
} }
for _, id := range nonReorgedTxIDs { for _, id := range nonReorgedTxIDs {
fetchedTx, err := l2DBWithACC.GetTxAPI(id) fetchedTx, err := l2DBWithACC.GetTxAPI(id)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum) assert.Equal(t, lastValidBatch, *fetchedTx.BatchNum)
} }
} }
@@ -563,7 +629,7 @@ func TestPurge(t *testing.T) {
var poolL2Tx []common.PoolL2Tx var poolL2Tx []common.PoolL2Tx
for i := 0; i < generateTx; i++ { for i := 0; i < generateTx; i++ {
poolL2TxAux, err := generatePoolL2Txs() poolL2TxAux, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
poolL2Tx = append(poolL2Tx, poolL2TxAux...) poolL2Tx = append(poolL2Tx, poolL2TxAux...)
} }
@@ -590,39 +656,39 @@ func TestPurge(t *testing.T) {
deletedIDs = append(deletedIDs, poolL2Tx[i].TxID) deletedIDs = append(deletedIDs, poolL2Tx[i].TxID)
} }
err := l2DB.AddTxTest(&tx) err := l2DB.AddTxTest(&tx)
assert.NoError(t, err) require.NoError(t, err)
} }
// Set batchNum of kept txs // Set batchNum of kept txs
for i := range keepedIDs { for i := range keepedIDs {
_, err = l2DB.db.Exec( _, err = l2DB.dbWrite.Exec(
"UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;", "UPDATE tx_pool SET batch_num = $1 WHERE tx_id = $2;",
safeBatchNum, keepedIDs[i], safeBatchNum, keepedIDs[i],
) )
assert.NoError(t, err) require.NoError(t, err)
} }
// Start forging txs and set batchNum // Start forging txs and set batchNum
err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.StartForging(doneForgingTxIDs, toDeleteBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Done forging txs and set batchNum // Done forging txs and set batchNum
err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum) err = l2DB.DoneForging(doneForgingTxIDs, toDeleteBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Invalidate txs and set batchNum // Invalidate txs and set batchNum
err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum) err = l2DB.InvalidateTxs(invalidTxIDs, toDeleteBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Update timestamp of afterTTL txs // Update timestamp of afterTTL txs
deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0) deleteTimestamp := time.Unix(time.Now().UTC().Unix()-int64(l2DB.ttl.Seconds()+float64(4*time.Second)), 0)
for _, id := range afterTTLIDs { for _, id := range afterTTLIDs {
// Set timestamp // Set timestamp
_, err = l2DB.db.Exec( _, err = l2DB.dbWrite.Exec(
"UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;", "UPDATE tx_pool SET timestamp = $1, state = $2 WHERE tx_id = $3;",
deleteTimestamp, common.PoolL2TxStatePending, id, deleteTimestamp, common.PoolL2TxStatePending, id,
) )
assert.NoError(t, err) require.NoError(t, err)
} }
// Purge txs // Purge txs
err = l2DB.Purge(safeBatchNum) err = l2DB.Purge(safeBatchNum)
assert.NoError(t, err) require.NoError(t, err)
// Check results // Check results
for _, id := range deletedIDs { for _, id := range deletedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
@@ -630,7 +696,7 @@ func TestPurge(t *testing.T) {
} }
for _, id := range keepedIDs { for _, id := range keepedIDs {
_, err := l2DB.GetTx(id) _, err := l2DB.GetTx(id)
assert.NoError(t, err) require.NoError(t, err)
} }
} }
@@ -644,10 +710,10 @@ func TestAuth(t *testing.T) {
for i := 0; i < len(auths); i++ { for i := 0; i < len(auths); i++ {
// Add to the DB // Add to the DB
err := l2DB.AddAccountCreationAuth(auths[i]) err := l2DB.AddAccountCreationAuth(auths[i])
assert.NoError(t, err) require.NoError(t, err)
// Fetch from DB // Fetch from DB
auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr) auth, err := l2DB.GetAccountCreationAuth(auths[i].EthAddr)
assert.NoError(t, err) require.NoError(t, err)
// Check fetched vs generated // Check fetched vs generated
assert.Equal(t, auths[i].EthAddr, auth.EthAddr) assert.Equal(t, auths[i].EthAddr, auth.EthAddr)
assert.Equal(t, auths[i].BJJ, auth.BJJ) assert.Equal(t, auths[i].BJJ, auth.BJJ)
@@ -665,7 +731,7 @@ func TestAddGet(t *testing.T) {
log.Error("Error prepare historyDB", err) log.Error("Error prepare historyDB", err)
} }
poolL2Txs, err := generatePoolL2Txs() poolL2Txs, err := generatePoolL2Txs()
assert.NoError(t, err) require.NoError(t, err)
// We will work with only 3 txs // We will work with only 3 txs
require.GreaterOrEqual(t, len(poolL2Txs), 3) require.GreaterOrEqual(t, len(poolL2Txs), 3)
@@ -701,3 +767,56 @@ func TestAddGet(t *testing.T) {
assert.Equal(t, txs[i], *dbTx) assert.Equal(t, txs[i], *dbTx)
} }
} }
func TestPurgeByExternalDelete(t *testing.T) {
err := prepareHistoryDB(historyDB)
if err != nil {
log.Error("Error prepare historyDB", err)
}
txs, err := generatePoolL2Txs()
require.NoError(t, err)
// We will work with 8 txs
require.GreaterOrEqual(t, len(txs), 8)
txs = txs[:8]
for i := range txs {
require.NoError(t, l2DB.AddTxTest(&txs[i]))
}
// We will recreate this scenario:
// tx index, status , external_delete
// 0 , pending, false
// 1 , pending, false
// 2 , pending, true // will be deleted
// 3 , pending, true // will be deleted
// 4 , fging , false
// 5 , fging , false
// 6 , fging , true
// 7 , fging , true
require.NoError(t, l2DB.StartForging(
[]common.TxID{txs[4].TxID, txs[5].TxID, txs[6].TxID, txs[7].TxID},
1))
_, err = l2DB.dbWrite.Exec(
`UPDATE tx_pool SET external_delete = true WHERE
tx_id IN ($1, $2, $3, $4)
;`,
txs[2].TxID, txs[3].TxID, txs[6].TxID, txs[7].TxID,
)
require.NoError(t, err)
require.NoError(t, l2DB.PurgeByExternalDelete())
// Query txs that have not been deleted
for _, i := range []int{0, 1, 4, 5, 6, 7} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.NoError(t, err)
}
// Query txs that have been deleted
for _, i := range []int{2, 3} {
txID := txs[i].TxID
_, err := l2DB.GetTx(txID)
require.Equal(t, sql.ErrNoRows, tracerr.Unwrap(err))
}
}
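The scenario table above is effectively the contract: only pending transactions flagged external_delete are purged, which is why the forging txs 6 and 7 survive. A sketch of a query with those semantics, written as a standalone function for clarity (the real PurgeByExternalDelete is a method on *L2DB and may differ):

package sketch

import (
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
)

// purgeByExternalDelete removes only pending txs marked for external deletion.
func purgeByExternalDelete(db *sqlx.DB) error {
	_, err := db.Exec(
		`DELETE FROM tx_pool WHERE external_delete = true AND state = $1;`,
		common.PoolL2TxStatePending,
	)
	return tracerr.Wrap(err)
}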


@@ -34,6 +34,7 @@ type PoolL2TxWrite struct {
RqFee *common.FeeSelector `meddler:"rq_fee"` RqFee *common.FeeSelector `meddler:"rq_fee"`
RqNonce *common.Nonce `meddler:"rq_nonce"` RqNonce *common.Nonce `meddler:"rq_nonce"`
Type common.TxType `meddler:"tx_type"` Type common.TxType `meddler:"tx_type"`
ClientIP string `meddler:"client_ip"`
} }
// PoolTxAPI represents a L2 Tx pool with extra metadata used by the API // PoolTxAPI represents a L2 Tx pool with extra metadata used by the API
@@ -94,7 +95,6 @@ func (tx PoolTxAPI) MarshalJSON() ([]byte, error) {
"info": tx.Info, "info": tx.Info,
"signature": tx.Signature, "signature": tx.Signature,
"timestamp": tx.Timestamp, "timestamp": tx.Timestamp,
"batchNum": tx.BatchNum,
"requestFromAccountIndex": tx.RqFromIdx, "requestFromAccountIndex": tx.RqFromIdx,
"requestToAccountIndex": tx.RqToIdx, "requestToAccountIndex": tx.RqToIdx,
"requestToHezEthereumAddress": tx.RqToEthAddr, "requestToHezEthereumAddress": tx.RqToEthAddr,


@@ -47,7 +47,7 @@ CREATE TABLE token (
name VARCHAR(20) NOT NULL, name VARCHAR(20) NOT NULL,
symbol VARCHAR(10) NOT NULL, symbol VARCHAR(10) NOT NULL,
decimals INT NOT NULL, decimals INT NOT NULL,
usd NUMERIC, usd NUMERIC, -- value of a normalized token (1 token = 10^decimals units)
usd_update TIMESTAMP WITHOUT TIME ZONE usd_update TIMESTAMP WITHOUT TIME ZONE
); );
@@ -100,6 +100,15 @@ CREATE TABLE account (
eth_addr BYTEA NOT NULL eth_addr BYTEA NOT NULL
); );
CREATE TABLE account_update (
item_id SERIAL,
eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
idx BIGINT NOT NULL REFERENCES account (idx) ON DELETE CASCADE,
nonce BIGINT NOT NULL,
balance BYTEA NOT NULL
);
CREATE TABLE exit_tree ( CREATE TABLE exit_tree (
item_id SERIAL PRIMARY KEY, item_id SERIAL PRIMARY KEY,
batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE, batch_num BIGINT REFERENCES batch (batch_num) ON DELETE CASCADE,
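The new account_update table records each account's nonce and balance per batch, decoupling balance history from the state tree. An illustrative meddler mapping for reading those rows back; the struct name and the use of hermez-node's registered bigint meddler type are assumptions, and the actual historydb type may differ:

package sketch

import (
	"math/big"

	"github.com/hermeznetwork/hermez-node/common"
)

// AccountUpdate mirrors one row of account_update (item_id omitted).
type AccountUpdate struct {
	EthBlockNum int64           `meddler:"eth_block_num"`
	BatchNum    common.BatchNum `meddler:"batch_num"`
	Idx         common.Idx      `meddler:"idx"`
	Nonce       common.Nonce    `meddler:"nonce"`
	Balance     *big.Int        `meddler:"balance,bigint"`
}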
@@ -618,7 +627,9 @@ CREATE TABLE tx_pool (
rq_amount BYTEA, rq_amount BYTEA,
rq_fee SMALLINT, rq_fee SMALLINT,
rq_nonce BIGINT, rq_nonce BIGINT,
tx_type VARCHAR(40) NOT NULL tx_type VARCHAR(40) NOT NULL,
client_ip VARCHAR,
external_delete BOOLEAN NOT NULL DEFAULT false
); );
-- +migrate StatementBegin -- +migrate StatementBegin
@@ -650,35 +661,47 @@ CREATE TABLE account_creation_auth (
timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now()) timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT timezone('utc', now())
); );
CREATE TABLE node_info (
item_id SERIAL PRIMARY KEY,
state BYTEA, -- object returned by GET /state
config BYTEA, -- Node config
-- max_pool_txs BIGINT, -- L2DB config
-- min_fee NUMERIC, -- L2DB config
constants BYTEA -- info of the network that is constant
);
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
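Because node_info is seeded with exactly one row, consumers never insert again: refreshing the cached API state (or config, or constants) is a plain UPDATE against item_id = 1. A minimal sketch, assuming the state blob is already serialized; the function name setNodeState is invented:

package sketch

import (
	"github.com/hermeznetwork/tracerr"
	"github.com/jmoiron/sqlx"
)

// setNodeState overwrites the single node_info row with a new /state payload.
func setNodeState(db *sqlx.DB, state []byte) error {
	_, err := db.Exec("UPDATE node_info SET state = $1 WHERE item_id = 1;", state)
	return tracerr.Wrap(err)
}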
-- +migrate Down -- +migrate Down
-- drop triggers -- triggers
DROP TRIGGER trigger_token_usd_update ON token; DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
DROP TRIGGER trigger_set_tx ON tx; DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
DROP TRIGGER trigger_forge_l1_txs ON batch; DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
DROP TRIGGER trigger_set_pool_tx ON tx_pool; DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
-- drop functions -- functions
DROP FUNCTION hez_idx; DROP FUNCTION IF EXISTS hez_idx;
DROP FUNCTION set_token_usd_update; DROP FUNCTION IF EXISTS set_token_usd_update;
DROP FUNCTION fee_percentage; DROP FUNCTION IF EXISTS fee_percentage;
DROP FUNCTION set_tx; DROP FUNCTION IF EXISTS set_tx;
DROP FUNCTION forge_l1_user_txs; DROP FUNCTION IF EXISTS forge_l1_user_txs;
DROP FUNCTION set_pool_tx; DROP FUNCTION IF EXISTS set_pool_tx;
-- drop tables -- drop tables IF EXISTS
DROP TABLE account_creation_auth; DROP TABLE IF EXISTS node_info;
DROP TABLE tx_pool; DROP TABLE IF EXISTS account_creation_auth;
DROP TABLE auction_vars; DROP TABLE IF EXISTS tx_pool;
DROP TABLE rollup_vars; DROP TABLE IF EXISTS auction_vars;
DROP TABLE escape_hatch_withdrawal; DROP TABLE IF EXISTS rollup_vars;
DROP TABLE bucket_update; DROP TABLE IF EXISTS escape_hatch_withdrawal;
DROP TABLE token_exchange; DROP TABLE IF EXISTS bucket_update;
DROP TABLE wdelayer_vars; DROP TABLE IF EXISTS token_exchange;
DROP TABLE tx; DROP TABLE IF EXISTS wdelayer_vars;
DROP TABLE exit_tree; DROP TABLE IF EXISTS tx;
DROP TABLE account; DROP TABLE IF EXISTS exit_tree;
DROP TABLE token; DROP TABLE IF EXISTS account_update;
DROP TABLE bid; DROP TABLE IF EXISTS account;
DROP TABLE batch; DROP TABLE IF EXISTS token;
DROP TABLE coordinator; DROP TABLE IF EXISTS bid;
DROP TABLE block; DROP TABLE IF EXISTS batch;
-- drop sequences DROP TABLE IF EXISTS coordinator;
DROP SEQUENCE tx_item_id; DROP TABLE IF EXISTS block;
-- sequences
DROP SEQUENCE IF EXISTS tx_item_id;


@@ -498,11 +498,17 @@ func NewLocalStateDB(cfg Config, synchronizerDB *StateDB) (*LocalStateDB, error)
}, nil }, nil
} }
// CheckpointExists returns true if the checkpoint exists
func (l *LocalStateDB) CheckpointExists(batchNum common.BatchNum) (bool, error) {
return l.db.CheckpointExists(batchNum)
}
// Reset performs a reset in the LocalStateDB. If fromSynchronizer is true, it // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. // gets the state from LocalStateDB.synchronizerStateDB for the given batchNum.
// If fromSynchronizer is false, get the state from LocalStateDB checkpoints. // If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error { func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
if fromSynchronizer { if fromSynchronizer {
log.Debugw("Making StateDB ResetFromSynchronizer", "batch", batchNum, "type", l.cfg.Type)
if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil { if err := l.db.ResetFromSynchronizer(batchNum, l.synchronizerStateDB.db); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
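The new CheckpointExists lets callers fail fast before a Reset instead of resetting to a batch that was never checkpointed. A sketch of that usage; the wrapper name resetIfCheckpointed is invented and the statedb import path is assumed:

package sketch

import (
	"fmt"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/statedb"
	"github.com/hermeznetwork/tracerr"
)

// resetIfCheckpointed resets the LocalStateDB only when the target
// checkpoint is known to exist.
func resetIfCheckpointed(l *statedb.LocalStateDB, batchNum common.BatchNum,
	fromSynchronizer bool) error {
	exists, err := l.CheckpointExists(batchNum)
	if err != nil {
		return tracerr.Wrap(err)
	}
	if !exists {
		return tracerr.Wrap(fmt.Errorf("no checkpoint for batch %v", batchNum))
	}
	return tracerr.Wrap(l.Reset(batchNum, fromSynchronizer))
}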


@@ -13,9 +13,9 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/marusama/semaphore/v2"
migrate "github.com/rubenv/sql-migrate" migrate "github.com/rubenv/sql-migrate"
"github.com/russross/meddler" "github.com/russross/meddler"
"golang.org/x/sync/semaphore"
) )
var migrations *migrate.PackrMigrationSource var migrations *migrate.PackrMigrationSource
@@ -89,14 +89,14 @@ func InitSQLDB(port int, host, user, password, name string) (*sqlx.DB, error) {
// APIConnectionController is used to limit the SQL open connections used by the API // APIConnectionController is used to limit the SQL open connections used by the API
type APIConnectionController struct { type APIConnectionController struct {
smphr semaphore.Semaphore smphr *semaphore.Weighted
timeout time.Duration timeout time.Duration
} }
// NewAPICnnectionController initializes APIConnectionController // NewAPICnnectionController initializes APIConnectionController
func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController { func NewAPICnnectionController(maxConnections int, timeout time.Duration) *APIConnectionController {
return &APIConnectionController{ return &APIConnectionController{
smphr: semaphore.New(maxConnections), smphr: semaphore.NewWeighted(int64(maxConnections)),
timeout: timeout, timeout: timeout,
} }
} }
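The swap from marusama/semaphore to golang.org/x/sync's Weighted semaphore keeps the same job — capping concurrent API SQL work — but acquisition becomes context-driven: Acquire blocks until a slot frees or the context expires. A sketch of the acquire/release pattern under the configured timeout; the method name acquire is invented:

package sketch

import (
	"context"
	"time"

	"golang.org/x/sync/semaphore"
)

type apiConnectionController struct {
	smphr   *semaphore.Weighted
	timeout time.Duration
}

// acquire reserves one connection slot; Acquire returns a context error
// (deadline exceeded) if no slot frees up within the timeout.
func (a *apiConnectionController) acquire(ctx context.Context) (release func(), err error) {
	ctx, cancel := context.WithTimeout(ctx, a.timeout)
	defer cancel()
	if err := a.smphr.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { a.smphr.Release(1) }, nil
}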


@@ -324,5 +324,6 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction,
Value: tx.Value(), Value: tx.Value(),
Data: tx.Data(), Data: tx.Data(),
} }
return c.client.CallContract(ctx, msg, blockNum) result, err := c.client.CallContract(ctx, msg, blockNum)
return result, tracerr.Wrap(err)
} }


@@ -316,7 +316,7 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
} }
consts, err := c.RollupConstants() consts, err := c.RollupConstants()
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(fmt.Errorf("RollupConstants at %v: %w", address, err))
} }
c.consts = consts c.consts = consts
return c, nil return c, nil
@@ -327,7 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
if auth == nil { if auth == nil {
auth, err = c.client.NewAuth() auth, err = c.client.NewAuth()
if err != nil { if err != nil {
return nil, err return nil, tracerr.Wrap(err)
} }
auth.GasLimit = 1000000 auth.GasLimit = 1000000
} }
@@ -393,7 +393,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs, auth *bind.T
l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch, l1CoordinatorBytes, l1l2TxData, feeIdxCoordinator, args.VerifierIdx, args.L1Batch,
args.ProofA, args.ProofB, args.ProofC) args.ProofA, args.ProofB, args.ProofC)
if err != nil { if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("Failed Hermez.ForgeBatch: %w", err)) return nil, tracerr.Wrap(fmt.Errorf("Hermez.ForgeBatch: %w", err))
} }
return tx, nil return tx, nil
} }
@@ -939,7 +939,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
FeeIdxCoordinator: []common.Idx{}, FeeIdxCoordinator: []common.Idx{},
} }
nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1) lenL1L2TxsBytes := int((nLevels/8)*2 + common.Float40BytesLength + 1)
numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.RollupConstL1CoordinatorTotalBytes
numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes numBytesL1TxCoord := numTxsL1Coord * lenL1L2TxsBytes
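The length formula changed because amounts are now Float40-encoded: 40 bits is 5 bytes, replacing the old hard-coded 2. Each tx entry packs two nLevels/8-byte account indexes, the amount, and one fee byte, so a 32-level verifier (an assumed example) yields (32/8)*2 + 5 + 1 = 14 bytes per tx:

package main

import "fmt"

// float40BytesLength restates common.Float40BytesLength (40 bits = 5 bytes)
// so the snippet stands alone.
const float40BytesLength = 5

func main() {
	nLevels := 32 // assumed verifier depth
	lenL1L2TxsBytes := (nLevels/8)*2 + float40BytesLength + 1
	fmt.Println(lenL1L2TxsBytes) // 14 bytes per L1/L2 tx entry
}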

go.mod

@@ -17,11 +17,11 @@ require (
github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee github.com/jmoiron/sqlx v1.2.1-0.20200615141059-0794cb1f47ee
github.com/joho/godotenv v1.3.0 github.com/joho/godotenv v1.3.0
github.com/lib/pq v1.8.0 github.com/lib/pq v1.8.0
github.com/marusama/semaphore/v2 v2.4.1
github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c github.com/miguelmota/go-ethereum-hdwallet v0.0.0-20200123000308-a60dcd172b4c
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/mapstructure v1.3.0 github.com/mitchellh/mapstructure v1.3.0
github.com/prometheus/client_golang v1.3.0
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/russross/meddler v1.0.0 github.com/russross/meddler v1.0.0
github.com/stretchr/testify v1.6.1 github.com/stretchr/testify v1.6.1
@@ -29,5 +29,6 @@ require (
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
gopkg.in/go-playground/validator.v9 v9.29.1 gopkg.in/go-playground/validator.v9 v9.29.1
) )

go.sum

@@ -24,6 +24,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
@@ -66,6 +68,7 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
@@ -84,6 +87,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
@@ -169,6 +174,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -415,9 +422,6 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marusama/semaphore v0.0.0-20190110074507-6952cef993b2 h1:sq+a5mb8zHbmHhrIH06oqIMGsanjpbxNgxEgZVfgpvQ=
github.com/marusama/semaphore/v2 v2.4.1 h1:Y29DhhFMvreVgoqF9EtaSJAF9t2E7Sk7i5VW81sqB8I=
github.com/marusama/semaphore/v2 v2.4.1/go.mod h1:z9nMiNUekt/LTpTUQdpp+4sJeYqUGpwMHfW0Z8V8fnQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@@ -441,6 +445,7 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
@@ -541,23 +546,27 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
@@ -599,6 +608,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -617,6 +628,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=


@@ -67,6 +67,11 @@ func Init(levelStr string, outputs []string) {
func sprintStackTrace(st []tracerr.Frame) string { func sprintStackTrace(st []tracerr.Frame) string {
builder := strings.Builder{} builder := strings.Builder{}
// Skip deepest frame because it belongs to the go runtime and we don't
// care about it.
if len(st) > 0 {
st = st[:len(st)-1]
}
for _, f := range st { for _, f := range st {
builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func)) builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func))
} }


@@ -2,7 +2,9 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -51,9 +53,10 @@ const (
// Node is the Hermez Node // Node is the Hermez Node
type Node struct { type Node struct {
nodeAPI *NodeAPI nodeAPI *NodeAPI
debugAPI *debugapi.DebugAPI stateAPIUpdater *api.StateAPIUpdater
priceUpdater *priceupdater.PriceUpdater debugAPI *debugapi.DebugAPI
priceUpdater *priceupdater.PriceUpdater
// Coordinator // Coordinator
coord *coordinator.Coordinator coord *coordinator.Coordinator
@@ -61,27 +64,48 @@ type Node struct {
sync *synchronizer.Synchronizer sync *synchronizer.Synchronizer
// General // General
cfg *config.Node cfg *config.Node
mode Mode mode Mode
sqlConn *sqlx.DB sqlConnRead *sqlx.DB
ctx context.Context sqlConnWrite *sqlx.DB
wg sync.WaitGroup historyDB *historydb.HistoryDB
cancel context.CancelFunc ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
} }
// NewNode creates a Node // NewNode creates a Node
func NewNode(mode Mode, cfg *config.Node) (*Node, error) { func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
meddler.Debug = cfg.Debug.MeddlerLogs meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection // Establish DB connection
db, err := dbUtils.InitSQLDB( dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.Port, cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.Host, cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.User, cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.Password, cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.Name, cfg.PostgreSQL.NameWrite,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
} }
var apiConnCon *dbUtils.APIConnectionController var apiConnCon *dbUtils.APIConnectionController
if cfg.API.Explorer || mode == ModeCoordinator { if cfg.API.Explorer || mode == ModeCoordinator {
@@ -91,7 +115,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
) )
} }
historyDB := historydb.NewHistoryDB(db, apiConnCon) historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
ethClient, err := ethclient.Dial(cfg.Web3.URL) ethClient, err := ethclient.Dial(cfg.Web3.URL)
if err != nil { if err != nil {
@@ -102,8 +126,8 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
var keyStore *ethKeystore.KeyStore var keyStore *ethKeystore.KeyStore
if mode == ModeCoordinator { if mode == ModeCoordinator {
ethCfg = eth.EthereumConfig{ ethCfg = eth.EthereumConfig{
CallGasLimit: cfg.Coordinator.EthClient.CallGasLimit, CallGasLimit: 0, // cfg.Coordinator.EthClient.CallGasLimit,
GasPriceDiv: cfg.Coordinator.EthClient.GasPriceDiv, GasPriceDiv: 0, // cfg.Coordinator.EthClient.GasPriceDiv,
} }
scryptN := ethKeystore.StandardScryptN scryptN := ethKeystore.StandardScryptN
@@ -115,6 +139,23 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path, keyStore = ethKeystore.NewKeyStore(cfg.Coordinator.EthClient.Keystore.Path,
scryptN, scryptP) scryptN, scryptP)
balance, err := ethClient.BalanceAt(context.TODO(), cfg.Coordinator.ForgerAddress, nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
minForgeBalance := cfg.Coordinator.MinimumForgeAddressBalance
if minForgeBalance != nil && balance.Cmp(minForgeBalance) == -1 {
return nil, tracerr.Wrap(fmt.Errorf(
"forger account balance is less than cfg.Coordinator.MinimumForgeAddressBalance: %v < %v",
balance.Int64(), minForgeBalance))
}
log.Infow("forger ethereum account balance",
"addr", cfg.Coordinator.ForgerAddress,
"balance", balance.Int64(),
"minForgeBalance", minForgeBalance.Int64(),
)
// Unlock Coordinator ForgerAddr in the keystore to make calls // Unlock Coordinator ForgerAddr in the keystore to make calls
// to ForgeBatch in the smart contract // to ForgeBatch in the smart contract
if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) { if !keyStore.HasAddress(cfg.Coordinator.ForgerAddress) {
@@ -190,19 +231,42 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
initSCVars := sync.SCVars() initSCVars := sync.SCVars()
scConsts := synchronizer.SCConsts{ scConsts := common.SCConsts{
Rollup: *sync.RollupConstants(), Rollup: *sync.RollupConstants(),
Auction: *sync.AuctionConstants(), Auction: *sync.AuctionConstants(),
WDelayer: *sync.WDelayerConstants(), WDelayer: *sync.WDelayerConstants(),
} }
hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
}
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
return nil, tracerr.Wrap(err)
}
hdbConsts := historydb.Constants{
SCConsts: common.SCConsts{
Rollup: scConsts.Rollup,
Auction: scConsts.Auction,
WDelayer: scConsts.WDelayer,
},
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
}
if err := historyDB.SetConstants(&hdbConsts); err != nil {
return nil, tracerr.Wrap(err)
}
stateAPIUpdater := api.NewStateAPIUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
var l2DB *l2db.L2DB var l2DB *l2db.L2DB
if mode == ModeCoordinator { if mode == ModeCoordinator {
l2DB = l2db.NewL2DB( l2DB = l2db.NewL2DB(
db, dbRead, dbWrite,
cfg.Coordinator.L2DB.SafetyPeriod, cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration, cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon, apiConnCon,
) )
@@ -244,9 +308,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if err != nil {
return nil, tracerr.Wrap(err)
}
serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs)) serverProofs := make([]prover.Client, len(cfg.Coordinator.ServerProofs))
for i, serverProofCfg := range cfg.Coordinator.ServerProofs { for i, serverProofCfg := range cfg.Coordinator.ServerProofs {
serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL, serverProofs[i] = prover.NewProofServerClient(serverProofCfg.URL,
@@ -298,9 +359,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
ConfirmBlocks: cfg.Coordinator.ConfirmBlocks, ConfirmBlocks: cfg.Coordinator.ConfirmBlocks,
L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc, L1BatchTimeoutPerc: cfg.Coordinator.L1BatchTimeoutPerc,
ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration, ForgeRetryInterval: cfg.Coordinator.ForgeRetryInterval.Duration,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration,
ForgeNoTxsDelay: cfg.Coordinator.ForgeNoTxsDelay.Duration,
SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration, SyncRetryInterval: cfg.Coordinator.SyncRetryInterval.Duration,
PurgeByExtDelInterval: cfg.Coordinator.PurgeByExtDelInterval.Duration,
EthClientAttempts: cfg.Coordinator.EthClient.Attempts, EthClientAttempts: cfg.Coordinator.EthClient.Attempts,
EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration, EthClientAttemptsDelay: cfg.Coordinator.EthClient.AttemptsDelay.Duration,
EthNoReuseNonce: cfg.Coordinator.EthClient.NoReuseNonce,
EthTxResendTimeout: cfg.Coordinator.EthClient.TxResendTimeout.Duration,
MaxGasPrice: cfg.Coordinator.EthClient.MaxGasPrice,
GasPriceIncPerc: cfg.Coordinator.EthClient.GasPriceIncPerc,
TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration, TxManagerCheckInterval: cfg.Coordinator.EthClient.CheckLoopInterval.Duration,
DebugBatchPath: cfg.Coordinator.Debug.BatchPath, DebugBatchPath: cfg.Coordinator.Debug.BatchPath,
Purger: coordinator.PurgerCfg{ Purger: coordinator.PurgerCfg{
@@ -309,6 +377,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay, PurgeBlockDelay: cfg.Coordinator.L2DB.PurgeBlockDelay,
InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay, InvalidateBlockDelay: cfg.Coordinator.L2DB.InvalidateBlockDelay,
}, },
ForgeBatchGasCost: cfg.Coordinator.EthClient.ForgeBatchGasCost,
VerifierIdx: uint8(verifierIdx), VerifierIdx: uint8(verifierIdx),
TxProcessorConfig: txProcessorCfg, TxProcessorConfig: txProcessorCfg,
}, },
@@ -319,11 +388,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
serverProofs, serverProofs,
client, client,
&scConsts, &scConsts,
&synchronizer.SCVariables{ initSCVars,
Rollup: *initSCVars.Rollup,
Auction: *initSCVars.Auction,
WDelayer: *initSCVars.WDelayer,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -331,6 +396,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
var nodeAPI *NodeAPI var nodeAPI *NodeAPI
if cfg.API.Address != "" { if cfg.API.Address != "" {
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
if cfg.API.UpdateMetricsInterval.Duration == 0 { if cfg.API.UpdateMetricsInterval.Duration == 0 {
return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v", return nil, tracerr.Wrap(fmt.Errorf("invalid cfg.API.UpdateMetricsInterval: %v",
cfg.API.UpdateMetricsInterval.Duration)) cfg.API.UpdateMetricsInterval.Duration))
@@ -350,22 +420,11 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
coord, cfg.API.Explorer, coord, cfg.API.Explorer,
server, server,
historyDB, historyDB,
stateDB,
l2DB, l2DB,
&api.Config{
RollupConstants: scConsts.Rollup,
AuctionConstants: scConsts.Auction,
WDelayerConstants: scConsts.WDelayer,
ChainID: chainIDU16,
HermezAddress: cfg.SmartContracts.Rollup,
},
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
nodeAPI.api.SetRollupVariables(*initSCVars.Rollup)
nodeAPI.api.SetAuctionVariables(*initSCVars.Auction)
nodeAPI.api.SetWDelayerVariables(*initSCVars.WDelayer)
} }
var debugAPI *debugapi.DebugAPI var debugAPI *debugapi.DebugAPI
if cfg.Debug.APIAddress != "" { if cfg.Debug.APIAddress != "" {
@@ -378,19 +437,138 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
return &Node{ return &Node{
nodeAPI: nodeAPI, stateAPIUpdater: stateAPIUpdater,
debugAPI: debugAPI, nodeAPI: nodeAPI,
priceUpdater: priceUpdater, debugAPI: debugAPI,
coord: coord, priceUpdater: priceUpdater,
sync: sync, coord: coord,
cfg: cfg, sync: sync,
mode: mode, cfg: cfg,
sqlConn: db, mode: mode,
ctx: ctx, sqlConnRead: dbRead,
cancel: cancel, sqlConnWrite: dbWrite,
historyDB: historyDB,
ctx: ctx,
cancel: cancel,
}, nil }, nil
} }
// APIServer is a server that only runs the API
type APIServer struct {
nodeAPI *NodeAPI
mode Mode
ctx context.Context
wg sync.WaitGroup
cancel context.CancelFunc
}
// NewAPIServer creates a new APIServer
func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
meddler.Debug = cfg.Debug.MeddlerLogs
// Establish DB connection
dbWrite, err := dbUtils.InitSQLDB(
cfg.PostgreSQL.PortWrite,
cfg.PostgreSQL.HostWrite,
cfg.PostgreSQL.UserWrite,
cfg.PostgreSQL.PasswordWrite,
cfg.PostgreSQL.NameWrite,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
var dbRead *sqlx.DB
if cfg.PostgreSQL.HostRead == "" {
dbRead = dbWrite
} else if cfg.PostgreSQL.HostRead == cfg.PostgreSQL.HostWrite {
return nil, tracerr.Wrap(fmt.Errorf(
"PostgreSQL.HostRead and PostgreSQL.HostWrite must be different",
))
} else {
dbRead, err = dbUtils.InitSQLDB(
cfg.PostgreSQL.PortRead,
cfg.PostgreSQL.HostRead,
cfg.PostgreSQL.UserRead,
cfg.PostgreSQL.PasswordRead,
cfg.PostgreSQL.NameRead,
)
if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("dbUtils.InitSQLDB: %w", err))
}
}
apiConnCon := dbUtils.NewAPIConnectionController(
cfg.API.MaxSQLConnections,
cfg.API.SQLConnectionTimeout.Duration,
)
historyDB := historydb.NewHistoryDB(dbRead, dbWrite, apiConnCon)
var l2DB *l2db.L2DB
if mode == ModeCoordinator {
l2DB = l2db.NewL2DB(
dbRead, dbWrite,
0,
cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD,
0,
apiConnCon,
)
}
if cfg.Debug.GinDebugMode {
gin.SetMode(gin.DebugMode)
} else {
gin.SetMode(gin.ReleaseMode)
}
server := gin.Default()
coord := false
if mode == ModeCoordinator {
coord = cfg.Coordinator.API.Coordinator
}
nodeAPI, err := NewNodeAPI(
cfg.API.Address,
coord, cfg.API.Explorer,
server,
historyDB,
l2DB,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
return &APIServer{
nodeAPI: nodeAPI,
mode: mode,
ctx: ctx,
cancel: cancel,
}, nil
}
// Start the APIServer
func (s *APIServer) Start() {
log.Infow("Starting api server...", "mode", s.mode)
log.Info("Starting NodeAPI...")
s.wg.Add(1)
go func() {
defer func() {
log.Info("NodeAPI routine stopped")
s.wg.Done()
}()
if err := s.nodeAPI.Run(s.ctx); err != nil {
if s.ctx.Err() != nil {
return
}
log.Fatalw("NodeAPI.Run", "err", err)
}
}()
}
// Stop the APIServer
func (s *APIServer) Stop() {
log.Infow("Stopping NodeAPI...")
s.cancel()
s.wg.Wait()
}
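A hypothetical cli entry point for the decoupled API server, sketched under the assumption that this code lives in the node package of this repo; the signal handling and runAPIServer name are illustrative:

// runAPIServer starts the standalone API server and blocks until a
// SIGINT/SIGTERM arrives, then shuts it down. Sketch only.
func runAPIServer(cfg *config.APIServer) error {
	srv, err := node.NewAPIServer(node.ModeCoordinator, cfg)
	if err != nil {
		return err
	}
	srv.Start()
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop // block until a shutdown signal arrives
	srv.Stop()
	return nil
}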
// NodeAPI holds the node http API // NodeAPI holds the node http API
type NodeAPI struct { //nolint:golint type NodeAPI struct { //nolint:golint
api *api.API api *api.API
@@ -410,9 +588,7 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints bool, coordinatorEndpoints, explorerEndpoints bool,
server *gin.Engine, server *gin.Engine,
hdb *historydb.HistoryDB, hdb *historydb.HistoryDB,
sdb *statedb.StateDB,
l2db *l2db.L2DB, l2db *l2db.L2DB,
config *api.Config,
) (*NodeAPI, error) { ) (*NodeAPI, error) {
engine := gin.Default() engine := gin.Default()
engine.NoRoute(handleNoRoute) engine.NoRoute(handleNoRoute)
@@ -421,9 +597,7 @@ func NewNodeAPI(
coordinatorEndpoints, explorerEndpoints, coordinatorEndpoints, explorerEndpoints,
engine, engine,
hdb, hdb,
sdb,
l2db, l2db,
config,
) )
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -439,16 +613,20 @@ func NewNodeAPI(
// cancellation. // cancellation.
func (a *NodeAPI) Run(ctx context.Context) error { func (a *NodeAPI) Run(ctx context.Context) error {
server := &http.Server{ server := &http.Server{
Addr: a.addr,
Handler: a.engine, Handler: a.engine,
// TODO: Figure out best parameters for production // TODO: Figure out best parameters for production
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("NodeAPI is ready at %v", a.addr)
go func() { go func() {
log.Infof("NodeAPI is ready at %v", a.addr) if err := server.Serve(listener); err != nil &&
if err := server.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed { tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
@@ -464,60 +642,57 @@ func (a *NodeAPI) Run(ctx context.Context) error {
return nil return nil
} }
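The switch from ListenAndServe to net.Listen plus Serve above makes bind errors (e.g. the port already being taken) surface synchronously, before the serving goroutine starts. The general pattern, as a sketch with assumed shutdown handling (log here is the hermez logger used throughout this file):

// serveUntilDone binds first so bind failures are returned to the caller,
// serves in the background, and shuts down when ctx is cancelled. Sketch.
func serveUntilDone(ctx context.Context, addr string, h http.Handler) error {
	server := &http.Server{Handler: h}
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err // e.g. address already in use
	}
	go func() {
		if err := server.Serve(listener); err != nil &&
			!errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("Serve: %v", err)
		}
	}()
	<-ctx.Done()
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return server.Shutdown(shutdownCtx)
}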
func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr, func (n *Node) handleNewBlock(ctx context.Context, stats *synchronizer.Stats, vars *common.SCVariablesPtr,
batches []common.BatchData) { batches []common.BatchData) error {
if n.mode == ModeCoordinator { if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{ n.coord.SendMsg(ctx, coordinator.MsgSyncBlock{
Stats: *stats, Stats: *stats,
Vars: vars, Vars: *vars,
Batches: batches, Batches: batches,
}) })
} }
if n.nodeAPI != nil { n.stateAPIUpdater.SetSCVars(vars)
if vars.Rollup != nil { if stats.Synced() {
n.nodeAPI.api.SetRollupVariables(*vars.Rollup) if err := n.stateAPIUpdater.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatchNum),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("ApiStateUpdater.UpdateNetworkInfo", "err", err)
} }
if vars.Auction != nil { } else {
n.nodeAPI.api.SetAuctionVariables(*vars.Auction) n.stateAPIUpdater.UpdateNetworkInfoBlock(
}
if vars.WDelayer != nil {
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
}
if stats.Synced() {
if err := n.nodeAPI.api.UpdateNetworkInfo(
stats.Eth.LastBlock, stats.Sync.LastBlock,
common.BatchNum(stats.Eth.LastBatch),
stats.Sync.Auction.CurrentSlot.SlotNum,
); err != nil {
log.Errorw("API.UpdateNetworkInfo", "err", err)
}
}
}
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats, vars synchronizer.SCVariablesPtr) {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: vars,
})
}
if n.nodeAPI != nil {
vars := n.sync.SCVars()
n.nodeAPI.api.SetRollupVariables(*vars.Rollup)
n.nodeAPI.api.SetAuctionVariables(*vars.Auction)
n.nodeAPI.api.SetWDelayerVariables(*vars.WDelayer)
n.nodeAPI.api.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock, stats.Eth.LastBlock, stats.Sync.LastBlock,
) )
} }
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
}
func (n *Node) handleReorg(ctx context.Context, stats *synchronizer.Stats,
vars *common.SCVariables) error {
if n.mode == ModeCoordinator {
n.coord.SendMsg(ctx, coordinator.MsgSyncReorg{
Stats: *stats,
Vars: *vars.AsPtr(),
})
}
n.stateAPIUpdater.SetSCVars(vars.AsPtr())
n.stateAPIUpdater.UpdateNetworkInfoBlock(
stats.Eth.LastBlock, stats.Sync.LastBlock,
)
if err := n.stateAPIUpdater.Store(); err != nil {
return tracerr.Wrap(err)
}
return nil
} }
// TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we // TODO(Edu): Consider keeping the `lastBlock` inside synchronizer so that we
// don't have to pass it around. // don't have to pass it around.
func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) { func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, time.Duration, error) {
blockData, discarded, err := n.sync.Sync2(ctx, lastBlock) blockData, discarded, err := n.sync.Sync(ctx, lastBlock)
stats := n.sync.Stats() stats := n.sync.Stats()
if err != nil { if err != nil {
// case: error // case: error
@@ -526,16 +701,20 @@ func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common
// case: reorg // case: reorg
log.Infow("Synchronizer.Sync reorg", "discarded", *discarded) log.Infow("Synchronizer.Sync reorg", "discarded", *discarded)
vars := n.sync.SCVars() vars := n.sync.SCVars()
n.handleReorg(ctx, stats, vars) if err := n.handleReorg(ctx, stats, vars); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return nil, time.Duration(0), nil return nil, time.Duration(0), nil
} else if blockData != nil { } else if blockData != nil {
// case: new block // case: new block
vars := synchronizer.SCVariablesPtr{ vars := common.SCVariablesPtr{
Rollup: blockData.Rollup.Vars, Rollup: blockData.Rollup.Vars,
Auction: blockData.Auction.Vars, Auction: blockData.Auction.Vars,
WDelayer: blockData.WDelayer.Vars, WDelayer: blockData.WDelayer.Vars,
} }
n.handleNewBlock(ctx, stats, vars, blockData.Rollup.Batches) if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil {
return nil, time.Duration(0), tracerr.Wrap(err)
}
return &blockData.Block, time.Duration(0), nil return &blockData.Block, time.Duration(0), nil
} else { } else {
// case: no block // case: no block
@@ -554,7 +733,9 @@ func (n *Node) StartSynchronizer() {
// the last synced one) is synchronized // the last synced one) is synchronized
stats := n.sync.Stats() stats := n.sync.Stats()
vars := n.sync.SCVars() vars := n.sync.SCVars()
n.handleNewBlock(n.ctx, stats, vars, []common.BatchData{}) if err := n.handleNewBlock(n.ctx, stats, vars.AsPtr(), []common.BatchData{}); err != nil {
log.Fatalw("Node.handleNewBlock", "err", err)
}
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
@@ -573,7 +754,13 @@ func (n *Node) StartSynchronizer() {
if n.ctx.Err() != nil { if n.ctx.Err() != nil {
continue continue
} }
log.Errorw("Synchronizer.Sync", "err", err) if errors.Is(err, eth.ErrBlockHashMismatchEvent) {
log.Warnw("Synchronizer.Sync", "err", err)
} else if errors.Is(err, synchronizer.ErrUnknownBlock) {
log.Warnw("Synchronizer.Sync", "err", err)
} else {
log.Errorw("Synchronizer.Sync", "err", err)
}
} }
} }
} }
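The same classification, factored out as a sketch: known-transient sync errors (a block reorged away between the header fetch and the event query) are downgraded to warnings, everything else stays an error. errors.Is matches because the synchronizer wraps these sentinel values:

// logSyncErr picks the log level for a Synchronizer.Sync failure. Sketch.
func logSyncErr(err error) {
	switch {
	case errors.Is(err, eth.ErrBlockHashMismatchEvent),
		errors.Is(err, synchronizer.ErrUnknownBlock):
		// Expected during reorgs; the next Sync iteration recovers.
		log.Warnw("Synchronizer.Sync", "err", err)
	default:
		log.Errorw("Synchronizer.Sync", "err", err)
	}
}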
@@ -634,15 +821,26 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("API.UpdateMetrics loop done") log.Info("ApiStateUpdater.UpdateMetrics loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration): case <-time.After(n.cfg.API.UpdateMetricsInterval.Duration):
if err := n.nodeAPI.api.UpdateMetrics(); err != nil { if err := n.stateAPIUpdater.UpdateMetrics(); err != nil {
log.Errorw("API.UpdateMetrics", "err", err) log.Errorw("ApiStateUpdater.UpdateMetrics", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }
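An equivalent loop using time.NewTicker instead of re-arming time.After on every iteration; a sketch only, not code from this change:

// runUpdateLoop runs update then store every interval until ctx ends.
// A ticker avoids allocating a fresh timer per iteration and keeps a
// fixed cadence even when an update runs long. Sketch.
func runUpdateLoop(ctx context.Context, interval time.Duration,
	update, store func() error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := update(); err != nil {
				log.Errorw("update", "err", err)
				continue // don't store a failed update
			}
			if err := store(); err != nil {
				log.Errorw("store", "err", err)
			}
		}
	}
}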
@@ -650,15 +848,26 @@ func (n *Node) StartNodeAPI() {
n.wg.Add(1) n.wg.Add(1)
go func() { go func() {
// Do an initial update on startup
if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("ApiStateUpdater.UpdateRecommendedFee", "err", err)
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
}
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():
log.Info("API.UpdateRecommendedFee loop done") log.Info("ApiStateUpdaterAPI.UpdateRecommendedFee loop done")
n.wg.Done() n.wg.Done()
return return
case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration): case <-time.After(n.cfg.API.UpdateRecommendedFeeInterval.Duration):
if err := n.nodeAPI.api.UpdateRecommendedFee(); err != nil { if err := n.stateAPIUpdater.UpdateRecommendedFee(); err != nil {
log.Errorw("API.UpdateRecommendedFee", "err", err) log.Errorw("ApiStateUpdaterAPI.UpdateRecommendedFee", "err", err)
continue
}
if err := n.stateAPIUpdater.Store(); err != nil {
log.Errorw("ApiStateUpdater.Store", "err", err)
} }
} }
} }


@@ -20,7 +20,7 @@ func TestPriceUpdater(t *testing.T) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
assert.NoError(t, err) assert.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, nil) historyDB := historydb.NewHistoryDB(db, db, nil)
// Clean DB // Clean DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
// Populate DB // Populate DB

synchronizer/metrics.go Normal file

@@ -0,0 +1,44 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "Number of reorgs handled by the synchronizer",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "Last block number synchronized",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "Last block number seen on the ethereum node",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "Last batch number synchronized",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "Last batch number forged on ethereum",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}
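These collectors register into the Prometheus default registry, so any HTTP server can expose them; the debugapi change further down mounts promhttp.Handler() for exactly this. A minimal standalone example (the address is illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler() serves every collector registered in the default
	// registry, including the sync_* gauges and counter above.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9100", nil) // address is illustrative
}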


@@ -18,6 +18,19 @@ import (
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
const (
// errStrUnknownBlock is the string returned by geth when querying an
// unknown block
errStrUnknownBlock = "unknown block"
)
var (
// ErrUnknownBlock is the error returned by the Synchronizer when a
// block is queried by hash but the ethereum node doesn't find it due
// to it being discarded from a reorg.
ErrUnknownBlock = fmt.Errorf("unknown block")
)
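geth reports a block discarded by a reorg only as the string "unknown block", so the sync code below maps that string onto this typed sentinel, which callers can then match with errors.Is. The pattern, sketched with types assumed from the surrounding code; fetchRollupEvents is an illustrative name:

// fetchRollupEvents translates geth's string-only "unknown block" error
// into the ErrUnknownBlock sentinel. Sketch of the pattern used in
// rollupSync/auctionSync/wdelayerSync below.
func (s *Synchronizer) fetchRollupEvents(blockNum int64,
	hash ethCommon.Hash) (*eth.RollupEvents, error) {
	events, err := s.ethClient.RollupEventsByBlock(blockNum, &hash)
	if err != nil && err.Error() == errStrUnknownBlock {
		// Block reorged away: report the sentinel so callers can
		// branch with errors.Is(err, ErrUnknownBlock).
		return nil, tracerr.Wrap(ErrUnknownBlock)
	} else if err != nil {
		return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
	}
	return events, nil
}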
// Stats of the synchronizer // Stats of the synchronizer
type Stats struct { type Stats struct {
Eth struct { Eth struct {
@@ -25,12 +38,12 @@ type Stats struct {
Updated time.Time Updated time.Time
FirstBlockNum int64 FirstBlockNum int64
LastBlock common.Block LastBlock common.Block
LastBatch int64 LastBatchNum int64
} }
Sync struct { Sync struct {
Updated time.Time Updated time.Time
LastBlock common.Block LastBlock common.Block
LastBatch int64 LastBatch common.Batch
// LastL1BatchBlock is the last ethereum block in which an // LastL1BatchBlock is the last ethereum block in which an
// l1Batch was forged // l1Batch was forged
LastL1BatchBlock int64 LastL1BatchBlock int64
@@ -77,13 +90,13 @@ func (s *StatsHolder) UpdateCurrentNextSlot(current *common.Slot, next *common.S
} }
// UpdateSync updates the synchronizer stats // UpdateSync updates the synchronizer stats
func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.BatchNum, func (s *StatsHolder) UpdateSync(lastBlock *common.Block, lastBatch *common.Batch,
lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) { lastL1BatchBlock *int64, lastForgeL1TxsNum *int64) {
now := time.Now() now := time.Now()
s.rw.Lock() s.rw.Lock()
s.Sync.LastBlock = *lastBlock s.Sync.LastBlock = *lastBlock
if lastBatch != nil { if lastBatch != nil {
s.Sync.LastBatch = int64(*lastBatch) s.Sync.LastBatch = *lastBatch
} }
if lastL1BatchBlock != nil { if lastL1BatchBlock != nil {
s.Sync.LastL1BatchBlock = *lastL1BatchBlock s.Sync.LastL1BatchBlock = *lastL1BatchBlock
@@ -105,16 +118,16 @@ func (s *StatsHolder) UpdateEth(ethClient eth.ClientInterface) error {
lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1) lastBlock, err := ethClient.EthBlockByNumber(context.TODO(), -1)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
} }
lastBatch, err := ethClient.RollupLastForgedBatch() lastBatchNum, err := ethClient.RollupLastForgedBatch()
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(fmt.Errorf("RollupLastForgedBatch: %w", err))
} }
s.rw.Lock() s.rw.Lock()
s.Eth.Updated = now s.Eth.Updated = now
s.Eth.LastBlock = *lastBlock s.Eth.LastBlock = *lastBlock
s.Eth.LastBatch = lastBatch s.Eth.LastBatchNum = lastBatchNum
s.rw.Unlock() s.rw.Unlock()
return nil return nil
} }
@@ -139,6 +152,10 @@ func (s *StatsHolder) CopyStats() *Stats {
sCopy.Sync.Auction.NextSlot.DefaultSlotBid = sCopy.Sync.Auction.NextSlot.DefaultSlotBid =
common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid) common.CopyBigInt(s.Sync.Auction.NextSlot.DefaultSlotBid)
} }
if s.Sync.LastBatch.StateRoot != nil {
sCopy.Sync.LastBatch.StateRoot =
common.CopyBigInt(s.Sync.LastBatch.StateRoot)
}
s.rw.RUnlock() s.rw.RUnlock()
return &sCopy return &sCopy
} }
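The deep copy matters because Stats is read concurrently with updates; assuming common.CopyBigInt behaves as sketched here, the returned copy never shares a *big.Int with the updating goroutine:

// Assumed behavior of common.CopyBigInt: return an independent copy so a
// later Set/Add on the original cannot race with readers of the copy.
func CopyBigInt(v *big.Int) *big.Int {
	if v == nil {
		return nil
	}
	return new(big.Int).Set(v)
}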
@@ -152,9 +169,9 @@ func (s *StatsHolder) blocksPerc() float64 {
float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1)) float64(s.Eth.LastBlock.Num-(s.Eth.FirstBlockNum-1))
} }
func (s *StatsHolder) batchesPerc(batchNum int64) float64 { func (s *StatsHolder) batchesPerc(batchNum common.BatchNum) float64 {
return float64(batchNum) * 100.0 / return float64(batchNum) * 100.0 /
float64(s.Eth.LastBatch) float64(s.Eth.LastBatchNum)
} }
// StartBlockNums sets the first block used to start tracking the smart // StartBlockNums sets the first block used to start tracking the smart
@@ -166,26 +183,26 @@ type StartBlockNums struct {
} }
// SCVariables joins all the smart contract variables in a single struct // SCVariables joins all the smart contract variables in a single struct
type SCVariables struct { // type SCVariables struct {
Rollup common.RollupVariables `validate:"required"` // Rollup common.RollupVariables `validate:"required"`
Auction common.AuctionVariables `validate:"required"` // Auction common.AuctionVariables `validate:"required"`
WDelayer common.WDelayerVariables `validate:"required"` // WDelayer common.WDelayerVariables `validate:"required"`
} // }
//
// SCVariablesPtr joins all the smart contract variables as pointers in a single // // SCVariablesPtr joins all the smart contract variables as pointers in a single
// struct // // struct
type SCVariablesPtr struct { // type SCVariablesPtr struct {
Rollup *common.RollupVariables `validate:"required"` // Rollup *common.RollupVariables `validate:"required"`
Auction *common.AuctionVariables `validate:"required"` // Auction *common.AuctionVariables `validate:"required"`
WDelayer *common.WDelayerVariables `validate:"required"` // WDelayer *common.WDelayerVariables `validate:"required"`
} // }
//
// SCConsts joins all the smart contract constants in a single struct // // SCConsts joins all the smart contract constants in a single struct
type SCConsts struct { // type SCConsts struct {
Rollup common.RollupConstants // Rollup common.RollupConstants
Auction common.AuctionConstants // Auction common.AuctionConstants
WDelayer common.WDelayerConstants // WDelayer common.WDelayerConstants
} // }
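These structs now live in the common package, and node.go above converts between the value and pointer forms with vars.AsPtr(). The assumed shape of that conversion (sketch, not the verified implementation):

// AsPtr turns the value-struct into the pointer-struct without copying
// the underlying variables. Assumed to live in package common.
func (v *SCVariables) AsPtr() *SCVariablesPtr {
	return &SCVariablesPtr{
		Rollup:   &v.Rollup,
		Auction:  &v.Auction,
		WDelayer: &v.WDelayer,
	}
}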
// Config is the Synchronizer configuration // Config is the Synchronizer configuration
type Config struct { type Config struct {
@@ -196,13 +213,13 @@ type Config struct {
// Synchronizer implements the Synchronizer type // Synchronizer implements the Synchronizer type
type Synchronizer struct { type Synchronizer struct {
ethClient eth.ClientInterface ethClient eth.ClientInterface
consts SCConsts consts common.SCConsts
historyDB *historydb.HistoryDB historyDB *historydb.HistoryDB
stateDB *statedb.StateDB stateDB *statedb.StateDB
cfg Config cfg Config
initVars SCVariables initVars common.SCVariables
startBlockNum int64 startBlockNum int64
vars SCVariables vars common.SCVariables
stats *StatsHolder stats *StatsHolder
resetStateFailed bool resetStateFailed bool
} }
@@ -225,7 +242,7 @@ func NewSynchronizer(ethClient eth.ClientInterface, historyDB *historydb.History
return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w", return nil, tracerr.Wrap(fmt.Errorf("NewSynchronizer ethClient.WDelayerConstants(): %w",
err)) err))
} }
consts := SCConsts{ consts := common.SCConsts{
Rollup: *rollupConstants, Rollup: *rollupConstants,
Auction: *auctionConstants, Auction: *auctionConstants,
WDelayer: *wDelayerConstants, WDelayer: *wDelayerConstants,
@@ -290,11 +307,11 @@ func (s *Synchronizer) WDelayerConstants() *common.WDelayerConstants {
} }
// SCVars returns a copy of the Smart Contract Variables // SCVars returns a copy of the Smart Contract Variables
func (s *Synchronizer) SCVars() SCVariablesPtr { func (s *Synchronizer) SCVars() *common.SCVariables {
return SCVariablesPtr{ return &common.SCVariables{
Rollup: s.vars.Rollup.Copy(), Rollup: *s.vars.Rollup.Copy(),
Auction: s.vars.Auction.Copy(), Auction: *s.vars.Auction.Copy(),
WDelayer: s.vars.WDelayer.Copy(), WDelayer: *s.vars.WDelayer.Copy(),
} }
} }
@@ -329,23 +346,25 @@ func (s *Synchronizer) setSlotCoordinator(slot *common.Slot) error {
return nil return nil
} }
// firstBatchBlockNum is the blockNum of the first batch in that block, if any // updateCurrentSlot updates the slot with information of the current slot.
func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*common.Slot, error) { // The information about which coordinator is allowed to forge is only updated
slot := common.Slot{ // when we are Synced.
SlotNum: s.stats.Sync.Auction.CurrentSlot.SlotNum, // hasBatch is true when the last synced block contained at least one batch.
ForgerCommitment: s.stats.Sync.Auction.CurrentSlot.ForgerCommitment, func (s *Synchronizer) updateCurrentSlot(slot *common.Slot, reset bool, hasBatch bool) error {
}
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) slotNum := s.consts.Auction.SlotNum(blockNum)
firstBatchBlockNum := s.stats.Sync.LastBlock.Num
if reset { if reset {
// Using this query only to know if there is a batch in this slot
dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum) dbFirstBatchBlockNum, err := s.historyDB.GetFirstBatchBlockNumBySlot(slotNum)
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return nil, tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err)) return tracerr.Wrap(fmt.Errorf("historyDB.GetFirstBatchBySlot: %w", err))
} else if tracerr.Unwrap(err) == sql.ErrNoRows { } else if tracerr.Unwrap(err) == sql.ErrNoRows {
firstBatchBlockNum = nil hasBatch = false
} else { } else {
firstBatchBlockNum = &dbFirstBatchBlockNum hasBatch = true
firstBatchBlockNum = dbFirstBatchBlockNum
} }
slot.ForgerCommitment = false slot.ForgerCommitment = false
} else if slotNum > slot.SlotNum { } else if slotNum > slot.SlotNum {
@@ -356,11 +375,11 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(&slot); err != nil { if err := s.setSlotCoordinator(slot); err != nil {
return nil, tracerr.Wrap(err) return tracerr.Wrap(err)
} }
if firstBatchBlockNum != nil && if hasBatch &&
s.consts.Auction.RelativeBlock(*firstBatchBlockNum) < s.consts.Auction.RelativeBlock(firstBatchBlockNum) <
int64(s.vars.Auction.SlotDeadline) { int64(s.vars.Auction.SlotDeadline) {
slot.ForgerCommitment = true slot.ForgerCommitment = true
} }
@@ -369,57 +388,61 @@ func (s *Synchronizer) getCurrentSlot(reset bool, firstBatchBlockNum *int64) (*c
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, blockNum)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
} }
if !canForge { if !canForge {
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return &slot, nil return nil
} }
func (s *Synchronizer) getNextSlot() (*common.Slot, error) { // updateNextSlot updates the slot with information of the next slot.
// The information abouth which coordinator is allowed to forge is only updated
// when we are Synced.
func (s *Synchronizer) updateNextSlot(slot *common.Slot) error {
// We want the next block because the current one is already mined // We want the next block because the current one is already mined
blockNum := s.stats.Sync.LastBlock.Num + 1 blockNum := s.stats.Sync.LastBlock.Num + 1
slotNum := s.consts.Auction.SlotNum(blockNum) + 1 slotNum := s.consts.Auction.SlotNum(blockNum) + 1
slot := common.Slot{ slot.SlotNum = slotNum
SlotNum: slotNum, slot.ForgerCommitment = false
ForgerCommitment: false,
}
slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum) slot.StartBlock, slot.EndBlock = s.consts.Auction.SlotBlocks(slot.SlotNum)
// If Synced, update the current coordinator // If Synced, update the current coordinator
if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum { if s.stats.Synced() && blockNum >= s.consts.Auction.GenesisBlockNum {
if err := s.setSlotCoordinator(&slot); err != nil { if err := s.setSlotCoordinator(slot); err != nil {
return nil, tracerr.Wrap(err) return tracerr.Wrap(err)
} }
// TODO: Remove this SANITY CHECK once this code is tested enough // TODO: Remove this SANITY CHECK once this code is tested enough
// BEGIN SANITY CHECK // BEGIN SANITY CHECK
canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock) canForge, err := s.ethClient.AuctionCanForge(slot.Forger, slot.StartBlock)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return tracerr.Wrap(fmt.Errorf("AuctionCanForge: %w", err))
} }
if !canForge { if !canForge {
return nil, tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+ return tracerr.Wrap(fmt.Errorf("Synchronized value of forger address for closed slot "+
"differs from smart contract: %+v", slot)) "differs from smart contract: %+v", slot))
} }
// END SANITY CHECK // END SANITY CHECK
} }
return &slot, nil return nil
} }
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, firstBatchBlockNum *int64) error { // updateCurrentNextSlotIfSync updates the current and next slot. Information
current, err := s.getCurrentSlot(reset, firstBatchBlockNum) // about forger address that is allowed to forge is only updated if we are
if err != nil { // Synced.
func (s *Synchronizer) updateCurrentNextSlotIfSync(reset bool, hasBatch bool) error {
current := s.stats.Sync.Auction.CurrentSlot
next := s.stats.Sync.Auction.NextSlot
if err := s.updateCurrentSlot(&current, reset, hasBatch); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
next, err := s.getNextSlot() if err := s.updateNextSlot(&next); err != nil {
if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
s.stats.UpdateCurrentNextSlot(current, next) s.stats.UpdateCurrentNextSlot(&current, &next)
return nil return nil
} }
@@ -458,9 +481,9 @@ func (s *Synchronizer) init() error {
"ethLastBlock", s.stats.Eth.LastBlock, "ethLastBlock", s.stats.Eth.LastBlock,
) )
log.Infow("Sync init batch", log.Infow("Sync init batch",
"syncLastBatch", s.stats.Sync.LastBatch, "syncLastBatch", s.stats.Sync.LastBatch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch), "syncBatchesPerc", s.stats.batchesPerc(s.stats.Sync.LastBatch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatch, "ethLastBatch", s.stats.Eth.LastBatchNum,
) )
return nil return nil
} }
@@ -480,13 +503,13 @@ func (s *Synchronizer) resetIntermediateState() error {
return nil return nil
} }
// Sync2 attempts to synchronize an ethereum block starting from lastSavedBlock. // Sync attempts to synchronize an ethereum block starting from lastSavedBlock.
// If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB. // If lastSavedBlock is nil, the lastSavedBlock value is obtained from the DB.
// If a block is synced, it will be returned and also stored in the DB. If a // If a block is synced, it will be returned and also stored in the DB. If a
// reorg is detected, the number of discarded blocks will be returned and no // reorg is detected, the number of discarded blocks will be returned and no
// synchronization will be made. // synchronization will be made.
// TODO: Be smart about locking: only lock during the read/write operations // TODO: Be smart about locking: only lock during the read/write operations
func (s *Synchronizer) Sync2(ctx context.Context, func (s *Synchronizer) Sync(ctx context.Context,
lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) { lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) {
if s.resetStateFailed { if s.resetStateFailed {
if err := s.resetIntermediateState(); err != nil { if err := s.resetIntermediateState(); err != nil {
@@ -521,7 +544,7 @@ func (s *Synchronizer) Sync2(ctx context.Context,
if tracerr.Unwrap(err) == ethereum.NotFound { if tracerr.Unwrap(err) == ethereum.NotFound {
return nil, nil, nil return nil, nil, nil
} else if err != nil { } else if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err))
} }
log.Debugf("ethBlock: num: %v, parent: %v, hash: %v", log.Debugf("ethBlock: num: %v, parent: %v, hash: %v",
ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String()) ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String())
@@ -627,29 +650,33 @@ func (s *Synchronizer) Sync2(ctx context.Context,
} }
} }
s.stats.UpdateSync(ethBlock, s.stats.UpdateSync(ethBlock,
&rollupData.Batches[batchesLen-1].Batch.BatchNum, &rollupData.Batches[batchesLen-1].Batch,
lastL1BatchBlock, lastForgeL1TxsNum) lastL1BatchBlock, lastForgeL1TxsNum)
} }
var firstBatchBlockNum *int64 hasBatch := false
if len(rollupData.Batches) > 0 { if len(rollupData.Batches) > 0 {
firstBatchBlockNum = &rollupData.Batches[0].Batch.EthBlockNum hasBatch = true
} }
if err := s.updateCurrentNextSlotIfSync(false, firstBatchBlockNum); err != nil { if err := s.updateCurrentNextSlotIfSync(false, hasBatch); err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
for _, batchData := range rollupData.Batches {
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum,
)
}
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
log.Debugw("Synced block", log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num, "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(), "syncBlocksPerc", s.stats.blocksPerc(),
"ethLastBlockNum", s.stats.Eth.LastBlock.Num, "ethLastBlockNum", s.stats.Eth.LastBlock.Num,
) )
for _, batchData := range rollupData.Batches {
log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(int64(batchData.Batch.BatchNum)),
"ethLastBatch", s.stats.Eth.LastBatch,
)
}
return blockData, nil, nil return blockData, nil, nil
} }
@@ -697,23 +724,23 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
} }
func getInitialVariables(ethClient eth.ClientInterface, func getInitialVariables(ethClient eth.ClientInterface,
consts *SCConsts) (*SCVariables, *StartBlockNums, error) { consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
} }
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit() auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
} }
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit() wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
} }
rollupVars := rollupInit.RollupVariables() rollupVars := rollupInit.RollupVariables()
auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding) auctionVars := auctionInit.AuctionVariables(consts.Auction.InitialMinimalBidding)
wDelayerVars := wDelayerInit.WDelayerVariables() wDelayerVars := wDelayerInit.WDelayerVariables()
return &SCVariables{ return &common.SCVariables{
Rollup: *rollupVars, Rollup: *rollupVars,
Auction: *auctionVars, Auction: *auctionVars,
WDelayer: *wDelayerVars, WDelayer: *wDelayerVars,
@@ -753,15 +780,15 @@ func (s *Synchronizer) resetState(block *common.Block) error {
s.vars.WDelayer = *wDelayer s.vars.WDelayer = *wDelayer
} }
batchNum, err := s.historyDB.GetLastBatchNum() batch, err := s.historyDB.GetLastBatch()
if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows {
return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err)) return tracerr.Wrap(fmt.Errorf("historyDB.GetLastBatchNum: %w", err))
} }
if tracerr.Unwrap(err) == sql.ErrNoRows { if tracerr.Unwrap(err) == sql.ErrNoRows {
batchNum = 0 batch = &common.Batch{}
} }
err = s.stateDB.Reset(batchNum) err = s.stateDB.Reset(batch.BatchNum)
if err != nil { if err != nil {
return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err)) return tracerr.Wrap(fmt.Errorf("stateDB.Reset: %w", err))
} }
@@ -783,9 +810,9 @@ func (s *Synchronizer) resetState(block *common.Block) error {
lastForgeL1TxsNum = &n lastForgeL1TxsNum = &n
} }
s.stats.UpdateSync(block, &batchNum, &lastL1BatchBlockNum, lastForgeL1TxsNum) s.stats.UpdateSync(block, batch, &lastL1BatchBlockNum, lastForgeL1TxsNum)
if err := s.updateCurrentNextSlotIfSync(true, nil); err != nil { if err := s.updateCurrentNextSlotIfSync(true, false); err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
return nil return nil
@@ -801,8 +828,10 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
// Get rollup events in the block, and make sure the block hash matches // Get rollup events in the block, and make sure the block hash matches
// the expected one. // the expected one.
rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash) rollupEvents, err := s.ethClient.RollupEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil { if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("RollupEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if rollupEvents == nil { if rollupEvents == nil {
@@ -919,9 +948,15 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
if s.stateDB.CurrentBatch() != batchNum { if s.stateDB.CurrentBatch() != batchNum {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != evtForgeBatch.BatchNum = (%v)", return nil, tracerr.Wrap(fmt.Errorf("stateDB.BatchNum (%v) != "+
"evtForgeBatch.BatchNum = (%v)",
s.stateDB.CurrentBatch(), batchNum)) s.stateDB.CurrentBatch(), batchNum))
} }
if s.stateDB.MT.Root().BigInt().Cmp(forgeBatchArgs.NewStRoot) != 0 {
return nil, tracerr.Wrap(fmt.Errorf("stateDB.MTRoot (%v) != "+
"forgeBatchArgs.NewStRoot (%v)",
s.stateDB.MT.Root().BigInt(), forgeBatchArgs.NewStRoot))
}
// Transform processed PoolL2 txs to L2 and store in BatchData // Transform processed PoolL2 txs to L2 and store in BatchData
l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way l2Txs, err := common.PoolL2TxsToL2Txs(poolL2Txs) // NOTE: This is a bit ugly, find a better way
@@ -962,6 +997,19 @@ func (s *Synchronizer) rollupSync(ethBlock *common.Block) (*common.RollupData, e
} }
batchData.CreatedAccounts = processTxsOut.CreatedAccounts batchData.CreatedAccounts = processTxsOut.CreatedAccounts
batchData.UpdatedAccounts = make([]common.AccountUpdate, 0,
len(processTxsOut.UpdatedAccounts))
for _, acc := range processTxsOut.UpdatedAccounts {
batchData.UpdatedAccounts = append(batchData.UpdatedAccounts,
common.AccountUpdate{
EthBlockNum: blockNum,
BatchNum: batchNum,
Idx: acc.Idx,
Nonce: acc.Nonce,
Balance: acc.Balance,
})
}
slotNum := int64(0) slotNum := int64(0)
if ethBlock.Num >= s.consts.Auction.GenesisBlockNum { if ethBlock.Num >= s.consts.Auction.GenesisBlockNum {
slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) / slotNum = (ethBlock.Num - s.consts.Auction.GenesisBlockNum) /
@@ -1105,8 +1153,10 @@ func (s *Synchronizer) auctionSync(ethBlock *common.Block) (*common.AuctionData,
// Get auction events in the block // Get auction events in the block
auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash) auctionEvents, err := s.ethClient.AuctionEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil { if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("AuctionEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if auctionEvents == nil { if auctionEvents == nil {
@@ -1202,8 +1252,10 @@ func (s *Synchronizer) wdelayerSync(ethBlock *common.Block) (*common.WDelayerDat
// Get wDelayer events in the block // Get wDelayer events in the block
wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash) wDelayerEvents, err := s.ethClient.WDelayerEventsByBlock(blockNum, &ethBlock.Hash)
if err != nil { if err != nil && err.Error() == errStrUnknownBlock {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(ErrUnknownBlock)
} else if err != nil {
return nil, tracerr.Wrap(fmt.Errorf("WDelayerEventsByBlock: %w", err))
} }
// No events in this block // No events in this block
if wDelayerEvents == nil { if wDelayerEvents == nil {


@@ -17,7 +17,6 @@ import (
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
@@ -172,6 +171,8 @@ func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, syncBloc
*exit = syncBatch.ExitTree[j] *exit = syncBatch.ExitTree[j]
} }
assert.Equal(t, batch.Batch, syncBatch.Batch) assert.Equal(t, batch.Batch, syncBatch.Batch)
// Ignore updated accounts
syncBatch.UpdatedAccounts = nil
assert.Equal(t, batch, syncBatch) assert.Equal(t, batch, syncBatch)
assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec
@@ -314,13 +315,21 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
historyDB := historydb.NewHistoryDB(db, nil) historyDB := historydb.NewHistoryDB(db, db, nil)
// Clear DB // Clear DB
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
return stateDB, historyDB return stateDB, historyDB
} }
func newBigInt(s string) *big.Int {
v, ok := new(big.Int).SetString(s, 10)
if !ok {
panic(fmt.Errorf("Can't set big.Int from %s", s))
}
return v
}
func TestSyncGeneral(t *testing.T) { func TestSyncGeneral(t *testing.T) {
// //
// Setup // Setup
@@ -339,7 +348,6 @@ func TestSyncGeneral(t *testing.T) {
s, err := NewSynchronizer(client, historyDB, stateDB, Config{ s, err := NewSynchronizer(client, historyDB, stateDB, Config{
StatsRefreshPeriod: 0 * time.Second, StatsRefreshPeriod: 0 * time.Second,
}) })
log.Error(err)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@@ -351,7 +359,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
// Test Sync for rollup genesis block // Test Sync for rollup genesis block
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -364,9 +372,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), stats.Eth.LastBlock.Num) assert.Equal(t, int64(1), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(1), stats.Sync.LastBlock.Num) assert.Equal(t, int64(1), stats.Sync.LastBlock.Num)
vars := s.SCVars() vars := s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbBlocks, err := s.historyDB.GetAllBlocks() dbBlocks, err := s.historyDB.GetAllBlocks()
require.NoError(t, err) require.NoError(t, err)
@@ -374,7 +382,7 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(1), dbBlocks[1].Num) assert.Equal(t, int64(1), dbBlocks[1].Num)
// Sync again and expect no new blocks // Sync again and expect no new blocks
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.Nil(t, syncBlock) require.Nil(t, syncBlock)
@@ -434,12 +442,22 @@ func TestSyncGeneral(t *testing.T) {
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs)) require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("18906357591508007884273218035694076596537737437965299189312069102730480717391")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("9513185123401321669660637227182204000277156839501731093239187625486561933297")
// blocks 1 (blockNum=3) // blocks 1 (blockNum=3)
i = 1 i = 1
require.Equal(t, 3, int(blocks[i].Block.Num)) require.Equal(t, 3, int(blocks[i].Block.Num))
require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs))
require.Equal(t, 2, len(blocks[i].Rollup.Batches)) require.Equal(t, 2, len(blocks[i].Rollup.Batches))
require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs))
// Set StateRoots for batches manually (til doesn't set it)
blocks[i].Rollup.Batches[0].Batch.StateRoot =
newBigInt("13060270878200012606074130020925677466793317216609491464427188889005039616594")
blocks[i].Rollup.Batches[1].Batch.StateRoot =
newBigInt("21427104994652624302859637783375978708867165042357535792408500519060088086054")
// Generate extra required data // Generate extra required data
ethAddTokens(blocks, client) ethAddTokens(blocks, client)
@@ -461,7 +479,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 2 // Block 2
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -478,7 +496,7 @@ func TestSyncGeneral(t *testing.T) {
// Block 3 // Block 3
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
@@ -502,7 +520,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -515,9 +533,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, int64(4), stats.Eth.LastBlock.Num) assert.Equal(t, int64(4), stats.Eth.LastBlock.Num)
assert.Equal(t, int64(4), stats.Sync.LastBlock.Num) assert.Equal(t, int64(4), stats.Sync.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
dbExits, err := s.historyDB.GetAllExits() dbExits, err := s.historyDB.GetAllExits()
require.NoError(t, err) require.NoError(t, err)
@@ -553,7 +571,7 @@ func TestSyncGeneral(t *testing.T) {
client.CtlMineBlock() client.CtlMineBlock()
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -614,6 +632,12 @@ func TestSyncGeneral(t *testing.T) {
blocks, err = tc.GenerateBlocks(set2) blocks, err = tc.GenerateBlocks(set2)
require.NoError(t, err) require.NoError(t, err)
// Set StateRoots for batches manually (til doesn't set it)
blocks[0].Rollup.Batches[0].Batch.StateRoot =
newBigInt("11218510534825843475100588932060366395781087435899915642332104464234485046683")
blocks[0].Rollup.Batches[1].Batch.StateRoot =
newBigInt("20283020730369146334077598087403837297563965802277806438205710455191646998983")
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
client.CtlRollback() client.CtlRollback()
} }
@@ -632,7 +656,7 @@ func TestSyncGeneral(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// First sync detects the reorg and discards 4 blocks // First sync detects the reorg and discards 4 blocks
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
expetedDiscards := int64(4) expetedDiscards := int64(4)
require.Equal(t, &expetedDiscards, discards) require.Equal(t, &expetedDiscards, discards)
@@ -641,9 +665,9 @@ func TestSyncGeneral(t *testing.T) {
assert.Equal(t, false, stats.Synced()) assert.Equal(t, false, stats.Synced())
assert.Equal(t, int64(6), stats.Eth.LastBlock.Num) assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
// At this point, the DB only has data up to block 1 // At this point, the DB only has data up to block 1
dbBlock, err := s.historyDB.GetLastBlock() dbBlock, err := s.historyDB.GetLastBlock()
@@ -660,7 +684,7 @@ func TestSyncGeneral(t *testing.T) {
// Sync blocks 2-6 // Sync blocks 2-6
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
syncBlock, discards, err = s.Sync2(ctx, nil) syncBlock, discards, err = s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
require.NotNil(t, syncBlock) require.NotNil(t, syncBlock)
@@ -680,9 +704,9 @@ func TestSyncGeneral(t *testing.T) {
} }
vars = s.SCVars() vars = s.SCVars()
assert.Equal(t, clientSetup.RollupVariables, vars.Rollup) assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
assert.Equal(t, clientSetup.AuctionVariables, vars.Auction) assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
assert.Equal(t, clientSetup.WDelayerVariables, vars.WDelayer) assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
} }
dbBlock, err = s.historyDB.GetLastBlock() dbBlock, err = s.historyDB.GetLastBlock()
@@ -783,7 +807,7 @@ func TestSyncForgerCommitment(t *testing.T) {
// be in sync // be in sync
for { for {
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {
@@ -802,7 +826,7 @@ func TestSyncForgerCommitment(t *testing.T) {
err = client.CtlAddBlocks([]common.BlockData{block}) err = client.CtlAddBlocks([]common.BlockData{block})
require.NoError(t, err) require.NoError(t, err)
syncBlock, discards, err := s.Sync2(ctx, nil) syncBlock, discards, err := s.Sync(ctx, nil)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, discards) require.Nil(t, discards)
if syncBlock == nil { if syncBlock == nil {


@@ -2,6 +2,7 @@ package debugapi
import ( import (
"context" "context"
"net"
"net/http" "net/http"
"time" "time"
@@ -12,6 +13,7 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
func handleNoRoute(c *gin.Context) { func handleNoRoute(c *gin.Context) {
@@ -107,6 +109,8 @@ func (a *DebugAPI) Run(ctx context.Context) error {
api.Use(cors.Default()) api.Use(cors.Default())
debugAPI := api.Group("/debug") debugAPI := api.Group("/debug")
debugAPI.GET("/metrics", gin.WrapH(promhttp.Handler()))
debugAPI.GET("sdb/batchnum", a.handleCurrentBatch) debugAPI.GET("sdb/batchnum", a.handleCurrentBatch)
debugAPI.GET("sdb/mtroot", a.handleMTRoot) debugAPI.GET("sdb/mtroot", a.handleMTRoot)
// Accounts returned by these endpoints will always have BatchNum = 0, // Accounts returned by these endpoints will always have BatchNum = 0,
@@ -118,16 +122,20 @@ func (a *DebugAPI) Run(ctx context.Context) error {
debugAPI.GET("sync/stats", a.handleSyncStats) debugAPI.GET("sync/stats", a.handleSyncStats)
debugAPIServer := &http.Server{ debugAPIServer := &http.Server{
Addr: a.addr,
Handler: api, Handler: api,
// Use some hardcoded numberes that are suitable for testing // Use some hardcoded numbers that are suitable for testing
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", a.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("DebugAPI is ready at %v", a.addr)
go func() { go func() {
log.Infof("DebugAPI is ready at %v", a.addr) if err := debugAPIServer.Serve(listener); err != nil &&
if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed { tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()
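
A minimal standalone sketch of the listen-then-serve pattern the hunk above switches to: binding the port with net.Listen before spawning the goroutine surfaces address-in-use errors synchronously, and the "ready" log only prints once the listener is actually open. The address below is hypothetical.

    package main

    import (
        "log"
        "net"
        "net/http"
    )

    func main() {
        server := &http.Server{Handler: http.NewServeMux()}
        // Bind synchronously so a bind error is returned to the caller
        // instead of killing the process from inside the goroutine.
        listener, err := net.Listen("tcp", "127.0.0.1:8080") // hypothetical address
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("server is ready at %v", listener.Addr())
        if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
            log.Fatalf("Listen: %s\n", err)
        }
    }
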


@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -145,7 +146,7 @@ const longWaitDuration = 999 * time.Hour
// const provingDuration = 2 * time.Second // const provingDuration = 2 * time.Second
func (s *Mock) runProver(ctx context.Context) { func (s *Mock) runProver(ctx context.Context) {
waitDuration := longWaitDuration timer := time.NewTimer(longWaitDuration)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -153,21 +154,27 @@ func (s *Mock) runProver(ctx context.Context) {
case msg := <-s.msgCh: case msg := <-s.msgCh:
switch msg.value { switch msg.value {
case "cancel": case "cancel":
waitDuration = longWaitDuration if !timer.Stop() {
<-timer.C
}
timer.Reset(longWaitDuration)
s.Lock() s.Lock()
if !s.status.IsReady() { if !s.status.IsReady() {
s.status = prover.StatusCodeAborted s.status = prover.StatusCodeAborted
} }
s.Unlock() s.Unlock()
case "prove": case "prove":
waitDuration = s.provingDuration if !timer.Stop() {
<-timer.C
}
timer.Reset(s.provingDuration)
s.Lock() s.Lock()
s.status = prover.StatusCodeBusy s.status = prover.StatusCodeBusy
s.Unlock() s.Unlock()
} }
msg.ackCh <- true msg.ackCh <- true
case <-time.After(waitDuration): case <-timer.C:
waitDuration = longWaitDuration timer.Reset(longWaitDuration)
s.Lock() s.Lock()
if s.status != prover.StatusCodeBusy { if s.status != prover.StatusCodeBusy {
s.Unlock() s.Unlock()
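
The hunk above replaces time.After with an explicit time.Timer. A self-contained sketch of the Stop/drain/Reset idiom it relies on; the non-blocking drain here is a defensive variation on the unconditional <-timer.C drain used in the diff:

    package main

    import (
        "fmt"
        "time"
    )

    // resetTimer stops the timer and drains its channel before Reset;
    // otherwise a stale expiry already sitting in the channel could
    // fire immediately after the reset.
    func resetTimer(t *time.Timer, d time.Duration) {
        if !t.Stop() {
            // Non-blocking drain: the channel may already be empty
            // if the expiry was consumed elsewhere.
            select {
            case <-t.C:
            default:
            }
        }
        t.Reset(d)
    }

    func main() {
        timer := time.NewTimer(time.Hour)
        resetTimer(timer, 10*time.Millisecond)
        <-timer.C
        fmt.Println("fired after reset")
    }
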
@@ -202,16 +209,20 @@ func (s *Mock) Run(ctx context.Context) error {
apiGroup.POST("/cancel", s.handleCancel) apiGroup.POST("/cancel", s.handleCancel)
debugAPIServer := &http.Server{ debugAPIServer := &http.Server{
Addr: s.addr,
Handler: api, Handler: api,
// Use some hardcoded numbers that are suitable for testing // Use some hardcoded numbers that are suitable for testing
ReadTimeout: 30 * time.Second, //nolint:gomnd ReadTimeout: 30 * time.Second, //nolint:gomnd
WriteTimeout: 30 * time.Second, //nolint:gomnd WriteTimeout: 30 * time.Second, //nolint:gomnd
MaxHeaderBytes: 1 << 20, //nolint:gomnd MaxHeaderBytes: 1 << 20, //nolint:gomnd
} }
listener, err := net.Listen("tcp", s.addr)
if err != nil {
return tracerr.Wrap(err)
}
log.Infof("prover.MockServer is ready at %v", s.addr)
go func() { go func() {
log.Infof("prover.MockServer is ready at %v", s.addr) if err := debugAPIServer.Serve(listener); err != nil &&
if err := debugAPIServer.ListenAndServe(); err != nil && tracerr.Unwrap(err) != http.ErrServerClosed { tracerr.Unwrap(err) != http.ErrServerClosed {
log.Fatalf("Listen: %s\n", err) log.Fatalf("Listen: %s\n", err)
} }
}() }()


@@ -142,10 +142,12 @@ func GenerateTxsZKInputs0(t *testing.T, chainID uint16) (users []til.User, coord
// same values than in the js test // same values than in the js test
users = GenerateJsUsers(t) users = GenerateJsUsers(t)
depositAmount, err := common.Float40(10400).BigInt()
require.Nil(t, err)
l1UserTxs = []common.L1Tx{ l1UserTxs = []common.L1Tx{
{ {
FromIdx: 0, FromIdx: 0,
DepositAmount: big.NewInt(16000000), DepositAmount: depositAmount,
Amount: big.NewInt(0), Amount: big.NewInt(0),
TokenID: 1, TokenID: 1,
FromBJJ: users[0].BJJ.Public().Compress(), FromBJJ: users[0].BJJ.Public().Compress(),


@@ -38,7 +38,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
}) })
} }
hdb := historydb.NewHistoryDB(db, nil) hdb := historydb.NewHistoryDB(db, db, nil)
assert.NoError(t, hdb.AddBlock(&common.Block{ assert.NoError(t, hdb.AddBlock(&common.Block{
Num: 1, Num: 1,
})) }))
@@ -75,7 +75,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB") dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
@@ -311,7 +311,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "8737171572459172806192626402462788826264011087579491137542380589998149683116", bb.LocalStateDB().MT.Root().BigInt().String())
h, err := zki.HashGlobalData() h, err := zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "12174727174629825205577542675894290689387326670869871089988393208259924373499", h.String()) assert.Equal(t, "18608843755023673022528019960628191162333429206359207449879743919826610006009", h.String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)
// batch3 // batch3
@@ -334,7 +334,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String()) assert.Equal(t, "18306761925365215381387147754881756804475668085493847010988306480531520370130", bb.LocalStateDB().MT.Root().BigInt().String())
h, err = zki.HashGlobalData() h, err = zki.HashGlobalData()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "16351950370739934361208977436603065280805499094788807090831605833717933916063", h.String()) assert.Equal(t, "6651837443119278772088559395433504719862425648816904171510845286897104469889", h.String())
assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0]) assert.Equal(t, common.EthAddrToBigInt(tc.Users["Coord"].Addr), zki.EthAddr3[0])
assert.Equal(t, "0", zki.EthAddr3[1].String()) assert.Equal(t, "0", zki.EthAddr3[1].String())
sendProofAndCheckResp(t, zki) sendProofAndCheckResp(t, zki)


@@ -31,7 +31,7 @@ func TestMain(m *testing.M) {
os.Exit(exitVal) os.Exit(exitVal)
} }
const MaxTx = 376 const MaxTx = 352
const NLevels = 32 const NLevels = 32
const MaxL1Tx = 256 const MaxL1Tx = 256
const MaxFeeTx = 64 const MaxFeeTx = 64
@@ -61,6 +61,7 @@ func sendProofAndCheckResp(t *testing.T, zki *common.ZKInputs) {
return return
} }
log.Infof("sending proof to %s", proofServerURL)
// Store zkinputs json for debugging purposes // Store zkinputs json for debugging purposes
zkInputsJSON, err := json.Marshal(zki) zkInputsJSON, err := json.Marshal(zki)
require.NoError(t, err) require.NoError(t, err)


@@ -27,16 +27,21 @@ type TxProcessor struct {
// AccumulatedFees contains the accumulated fees for each token (Coord // AccumulatedFees contains the accumulated fees for each token (Coord
// Idx) in the processed batch // Idx) in the processed batch
AccumulatedFees map[common.Idx]*big.Int AccumulatedFees map[common.Idx]*big.Int
// updatedAccounts stores the last version of the account when it has
// been created/updated by any of the processed transactions.
updatedAccounts map[common.Idx]*common.Account
config Config config Config
} }
// Config contains the TxProcessor configuration parameters // Config contains the TxProcessor configuration parameters
type Config struct { type Config struct {
NLevels uint32 NLevels uint32
// MaxFeeTx is the maximum number of coordinator accounts that can receive fees
MaxFeeTx uint32 MaxFeeTx uint32
MaxTx uint32 MaxTx uint32
MaxL1Tx uint32 MaxL1Tx uint32
ChainID uint16 // ChainID of the blockchain
ChainID uint16
} }
type processedExit struct { type processedExit struct {
@@ -53,6 +58,9 @@ type ProcessTxOutput struct {
CreatedAccounts []common.Account CreatedAccounts []common.Account
CoordinatorIdxsMap map[common.TokenID]common.Idx CoordinatorIdxsMap map[common.TokenID]common.Idx
CollectedFees map[common.TokenID]*big.Int CollectedFees map[common.TokenID]*big.Int
// UpdatedAccounts returns the current state of each account
// created/updated by any of the processed transactions.
UpdatedAccounts map[common.Idx]*common.Account
} }
func newErrorNotEnoughBalance(tx common.Tx) error { func newErrorNotEnoughBalance(tx common.Tx) error {
@@ -125,6 +133,10 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxL1Tx)) return nil, tracerr.Wrap(fmt.Errorf("L1UserTx + L1CoordinatorTx (%d) can not be bigger than MaxL1Tx (%d)", len(l1usertxs)+len(l1coordinatortxs), tp.config.MaxL1Tx))
} }
if tp.s.Type() == statedb.TypeSynchronizer {
tp.updatedAccounts = make(map[common.Idx]*common.Account)
}
exits := make([]processedExit, nTx) exits := make([]processedExit, nTx)
if tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeBatchBuilder {
@@ -196,7 +208,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
} }
} }
if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder { if tp.s.Type() == statedb.TypeSynchronizer || tp.s.Type() == statedb.TypeBatchBuilder {
if exitIdx != nil && exitTree != nil { if exitIdx != nil && exitTree != nil && exitAccount != nil {
exits[tp.i] = processedExit{ exits[tp.i] = processedExit{
exit: true, exit: true,
newExit: newExit, newExit: newExit,
@@ -380,7 +392,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr) tp.zki.EthAddr3[iFee] = common.EthAddrToBigInt(accCoord.EthAddr)
} }
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee) accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
pFee, err := tp.s.UpdateAccount(idx, accCoord) pFee, err := tp.updateAccount(idx, accCoord)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
@@ -405,39 +417,39 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
return nil, nil return nil, nil
} }
// once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data
var exitInfos []common.ExitInfo
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
for i := 0; i < nTx; i++ {
if !exits[i].exit {
continue
}
exitIdx := exits[i].idx
exitAccount := exits[i].acc
// 0. generate MerkleProof
p, err := exitTree.GenerateSCVerifierProof(exitIdx.BigInt(), nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
// 1. generate common.ExitInfo
ei := common.ExitInfo{
AccountIdx: exitIdx,
MerkleProof: p,
Balance: exitAccount.Balance,
}
if prevExit, ok := exitInfosByIdx[exitIdx]; !ok {
exitInfos = append(exitInfos, ei)
exitInfosByIdx[exitIdx] = &exitInfos[len(exitInfos)-1]
} else {
*prevExit = ei
}
}
if tp.s.Type() == statedb.TypeSynchronizer { if tp.s.Type() == statedb.TypeSynchronizer {
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will // once all txs processed (exitTree root frozen), for each Exit,
// generate common.ExitInfo data
var exitInfos []common.ExitInfo
exitInfosByIdx := make(map[common.Idx]*common.ExitInfo)
for i := 0; i < nTx; i++ {
if !exits[i].exit {
continue
}
exitIdx := exits[i].idx
exitAccount := exits[i].acc
// 0. generate MerkleProof
p, err := exitTree.GenerateSCVerifierProof(exitIdx.BigInt(), nil)
if err != nil {
return nil, tracerr.Wrap(err)
}
// 1. generate common.ExitInfo
ei := common.ExitInfo{
AccountIdx: exitIdx,
MerkleProof: p,
Balance: exitAccount.Balance,
}
if prevExit, ok := exitInfosByIdx[exitIdx]; !ok {
exitInfos = append(exitInfos, ei)
exitInfosByIdx[exitIdx] = &exitInfos[len(exitInfos)-1]
} else {
*prevExit = ei
}
}
// return exitInfos, createdAccounts and collectedFees, so Synchronizer will
// be able to store it into HistoryDB for the concrete BatchNum // be able to store it into HistoryDB for the concrete BatchNum
return &ProcessTxOutput{ return &ProcessTxOutput{
ZKInputs: nil, ZKInputs: nil,
@@ -445,6 +457,7 @@ func (tp *TxProcessor) ProcessTxs(coordIdxs []common.Idx, l1usertxs, l1coordinat
CreatedAccounts: createdAccounts, CreatedAccounts: createdAccounts,
CoordinatorIdxsMap: coordIdxsMap, CoordinatorIdxsMap: coordIdxsMap,
CollectedFees: collectedFees, CollectedFees: collectedFees,
UpdatedAccounts: tp.updatedAccounts,
}, nil }, nil
} }
@@ -515,6 +528,20 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx if tp.i < len(tp.zki.ISOnChain) { // len(tp.zki.ISOnChain) == nTx
tp.zki.ISOnChain[tp.i] = big.NewInt(1) tp.zki.ISOnChain[tp.i] = big.NewInt(1)
} }
if tx.Type == common.TxTypeForceTransfer ||
tx.Type == common.TxTypeDepositTransfer ||
tx.Type == common.TxTypeCreateAccountDepositTransfer ||
tx.Type == common.TxTypeForceExit {
// in the cases where at L1Tx there is usage of the
// Amount parameter, add it at the ZKInputs.AmountF
// slot
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, nil, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
}
} }
switch tx.Type { switch tx.Type {
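
As a hedged illustration of the AmountF encoding added above: assuming, as this diff suggests, that common.NewFloat40 converts a *big.Int into the circuit's 40-bit mantissa*10^exponent float and returns an error when the value cannot be represented exactly, the conversion looks like this in isolation:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/hermeznetwork/hermez-node/common"
    )

    func main() {
        // 12345600 = 123456 * 10^2 fits the mantissa*10^exponent form,
        // so the conversion is exact; a value that loses precision
        // would make NewFloat40 return an error instead (assumption).
        amount := big.NewInt(12345600)
        amountF40, err := common.NewFloat40(amount)
        if err != nil {
            panic(err)
        }
        // This is the value the code above places into zki.AmountF[i].
        fmt.Println(big.NewInt(int64(amountF40)))
    }
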
@@ -578,7 +605,7 @@ func (tp *TxProcessor) ProcessL1Tx(exitTree *merkletree.MerkleTree, tx *common.L
// execute exit flow // execute exit flow
// coordIdxsMap is 'nil', as at L1Txs there is no L2 fees // coordIdxsMap is 'nil', as at L1Txs there is no L2 fees
exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx()) exitAccount, newExit, err := tp.applyExit(nil, nil, exitTree, tx.Tx(), tx.Amount)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, nil, false, nil, tracerr.Wrap(err) return nil, nil, false, nil, tracerr.Wrap(err)
@@ -657,6 +684,11 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr) tp.zki.ToEthAddr[tp.i] = common.EthAddrToBigInt(tx.ToEthAddr)
tp.zki.OnChain[tp.i] = big.NewInt(0) tp.zki.OnChain[tp.i] = big.NewInt(0)
amountF40, err := common.NewFloat40(tx.Amount)
if err != nil {
return nil, nil, false, tracerr.Wrap(err)
}
tp.zki.AmountF[tp.i] = big.NewInt(int64(amountF40))
tp.zki.NewAccount[tp.i] = big.NewInt(0) tp.zki.NewAccount[tp.i] = big.NewInt(0)
// L2Txs // L2Txs
@@ -698,7 +730,7 @@ func (tp *TxProcessor) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx,
} }
case common.TxTypeExit: case common.TxTypeExit:
// execute exit flow // execute exit flow
exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx()) exitAccount, newExit, err := tp.applyExit(coordIdxsMap, collectedFees, exitTree, tx.Tx(), tx.Amount)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return nil, nil, false, tracerr.Wrap(err) return nil, nil, false, tracerr.Wrap(err)
@@ -720,7 +752,7 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
EthAddr: tx.FromEthAddr, EthAddr: tx.FromEthAddr,
} }
p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), account) p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), account)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -755,6 +787,28 @@ func (tp *TxProcessor) applyCreateAccount(tx *common.L1Tx) error {
return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1) return tp.s.SetCurrentIdx(tp.s.CurrentIdx() + 1)
} }
// createAccount is a wrapper over the StateDB.CreateAccount method that also
// stores the created account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) createAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.CreateAccount(idx, account)
}
// updateAccount is a wrapper over the StateDB.UpdateAccount method that also
// stores the updated account in the updatedAccounts map in case the StateDB is
// of TypeSynchronizer
func (tp *TxProcessor) updateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
if tp.s.Type() == statedb.TypeSynchronizer {
account.Idx = idx
tp.updatedAccounts[idx] = account
}
return tp.s.UpdateAccount(idx, account)
}
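
The two wrappers above implement a record-on-write pattern: every write goes through a hook that remembers the latest version of each account when running in synchronizer mode. A minimal generic sketch (the types here are hypothetical, not the real StateDB API):

    package main

    import "fmt"

    type account struct {
        Idx     int
        Balance int
    }

    type recordingStore struct {
        synchronizer bool
        updated      map[int]*account
    }

    // update mirrors the updateAccount wrapper above: in synchronizer
    // mode, remember the last written version of the account keyed by
    // its index before delegating to the underlying write.
    func (s *recordingStore) update(idx int, acc *account) {
        if s.synchronizer {
            acc.Idx = idx
            s.updated[idx] = acc
        }
        // ...the real code delegates to StateDB.UpdateAccount here...
    }

    func main() {
        s := &recordingStore{synchronizer: true, updated: map[int]*account{}}
        s.update(256, &account{Balance: 50})
        s.update(256, &account{Balance: 45}) // a later tx overwrites the entry
        fmt.Println(s.updated[256].Balance)  // 45: only the last version is kept
    }
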
// applyDeposit updates the balance in the account of the depositer, if // applyDeposit updates the balance in the account of the depositer, if
// andTransfer parameter is set to true, the method will also apply the // andTransfer parameter is set to true, the method will also apply the
// Transfer of the L1Tx/DepositTransfer // Transfer of the L1Tx/DepositTransfer
@@ -785,7 +839,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
} }
// update sender account in localStateDB // update sender account in localStateDB
p, err := tp.s.UpdateAccount(tx.FromIdx, accSender) p, err := tp.updateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -822,7 +876,7 @@ func (tp *TxProcessor) applyDeposit(tx *common.L1Tx, transfer bool) error {
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err := tp.s.UpdateAccount(tx.ToIdx, accReceiver) p, err := tp.updateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -905,7 +959,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
} }
// update sender account in localStateDB // update sender account in localStateDB
pSender, err := tp.s.UpdateAccount(tx.FromIdx, accSender) pSender, err := tp.updateAccount(tx.FromIdx, accSender)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return tracerr.Wrap(err) return tracerr.Wrap(err)
@@ -944,7 +998,7 @@ func (tp *TxProcessor) applyTransfer(coordIdxsMap map[common.TokenID]common.Idx,
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.Amount)
// update receiver account in localStateDB // update receiver account in localStateDB
pReceiver, err := tp.s.UpdateAccount(auxToIdx, accReceiver) pReceiver, err := tp.updateAccount(auxToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -987,7 +1041,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
} }
// create Account of the Sender // create Account of the Sender
p, err := tp.s.CreateAccount(common.Idx(tp.s.CurrentIdx()+1), accSender) p, err := tp.createAccount(common.Idx(tp.s.CurrentIdx()+1), accSender)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1035,7 +1089,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount) accReceiver.Balance = new(big.Int).Add(accReceiver.Balance, tx.EffectiveAmount)
// update receiver account in localStateDB // update receiver account in localStateDB
p, err = tp.s.UpdateAccount(tx.ToIdx, accReceiver) p, err = tp.updateAccount(tx.ToIdx, accReceiver)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
@@ -1050,7 +1104,7 @@ func (tp *TxProcessor) applyCreateAccountDepositTransfer(tx *common.L1Tx) error
// new Leaf in the ExitTree. // new Leaf in the ExitTree.
func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx, func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree, collectedFees map[common.TokenID]*big.Int, exitTree *merkletree.MerkleTree,
tx common.Tx) (*common.Account, bool, error) { tx common.Tx, originalAmount *big.Int) (*common.Account, bool, error) {
// 0. subtract tx.Amount from current Account in StateMT // 0. subtract tx.Amount from current Account in StateMT
// add the tx.Amount into the Account (tx.FromIdx) in the ExitMT // add the tx.Amount into the Account (tx.FromIdx) in the ExitMT
acc, err := tp.s.GetAccount(tx.FromIdx) acc, err := tp.s.GetAccount(tx.FromIdx)
@@ -1109,7 +1163,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
} }
p, err := tp.s.UpdateAccount(tx.FromIdx, acc) p, err := tp.updateAccount(tx.FromIdx, acc)
if err != nil { if err != nil {
return nil, false, tracerr.Wrap(err) return nil, false, tracerr.Wrap(err)
} }
@@ -1120,6 +1174,21 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
if exitTree == nil { if exitTree == nil {
return nil, false, nil return nil, false, nil
} }
// Do not add the Exit when Amount=0 (as opposed to EffectiveAmount=0).
// In the txprocessor.applyExit function, tx.Amount is in reality the
// EffectiveAmount, which is why the originalAmount parameter is used
// here: it contains the real value of tx.Amount (not
// tx.EffectiveAmount). This is a particularity of the circuit's
// approach; the idea is to eventually update the circuit so that when
// Amount>0 but EffectiveAmount=0 the Exit is not added to the Exits
// MerkleTree, but for the moment the Go code is adapted to the
// circuit.
if originalAmount.Cmp(big.NewInt(0)) == 0 { // Amount == 0
// if the Exit Amount==0, the Exit is not added to the ExitTree
return nil, false, nil
}
exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx) exitAccount, err := statedb.GetAccountInTreeDB(exitTree.DB(), tx.FromIdx)
if tracerr.Unwrap(err) == db.ErrNotFound { if tracerr.Unwrap(err) == db.ErrNotFound {
// 1a. if idx does not exist in exitTree: // 1a. if idx does not exist in exitTree:
@@ -1128,6 +1197,8 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
exitAccount := &common.Account{ exitAccount := &common.Account{
TokenID: acc.TokenID, TokenID: acc.TokenID,
Nonce: common.Nonce(0), Nonce: common.Nonce(0),
// as this is a common.Tx, the tx.Amount is already an
// EffectiveAmount
Balance: tx.Amount, Balance: tx.Amount,
BJJ: acc.BJJ, BJJ: acc.BJJ,
EthAddr: acc.EthAddr, EthAddr: acc.EthAddr,
@@ -1141,7 +1212,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
tp.zki.Balance2[tp.i] = tx.Amount // Balance2 contains the ExitLeaf Balance before the
// leaf update, which is 0
tp.zki.Balance2[tp.i] = big.NewInt(0)
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
// as Leaf didn't exist in the ExitTree, set NewExit[i]=1 // as Leaf didn't exist in the ExitTree, set NewExit[i]=1
tp.zki.NewExit[tp.i] = big.NewInt(1) tp.zki.NewExit[tp.i] = big.NewInt(1)
@@ -1175,7 +1248,9 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
tp.zki.Sign2[tp.i] = big.NewInt(1) tp.zki.Sign2[tp.i] = big.NewInt(1)
} }
tp.zki.Ay2[tp.i] = accBJJY tp.zki.Ay2[tp.i] = accBJJY
tp.zki.Balance2[tp.i] = tx.Amount // Balance2 contains the ExitLeaf Balance before the leaf
// update
tp.zki.Balance2[tp.i] = exitAccount.Balance
tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr) tp.zki.EthAddr2[tp.i] = common.EthAddrToBigInt(acc.EthAddr)
} }
@@ -1193,6 +1268,7 @@ func (tp *TxProcessor) applyExit(coordIdxsMap map[common.TokenID]common.Idx,
} }
tp.zki.OldKey2[tp.i] = p.OldKey.BigInt() tp.zki.OldKey2[tp.i] = p.OldKey.BigInt()
tp.zki.OldValue2[tp.i] = p.OldValue.BigInt() tp.zki.OldValue2[tp.i] = p.OldValue.BigInt()
tp.zki.ISExitRoot[tp.i] = exitTree.Root().BigInt()
} }
return exitAccount, false, nil return exitAccount, false, nil

View File

@@ -4,6 +4,7 @@ import (
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"os" "os"
"sort"
"testing" "testing"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
@@ -794,7 +795,8 @@ func TestMultipleCoordIdxForTokenID(t *testing.T) {
checkBalanceByIdx(t, tp.s, 259, "0") // Coord0 checkBalanceByIdx(t, tp.s, 259, "0") // Coord0
} }
func TestTwoExits(t *testing.T) { func testTwoExits(t *testing.T, stateDBType statedb.TypeStateDB) ([]*ProcessTxOutput,
[]*ProcessTxOutput, []*ProcessTxOutput) {
// In the first part we generate a batch with two force exits for the // In the first part we generate a batch with two force exits for the
// same account of 20 each. The txprocessor output should be a single // same account of 20 each. The txprocessor output should be a single
// exitInfo with balance of 40. // exitInfo with balance of 40.
@@ -802,8 +804,9 @@ func TestTwoExits(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir)) defer assert.NoError(t, os.RemoveAll(dir))
nLevels := 16
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32}) Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err) assert.NoError(t, err)
chainID := uint16(1) chainID := uint16(1)
@@ -839,10 +842,10 @@ func TestTwoExits(t *testing.T) {
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs)) require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
config := Config{ config := Config{
NLevels: 32, NLevels: uint32(nLevels),
MaxFeeTx: 64, MaxTx: 3,
MaxTx: 512, MaxL1Tx: 2,
MaxL1Tx: 16, MaxFeeTx: 2,
ChainID: chainID, ChainID: chainID,
} }
tp := NewTxProcessor(sdb, config) tp := NewTxProcessor(sdb, config)
@@ -855,8 +858,6 @@ func TestTwoExits(t *testing.T) {
} }
} }
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
acc, err := sdb.GetAccount(256) acc, err := sdb.GetAccount(256)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, big.NewInt(60), acc.Balance) assert.Equal(t, big.NewInt(60), acc.Balance)
@@ -871,7 +872,7 @@ func TestTwoExits(t *testing.T) {
defer assert.NoError(t, os.RemoveAll(dir2)) defer assert.NoError(t, os.RemoveAll(dir2))
sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128, sdb2, err := statedb.NewStateDB(statedb.Config{Path: dir2, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32}) Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err) assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx) tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
@@ -909,5 +910,261 @@ func TestTwoExits(t *testing.T) {
} }
} }
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof) // In the third part we start a fresh statedb and generate a batch with
// two force exits for the same account as before, but where the 1st Exit
// takes all the amount and the 2nd Exit requests more amount than
// the available balance. The txprocessor output should be a single
// exitInfo with balance of 40, and the exit merkle tree proof should
// be equal to the previous ones.
dir3, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir3))
sdb3, err := statedb.NewStateDB(statedb.Config{Path: dir3, Keep: 128,
Type: stateDBType, NLevels: nLevels})
assert.NoError(t, err)
tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
// Single exit with balance of both exits in previous set. The exit
// root should match.
set3 := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
ForceExit(0) A: 40
ForceExit(0) A: 100
> batchL1 // freeze L1User{2}
> batchL1 // forge L1User{2}
> block
`
blocks, err = tc.GenerateBlocks(set3)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
tp = NewTxProcessor(sdb3, config)
ptOuts3 := []*ProcessTxOutput{}
for _, block := range blocks {
for _, batch := range block.Rollup.Batches {
ptOut, err := tp.ProcessTxs(nil, batch.L1UserTxs, nil, nil)
require.NoError(t, err)
ptOuts3 = append(ptOuts3, ptOut)
}
}
return ptOuts, ptOuts2, ptOuts3
}
func TestTwoExitsSynchronizer(t *testing.T) {
ptOuts, ptOuts2, ptOuts3 := testTwoExits(t, statedb.TypeSynchronizer)
assert.Equal(t, 1, len(ptOuts[3].ExitInfos))
assert.Equal(t, big.NewInt(40), ptOuts[3].ExitInfos[0].Balance)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts2[3].ExitInfos[0].MerkleProof)
assert.Equal(t, ptOuts[3].ExitInfos[0].MerkleProof, ptOuts3[3].ExitInfos[0].MerkleProof)
}
func TestExitOf0Amount(t *testing.T) {
// Test to check that when doing an Exit with amount 0 the Exit Root
// does not change (as there is no new Exit Leaf created)
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeBatchBuilder, NLevels: 32})
assert.NoError(t, err)
chainID := uint16(1)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
set := `
Type: Blockchain
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 100
> batchL1 // batch1: freeze L1User{2}
> batchL1 // batch2: forge L1User{2}
ForceExit(0) A: 10
ForceExit(0) B: 0
> batchL1 // batch3: freeze L1User{2}
> batchL1 // batch4: forge L1User{2}
ForceExit(0) A: 10
> batchL1 // batch5: freeze L1User{1}
> batchL1 // batch6: forge L1User{1}
ForceExit(0) A: 0
> batchL1 // batch7: freeze L1User{1}
> batchL1 // batch8: forge L1User{1}
> block
`
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &til.ConfigExtra{})
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
// Sanity check
require.Equal(t, 2, len(blocks[0].Rollup.Batches[1].L1UserTxs))
require.Equal(t, 2, len(blocks[0].Rollup.Batches[3].L1UserTxs))
require.Equal(t, big.NewInt(10), blocks[0].Rollup.Batches[3].L1UserTxs[0].Amount)
require.Equal(t, big.NewInt(0), blocks[0].Rollup.Batches[3].L1UserTxs[1].Amount)
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
// For this test, only the batches with transactions are processed:
// - Batch2, equivalent to Batches[1]
// - Batch4, equivalent to Batches[3]
// - Batch6, equivalent to Batches[5]
// - Batch8, equivalent to Batches[7]
// process Batch2:
_, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[1].L1UserTxs, nil, nil)
require.NoError(t, err)
// process Batch4:
ptOut, err := tp.ProcessTxs(nil, blocks[0].Rollup.Batches[3].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
exitRootBatch4 := ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String()
// process Batch6:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[5].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "14329759303391468223438874789317921522067594445474390443816827472846339238908", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// Expect that the ExitRoot for Batch6 will be equal to the one for
// Batch4, as the Batch4 & Batch6 have the same tx with Exit Amount=10,
// and Batch4 has a 2nd tx with Exit Amount=0.
assert.Equal(t, exitRootBatch4, ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
// For the Batch8, as there is only 1 exit with Amount=0, the ExitRoot
// should be 0.
// process Batch8:
ptOut, err = tp.ProcessTxs(nil, blocks[0].Rollup.Batches[7].L1UserTxs, nil, nil)
require.NoError(t, err)
assert.Equal(t, "0", ptOut.ZKInputs.Metadata.NewExitRootRaw.BigInt().String())
}
func TestUpdatedAccounts(t *testing.T) {
dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err)
defer assert.NoError(t, os.RemoveAll(dir))
sdb, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128,
Type: statedb.TypeSynchronizer, NLevels: 32})
assert.NoError(t, err)
set := `
Type: Blockchain
AddToken(1)
CreateAccountCoordinator(0) Coord // 256
CreateAccountCoordinator(1) Coord // 257
> batch // 1
CreateAccountDeposit(0) A: 50 // 258
CreateAccountDeposit(0) B: 60 // 259
CreateAccountDeposit(1) A: 70 // 260
CreateAccountDeposit(1) B: 80 // 261
> batchL1 // 2
> batchL1 // 3
Transfer(0) A-B: 5 (126)
> batch // 4
Exit(1) B: 5 (126)
> batch // 5
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
tc.FillBlocksL1UserTxsBatchNum(blocks)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
require.Equal(t, 5, len(blocks[0].Rollup.Batches))
config := Config{
NLevels: 32,
MaxFeeTx: 64,
MaxTx: 512,
MaxL1Tx: 16,
ChainID: chainID,
}
tp := NewTxProcessor(sdb, config)
sortedKeys := func(m map[common.Idx]*common.Account) []int {
keys := make([]int, 0)
for k := range m {
keys = append(keys, int(k))
}
sort.Ints(keys)
return keys
}
for _, batch := range blocks[0].Rollup.Batches {
l2Txs := common.L2TxsToPoolL2Txs(batch.L2Txs)
ptOut, err := tp.ProcessTxs(batch.Batch.FeeIdxsCoordinator, batch.L1UserTxs,
batch.L1CoordinatorTxs, l2Txs)
require.NoError(t, err)
switch batch.Batch.BatchNum {
case 1:
assert.Equal(t, 2, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 257}, sortedKeys(ptOut.UpdatedAccounts))
case 2:
assert.Equal(t, 0, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{}, sortedKeys(ptOut.UpdatedAccounts))
case 3:
assert.Equal(t, 4, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{258, 259, 260, 261}, sortedKeys(ptOut.UpdatedAccounts))
case 4:
assert.Equal(t, 2+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{256, 258, 259}, sortedKeys(ptOut.UpdatedAccounts))
case 5:
assert.Equal(t, 1+1, len(ptOut.UpdatedAccounts))
assert.Equal(t, []int{257, 261}, sortedKeys(ptOut.UpdatedAccounts))
}
for idx, updAcc := range ptOut.UpdatedAccounts {
acc, err := sdb.GetAccount(idx)
require.NoError(t, err)
// If acc.Balance is 0, set it to 0 with big.NewInt so
// that the comparison succeeds. Without this, the
// comparison will not succeed because acc.Balance is
// set from a slice, and thus the internal big.Int
// buffer is not nil (big.Int.abs)
if acc.Balance.BitLen() == 0 {
acc.Balance = big.NewInt(0)
}
assert.Equal(t, acc, updAcc)
}
}
} }
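
The balance normalization at the end of TestUpdatedAccounts works around a real big.Int quirk: two numerically equal zeros can differ in their internal representation, which defeats the reflection-based equality that testify's assert.Equal relies on. A standalone sketch:

    package main

    import (
        "fmt"
        "math/big"
        "reflect"
    )

    func main() {
        a := big.NewInt(0)                    // internal abs slice is nil
        b := new(big.Int).SetBytes([]byte{0}) // zero decoded from bytes: empty, non-nil abs
        fmt.Println(a.Cmp(b) == 0)           // true: numerically equal
        // Deep equality sees the nil vs empty internal slice and
        // reports the two zeros as different values.
        fmt.Println(reflect.DeepEqual(a, b)) // false
    }
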

File diff suppressed because one or more lines are too long

txselector/metrics.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package txselector
import "github.com/prometheus/client_golang/prometheus"
var (
metricGetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l2_txselecton_total",
Help: "",
},
)
metricGetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l1_l2_txselecton_total",
Help: "",
},
)
metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_coordinator_txs",
Help: "",
},
)
metricSelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_user_txs",
Help: "",
},
)
metricSelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l2_txs",
Help: "",
},
)
metricDiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_discarded_l2_txs",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricGetL2TxSelection)
prometheus.MustRegister(metricGetL1L2TxSelection)
prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
prometheus.MustRegister(metricSelectedL1UserTxs)
prometheus.MustRegister(metricSelectedL2Txs)
prometheus.MustRegister(metricDiscardedL2Txs)
}
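
The counters and gauges above only become visible once the default registry is exposed over HTTP, which is what the promhttp handler wired into the DebugAPI earlier in this diff does. A minimal self-contained sketch (metric name and address are hypothetical):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    var selectedTxs = prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_selected_txs", // hypothetical metric
        Help: "Txs selected in the last batch (sketch)",
    })

    func main() {
        prometheus.MustRegister(selectedTxs)
        selectedTxs.Set(42)
        // Equivalent to debugAPI.GET("/metrics", gin.WrapH(promhttp.Handler())).
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe("127.0.0.1:2112", nil))
    }
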


@@ -3,7 +3,6 @@ package txselector
// current: very simple version of TxSelector // current: very simple version of TxSelector
import ( import (
"bytes"
"fmt" "fmt"
"math/big" "math/big"
"sort" "sort"
@@ -19,19 +18,6 @@ import (
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )
// txs implements the interface Sort for an array of Tx
type txs []common.PoolL2Tx
func (t txs) Len() int {
return len(t)
}
func (t txs) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t txs) Less(i, j int) bool {
return t[i].AbsoluteFee > t[j].AbsoluteFee
}
// CoordAccount contains the data of the Coordinator account, that will be used // CoordAccount contains the data of the Coordinator account, that will be used
// to create new transactions of CreateAccountDeposit type to add new TokenID // to create new transactions of CreateAccountDeposit type to add new TokenID
// accounts for the Coordinator to receive the fees. // accounts for the Coordinator to receive the fees.
@@ -89,12 +75,8 @@ func (txsel *TxSelector) LocalAccountsDB() *statedb.LocalStateDB {
// Reset tells the TxSelector to get it's internal AccountsDB // Reset tells the TxSelector to get it's internal AccountsDB
// from the required `batchNum` // from the required `batchNum`
func (txsel *TxSelector) Reset(batchNum common.BatchNum) error { func (txsel *TxSelector) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
err := txsel.localAccountsDB.Reset(batchNum, true) return tracerr.Wrap(txsel.localAccountsDB.Reset(batchNum, fromSynchronizer))
if err != nil {
return tracerr.Wrap(err)
}
return nil
} }
func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) { func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error) {
@@ -151,9 +133,11 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]common.Idx, func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]common.Idx,
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, discardedL2Txs, err := metricGetL2TxSelection.Inc()
txsel.GetL1L2TxSelection(selectionConfig, []common.L1Tx{}) coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, discardedL2Txs, tracerr.Wrap(err) discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
} }
// GetL1L2TxSelection returns the selection of L1 + L2 txs. // GetL1L2TxSelection returns the selection of L1 + L2 txs.
@@ -165,6 +149,16 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig *SelectionConfig) ([]c
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig, func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL1L2TxSelection.Inc()
coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err)
}
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig *SelectionConfig,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The // WIP.0: the TxSelector is not optimized and will need a redesign. The
@@ -195,14 +189,16 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
} }
// discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch // discardedL2Txs contains an array of the L2Txs that have not been selected in this Batch
var discardedL2Txs []common.PoolL2Tx
var l1CoordinatorTxs []common.L1Tx var l1CoordinatorTxs []common.L1Tx
positionL1 := len(l1UserTxs) positionL1 := len(l1UserTxs)
var accAuths [][]byte var accAuths [][]byte
// sort l2TxsRaw (cropping at MaxTx at this point) // sort l2TxsRaw (cropping at MaxTx at this point)
l2Txs0 := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx) l2Txs0, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.TxProcessorConfig.MaxTx)
for i := range discardedL2Txs {
discardedL2Txs[i].Info = "Tx not selected due to low absolute fee"
}
noncesMap := make(map[common.Idx]common.Nonce) noncesMap := make(map[common.Idx]common.Nonce)
var l2Txs []common.PoolL2Tx var l2Txs []common.PoolL2Tx
@@ -236,7 +232,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) { if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-len(l1UserTxs) {
// discard L2Tx, and update Info parameter of // discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs0[i].Info = "Tx not selected due the L2Tx depends on a L1CoordinatorTx and there is not enough space for L1Coordinator" l2Txs0[i].Info = "Tx not selected because the L2Tx depends on a " +
"L1CoordinatorTx and there is not enough space for L1Coordinator"
discardedL2Txs = append(discardedL2Txs, l2Txs0[i]) discardedL2Txs = append(discardedL2Txs, l2Txs0[i])
continue continue
} }
@@ -261,7 +258,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Amount with current Balance. Discard L2Tx, // not valid Amount with current Balance. Discard L2Tx,
// and update Info parameter of the tx, and add it to // and update Info parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not enough Balance at the sender. Current sender account Balance: %s, Amount+Fee: %s", balance.String(), feeAndAmount.String()) l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not enough Balance at the sender. "+
"Current sender account Balance: %s, Amount+Fee: %s",
balance.String(), feeAndAmount.String())
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -273,7 +272,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// not valid Nonce at tx. Discard L2Tx, and update Info // not valid Nonce at tx. Discard L2Tx, and update Info
// parameter of the tx, and add it to the discardedTxs // parameter of the tx, and add it to the discardedTxs
// array // array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due not current Nonce. Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce) l2Txs[i].Info = fmt.Sprintf("Tx not selected due to not current Nonce. "+
"Tx.Nonce: %d, Account.Nonce: %d", l2Txs[i].Nonce, nonce)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -291,17 +291,30 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig, txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i]) len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i])
if err != nil { if err != nil {
log.Debug(err) log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due %s", err.Error()) l2Txs[i].Info = fmt.Sprintf("Tx not selected (in processTxToEthAddrBJJ) due to %s",
err.Error())
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
if accAuth != nil && l1CoordinatorTx != nil { if l1CoordinatorTx != nil {
accAuths = append(accAuths, accAuth.Signature) // If ToEthAddr == 0xff.. this means that we
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx) // are handling a TransferToBJJ, which doesn't
positionL1++ // require an authorization because it doesn't
// contain a valid ethereum address.
// Otherwise only create the account if we have
// the corresponding authorization
if validL2Tx.ToEthAddr == common.FFAddr {
accAuths = append(accAuths, common.EmptyEthSignature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
} else if accAuth != nil {
accAuths = append(accAuths, accAuth.Signature)
l1CoordinatorTxs = append(l1CoordinatorTxs, *l1CoordinatorTx)
positionL1++
}
} }
if validL2Tx != nil { if validL2Tx != nil {
validTxs = append(validTxs, *validL2Tx) validTxs = append(validTxs, *validL2Tx)
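
A compact sketch of the authorization rule described in the comment above, where common.FFAddr stands for the all-0xff address that marks a transfer-to-BJJ; the helper below is hypothetical, not the node's API:

    package main

    import (
        "fmt"
        "strings"
    )

    // ffAddr models common.FFAddr: the all-0xff "address" used when a
    // recipient has no valid Ethereum address, only a BJJ key.
    var ffAddr = "0x" + strings.Repeat("ff", 20)

    // needsCreationAuth sketches the branch above: an account for the
    // 0xff address can be created by the coordinator with an empty
    // signature, while any real address needs a signed
    // account-creation authorization.
    func needsCreationAuth(toEthAddr string) bool {
        return toEthAddr != ffAddr
    }

    func main() {
        fmt.Println(needsCreationAuth(ffAddr))                        // false: TransferToBJJ case
        fmt.Println(needsCreationAuth("0x" + strings.Repeat("00", 20))) // true: needs auth
    }
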
@@ -314,8 +327,8 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
"ToIdx", l2Txs[i].ToIdx) "ToIdx", l2Txs[i].ToIdx)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToIdx not found in StateDB. ToIdx: %d", l2Txs[i].Info = fmt.Sprintf("Tx not selected due to tx.ToIdx not found in StateDB. "+
l2Txs[i].ToIdx) "ToIdx: %d", l2Txs[i].ToIdx)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
} }
@@ -327,7 +340,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info // Discard L2Tx, and update Info
// parameter of the tx, and add it to // parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due ToEthAddr does not correspond to the Account.EthAddr. tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s", l2Txs[i].Info = fmt.Sprintf("Tx not selected because ToEthAddr "+
"does not correspond to the Account.EthAddr. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, account.EthAddr: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr) l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, receiverAcc.EthAddr)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
@@ -341,7 +356,9 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
// Discard L2Tx, and update Info // Discard L2Tx, and update Info
// parameter of the tx, and add it to // parameter of the tx, and add it to
// the discardedTxs array // the discardedTxs array
l2Txs[i].Info = fmt.Sprintf("Tx not selected due tx.ToBJJ does not correspond to the Account.BJJ. tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s", l2Txs[i].Info = fmt.Sprintf("Tx not selected because tx.ToBJJ "+
"does not correspond to the Account.BJJ. "+
"tx.ToIdx: %d, tx.ToEthAddr: %s, tx.ToBJJ: %s, account.BJJ: %s",
l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ) l2Txs[i].ToIdx, l2Txs[i].ToEthAddr, l2Txs[i].ToBJJ, receiverAcc.BJJ)
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) discardedL2Txs = append(discardedL2Txs, l2Txs[i])
continue continue
@@ -415,7 +432,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
log.Error(err) log.Error(err)
// Discard L2Tx, and update Info parameter of the tx, // Discard L2Tx, and update Info parameter of the tx,
// and add it to the discardedTxs array // and add it to the discardedTxs array
selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due %s", err.Error()) selectedL2Txs[i].Info = fmt.Sprintf("Tx not selected (in ProcessL2Tx) due to %s", err.Error())
discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i]) discardedL2Txs = append(discardedL2Txs, selectedL2Txs[i])
continue continue
} }
@@ -447,6 +464,11 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig *SelectionConfig,
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
} }
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(finalL2Txs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, finalL2Txs, discardedL2Txs, nil
} }
@@ -471,8 +493,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
var l1CoordinatorTx *common.L1Tx var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth var accAuth *common.AccountCreationAuth
if !bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.EmptyAddr.Bytes()) && if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
!bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) {
// case: ToEthAddr != 0x00 nor 0xff // case: ToEthAddr != 0x00 nor 0xff
if l2Tx.ToBJJ != common.EmptyBJJComp { if l2Tx.ToBJJ != common.EmptyBJJComp {
// case: ToBJJ!=0: // case: ToBJJ!=0:
@@ -528,8 +549,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
DepositAmount: big.NewInt(0), DepositAmount: big.NewInt(0),
Type: common.TxTypeCreateAccountDeposit, Type: common.TxTypeCreateAccountDeposit,
} }
} else if bytes.Equal(l2Tx.ToEthAddr.Bytes(), common.FFAddr.Bytes()) && } else if l2Tx.ToEthAddr == common.FFAddr && l2Tx.ToBJJ != common.EmptyBJJComp {
l2Tx.ToBJJ != common.EmptyBJJComp {
// if idx exist for EthAddr&BJJ use it // if idx exist for EthAddr&BJJ use it
_, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ, _, err := txsel.localAccountsDB.GetIdxByEthAddrBJJ(l2Tx.ToEthAddr, l2Tx.ToBJJ,
l2Tx.TokenID) l2Tx.TokenID)
@@ -555,7 +575,8 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
} }
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs { if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1UserTxs)-nL1UserTxs {
// L2Tx discarded // L2Tx discarded
return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due not slots for L1CoordinatorTx to create a new account for receiver of L2Tx")) return nil, nil, nil, tracerr.Wrap(fmt.Errorf("L2Tx discarded due to no available slots " +
"for L1CoordinatorTx to create a new account for receiver of L2Tx"))
} }
return &l2Tx, l1CoordinatorTx, accAuth, nil return &l2Tx, l1CoordinatorTx, accAuth, nil
@@ -564,7 +585,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ { for i := 0; i < len(l1CoordinatorTxs); i++ {
if bytes.Equal(l1CoordinatorTxs[i].FromEthAddr.Bytes(), addr.Bytes()) && if l1CoordinatorTxs[i].FromEthAddr == addr &&
l1CoordinatorTxs[i].TokenID == tokenID && l1CoordinatorTxs[i].TokenID == tokenID &&
l1CoordinatorTxs[i].FromBJJ == bjj { l1CoordinatorTxs[i].FromBJJ == bjj {
return true return true
@@ -574,21 +595,33 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
} }
// getL2Profitable returns the profitable selection of L2Txs sorted by Nonce
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) []common.PoolL2Tx { func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx,
sort.Sort(txs(l2Txs)) []common.PoolL2Tx) {
if len(l2Txs) < int(max) { // First sort by nonce so that txs from the same account are sorted so
return l2Txs // that they could be applied in succession.
sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce
})
// Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
})
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
} }
l2Txs = l2Txs[:max]
// sort l2Txs by Nonce. This can be done in many different ways, what // sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each // is needed is to output the l2Txs where the Nonce of l2Txs for each
// Account is sorted, but the l2Txs can not be grouped by sender Account // Account is sorted, but the l2Txs can not be grouped by sender Account
// neither by Fee. This is because later on the Nonces will need to be // neither by Fee. This is because later on the Nonces will need to be
// sequential for the zkproof generation. // sequential for the zkproof generation.
sort.SliceStable(l2Txs, func(i, j int) bool { sort.Slice(l2Txs, func(i, j int) bool {
return l2Txs[i].Nonce < l2Txs[j].Nonce return l2Txs[i].Nonce < l2Txs[j].Nonce
}) })
return l2Txs return l2Txs, discardedL2Txs
} }
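
The two-pass sort introduced above can be seen in isolation: ordering by nonce first and then stable-sorting by descending fee keeps the nonce order within each fee tie, so txs from one account remain applicable in sequence even after the fee-based crop. A minimal sketch:

    package main

    import (
        "fmt"
        "sort"
    )

    type poolTx struct {
        Nonce int
        Fee   float64
    }

    func main() {
        txs := []poolTx{{2, 5}, {0, 5}, {1, 9}, {1, 5}, {0, 9}}
        // Pass 1: order by Nonce.
        sort.Slice(txs, func(i, j int) bool { return txs[i].Nonce < txs[j].Nonce })
        // Pass 2: stable sort by descending fee; equal-fee txs keep
        // the nonce order established in pass 1.
        sort.SliceStable(txs, func(i, j int) bool { return txs[i].Fee > txs[j].Fee })
        fmt.Println(txs) // [{0 9} {1 9} {0 5} {1 5} {2 5}]
    }
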


@@ -29,7 +29,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 	pass := os.Getenv("POSTGRES_PASS")
 	db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
 	require.NoError(t, err)
-	l2DB := l2db.NewL2DB(db, 10, 100, 24*time.Hour, nil)
+	l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil)

 	dir, err := ioutil.TempDir("", "tmpdb")
 	require.NoError(t, err)
@@ -48,7 +48,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
 		BJJ:                 coordUser.BJJ.Public().Compress(),
 		AccountCreationAuth: nil,
 	}
-	fmt.Printf("%v", coordAccount)
+	// fmt.Printf("%v\n", coordAccount)
 	auth := common.AccountCreationAuth{
 		EthAddr: coordUser.Addr,
 		BJJ:     coordUser.BJJ.Public().Compress(),
@@ -106,7 +106,7 @@ func addTokens(t *testing.T, tc *til.Context, db *sqlx.DB) {
 		})
 	}

-	hdb := historydb.NewHistoryDB(db, nil)
+	hdb := historydb.NewHistoryDB(db, db, nil)
 	assert.NoError(t, hdb.AddBlock(&common.Block{
 		Num: 1,
 	}))
@@ -497,3 +497,215 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
 	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
 	require.NoError(t, err)
 }
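The new tests below insert pool transactions through an addL2Txs helper that is defined earlier in this test file and not shown in this diff. A plausible reconstruction of such a helper within the same test package, assuming a test-only insert l2db.AddTxTest (an assumption, not confirmed by this diff):

// Hypothetical reconstruction for readability; the real helper lives
// earlier in txselector_test.go.
func addL2Txs(t *testing.T, txsel *TxSelector, poolL2Txs []common.PoolL2Tx) {
	t.Helper()
	for i := 0; i < len(poolL2Txs); i++ {
		// AddTxTest (assumed) inserts the tx into the pool, skipping the
		// API-level validations of the production insert path.
		err := txsel.l2db.AddTxTest(&poolL2Txs[i])
		require.NoError(t, err)
	}
}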
func TestTransferToBjj(t *testing.T) {
set := `
Type: Blockchain
AddToken(1)
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
CreateAccountDeposit(1) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart the nonces of TilContext, as they will be set directly when
// generating the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
addTokens(t, tc, txsel.l2db.DB())
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 20,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// The transfer is ToBJJ to a BJJ-only account that doesn't exist yet,
// so the coordinator will create it via L1CoordTx.
batchPoolL2 := `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
require.Equal(t, 1, len(oL1CoordTxs))
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[0].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[0].FromBJJ)
// fmt.Printf("DBG l1CoordTx[0]: %+v\n", oL1CoordTxs[0])
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// Now the BJJ-only account for B is already created, so the transfer
// happens without an L1CoordTx that creates the user account.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(0) A-B: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
assert.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// The transfer now is ToBJJ to a BJJ-only account that doesn't exist
// yet, so the coordinator will create it via L1CoordTx. Since it's a
// transfer of a token for which the coordinator doesn't have a fee
// account, another L1CoordTx will be created for the coordinator to
// receive the fees.
batchPoolL2 = `
Type: PoolL2
PoolTransferToBJJ(1) B-A: 50 (126)
`
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add one L1CoordTx to create an account
// in which the coordinator will receive the fees, and another one for
// the recipient of the l2tx
assert.Equal(t, 2, len(oL1CoordTxs))
// [0] Coordinator account creation for token 1
assert.Equal(t, tc.Users["Coord"].Addr, oL1CoordTxs[0].FromEthAddr)
// [1] User A BJJ-only account creation for token 1
assert.Equal(t, poolL2Txs[0].ToEthAddr, oL1CoordTxs[1].FromEthAddr)
assert.Equal(t, poolL2Txs[0].ToBJJ, oL1CoordTxs[1].FromBJJ)
assert.Equal(t, common.TokenID(1), oL1CoordTxs[1].TokenID)
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
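The third batch exercises the rule that the coordinator needs its own account in a token before it can collect fees in that token. A simplified, hypothetical sketch of that rule (types and names invented for illustration; not the real TxSelector code):

package main

import "fmt"

// TokenID mirrors common.TokenID for this sketch.
type TokenID uint32

type l1CoordTx struct {
	FromEthAddr string
	TokenID     TokenID
}

// ensureCoordAccount queues an extra L1CoordTx when the coordinator has no
// account yet for the token being transferred, so the fees can be received.
func ensureCoordAccount(coordAddr string, coordAccounts map[TokenID]bool,
	txs []l1CoordTx, tokenID TokenID) []l1CoordTx {
	if coordAccounts[tokenID] {
		return txs
	}
	coordAccounts[tokenID] = true
	return append(txs, l1CoordTx{FromEthAddr: coordAddr, TokenID: tokenID})
}

func main() {
	coordAccounts := map[TokenID]bool{0: true} // token 0 account exists
	var txs []l1CoordTx
	txs = ensureCoordAccount("0xCoord...", coordAccounts, txs, 1)
	fmt.Println(len(txs)) // 1: coordinator account creation for token 1
}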
func TestTransferManyFromSameAccount(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 1000
CreateAccountDeposit(0) B: 1000
> batchL1 // freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart the nonces of TilContext, as they will be set directly when
// generating the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
selectionConfig := &SelectionConfig{
MaxL1UserTxs: 5,
TxProcessorConfig: tpc,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
// 11 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126) // 1
PoolTransfer(0) A-B: 10 (126) // 2
PoolTransfer(0) A-B: 10 (126) // 3
PoolTransfer(0) A-B: 10 (126) // 4
PoolTransfer(0) A-B: 10 (126) // 5
PoolTransfer(0) A-B: 10 (126) // 6
PoolTransfer(0) A-B: 10 (126) // 7
PoolTransfer(0) A-B: 10 (126) // 8
PoolTransfer(0) A-B: 10 (126) // 9
PoolTransfer(0) A-B: 10 (126) // 10
PoolTransfer(0) A-B: 10 (126) // 11
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// reorder poolL2Txs so that nonces are not sorted
poolL2Txs[0], poolL2Txs[7] = poolL2Txs[7], poolL2Txs[0]
poolL2Txs[1], poolL2Txs[10] = poolL2Txs[10], poolL2Txs[1]
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and select the L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 7, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs), txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
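The counts asserted above are consistent with a simple slot budget, assuming the selector caps the L2 selection at MaxTx minus the L1 txs included in the batch (an assumption about the internal accounting, not stated in this diff):

package main

import "fmt"

func main() {
	maxTx := 10             // txprocessor.Config.MaxTx in the test above
	l1User, l1Coord := 3, 0 // asserted len(oL1UserTxs) and len(oL1CoordTxs)
	fmt.Println(maxTx - l1User - l1Coord) // 7, matching the asserted len(oL2Txs)
}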