Compare commits

...

36 Commits

Author SHA1 Message Date
arnaucube
6f1a44df02 TxSel avoid L1CoordTx for L2Tx that have a L1UserTx in the frozen queue
For L2Txs of type TransferToEthAddr & TransferToBJJ sent to a not-yet
existing account, the TxSelector now checks whether the receiver account
will be created by an L1UserTx that is already in the frozen queue (i.e. in
the next batch). In that case the L2Tx is discarded for the current batch,
even if there is an AccountCreationAuth for the account, because the
L1UserTx in the frozen queue will create the receiver account. Discarding
the L2Tx prevents the Coordinator from issuing a new L1CoordinatorTx to
create the receiver account, which would also be created in the next batch
by the L1UserTx in the frozen queue, leaving the user with 2 different
accounts for the same TokenID.
The double account creation is supported by the Hermez zkRollup
specification, but it was decided to mitigate it at the TxSelector level
for the cases described above.
2021-03-31 13:02:51 +02:00
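A minimal Go sketch of the check described in the commit above. The types and helper are hypothetical simplifications rather than the actual hermez-node TxSelector code; they only illustrate the rule: an L2Tx to a not-yet-existing account is dropped for the current batch when an account-creating L1UserTx for the same address and TokenID is already waiting in the frozen queue.

```go
package main

import "fmt"

// Hypothetical, simplified types; the real hermez-node structures are richer.
type L1UserTx struct {
	ToEthAddr      string
	TokenID        uint32
	CreatesAccount bool
}

type L2Tx struct {
	ToEthAddr string
	TokenID   uint32
}

// discardForFrozenQueue reports whether the L2Tx should be left out of the
// current batch because an L1UserTx already in the frozen queue will create
// the receiver account in the next batch, making an L1CoordinatorTx redundant.
func discardForFrozenQueue(tx L2Tx, frozenQueue []L1UserTx) bool {
	for _, l1tx := range frozenQueue {
		if l1tx.CreatesAccount && l1tx.ToEthAddr == tx.ToEthAddr && l1tx.TokenID == tx.TokenID {
			return true
		}
	}
	return false
}

func main() {
	queue := []L1UserTx{{ToEthAddr: "0xabc", TokenID: 1, CreatesAccount: true}}
	// true: the frozen L1UserTx will create the account, so no L1CoordinatorTx is needed.
	fmt.Println(discardForFrozenQueue(L2Tx{ToEthAddr: "0xabc", TokenID: 1}, queue))
}
```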
Danilo Pantani
3fedcb7023 Merge pull request #636 from hermeznetwork/feature/metric-package
instrumenting the application
2021-03-29 20:03:13 -03:00
Eduard S
3f643f022a Merge pull request #677 from hermeznetwork/feature/fastsync-get-headerByNumber
Faster synchronization by fetching only block headers
2021-03-29 11:10:24 +02:00
Danilo Pantani
b8d339d568 Merge pull request #670 from hermeznetwork/fix/remove-release-os
fix the invalid goarch build
2021-03-26 16:16:29 -03:00
Pantani
9245247ee4 create a gin middleware to collect request metrics and export them to the Prometheus route 2021-03-25 09:31:29 -03:00
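The commit above wires request metrics into the API through a gin middleware and exposes them on a Prometheus route. A self-contained sketch of that pattern follows; the metric name, labels, and port are illustrative assumptions, not the hermez-node metric package itself:

```go
package main

import (
	"strconv"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Histogram of request durations, labelled by route, method and status code.
	reqDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_request_duration_seconds",
		Help: "Duration of HTTP requests.",
	}, []string{"path", "method", "code"})
	prometheus.MustRegister(reqDuration)

	r := gin.New()
	// Middleware: time every request and record it once the handler returns.
	r.Use(func(c *gin.Context) {
		start := time.Now()
		c.Next()
		reqDuration.WithLabelValues(c.FullPath(), c.Request.Method,
			strconv.Itoa(c.Writer.Status())).Observe(time.Since(start).Seconds())
	})
	// Expose the collected metrics for Prometheus to scrape.
	r.GET("/metrics", gin.WrapH(promhttp.Handler()))
	_ = r.Run(":8080")
}
```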
Eduard S
6c1c157bc3 Merge pull request #672 from hermeznetwork/feature/configurable-recommendedfee-strategy
Add configuration option to choose recommended fee strategy, and add …
2021-03-25 12:22:57 +01:00
arnaubennassar
f9ddf88c93 Add configuration option to choose recommended fee strategy, and add static strategy 2021-03-25 12:13:04 +01:00
Pantani
a1eea43443 fix the invalid goarch build and avoid calling the migration-pack each build 2021-03-24 10:41:32 -03:00
Oleksandr Brezhniev
2125812e90 Faster synchronization with usage of HeaderByNumber instead of BlockByNumber 2021-03-23 23:09:31 +02:00
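The change merged above speeds up synchronization by fetching only block headers. The gist, sketched with go-ethereum's ethclient (the RPC URL and block number are placeholders): HeaderByNumber returns just the header, while BlockByNumber would also download every transaction in the block, which is far heavier when only metadata is needed.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("https://example-rpc.invalid") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Header only: number, hash, parent hash, timestamp, etc., without the
	// transactions that BlockByNumber would also fetch.
	header, err := client.HeaderByNumber(context.Background(), big.NewInt(14000000))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(header.Number, header.Hash().Hex(), header.Time)
}
```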
Danilo Pantani
f07fd82822 Merge pull request #653 from hermeznetwork/feature/goreleaser-integration
Automatically generating releases with GoReleaser
2021-03-23 13:52:41 -03:00
arnau
d465d51e78 Merge pull request #667 from hermeznetwork/feature/documentpackages
Document synchronizer, node and coordinator
2021-03-23 13:33:40 +01:00
Eduard S
e23d0a07d2 Document synchronizer, node and coordinator 2021-03-23 13:19:23 +01:00
Pantani
6d84d143a2 Measure the server proof duration 2021-03-23 01:40:45 -03:00
Pantani
3b3d96e07c create the metrics package for better app instrumentation 2021-03-23 01:40:32 -03:00
Danilo Pantani
e9be904c2f Merge pull request #656 from hermeznetwork/feature/license
add AGPLv3 License
2021-03-22 20:45:24 -03:00
Pantani
88b17cbe99 add release distribution with Goreleaser (https://goreleaser.com) 2021-03-22 20:44:27 -03:00
Danilo Pantani
1ffe437538 Merge pull request #657 from hermeznetwork/feature/documentserveapi
Add minimum documentation of serveapi command
2021-03-22 16:02:42 -03:00
arnau
e2f9a1d7eb Merge pull request #650 from hermeznetwork/fix/serveapi-flags
fix the application initialization flags
2021-03-22 18:14:03 +01:00
arnau
999dde5621 Merge pull request #662 from hermeznetwork/feature/improveSCinits
Bound the filter of init SC events by blocks
2021-03-22 18:12:53 +01:00
Eduard S
aa82efc868 Bound the filter of init SC events by blocks
When querying the init event for each smart contract, bound the search to
the range from (genesis block - 24h of blocks) to the genesis block.
Otherwise, this query takes too long on a geth node synced to mainnet.
2021-03-22 17:55:30 +01:00
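A small sketch of the bounded query described above, using go-ethereum's bind.FilterOpts. The function name and the blocks-per-day constant are assumptions for illustration; the intent is simply to restrict the init-event search to roughly the 24h of blocks before the contract's genesis block instead of scanning the whole chain.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// filterOptsAroundGenesis covers roughly the last 24h of blocks before the
// contract's genesis block, up to the genesis block itself.
// blocksPerDay (~6500 at ~13s mainnet block time) is an assumption.
func filterOptsAroundGenesis(genesisBlock uint64) *bind.FilterOpts {
	const blocksPerDay = 6500
	start := uint64(0)
	if genesisBlock > blocksPerDay {
		start = genesisBlock - blocksPerDay
	}
	end := genesisBlock
	return &bind.FilterOpts{Start: start, End: &end}
}

func main() {
	opts := filterOptsAroundGenesis(13000000)
	fmt.Println(opts.Start, *opts.End) // 12993500 13000000
}
```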
a_bennassar
2184084408 Merge pull request #659 from hermeznetwork/feature/move-api-subpackages
Move apitypes & stateapiupdater into api dir
2021-03-22 17:02:38 +01:00
arnaucube
206c8e6e8f Move apitypes & stateapiupdater into api dir 2021-03-22 16:51:10 +01:00
Eduard S
c2c74e14f1 Merge pull request #655 from hermeznetwork/feature/api-versioning
Add API versioning
2021-03-22 16:38:12 +01:00
a_bennassar
80e20f3cf1 Merge pull request #658 from hermeznetwork/feature/update-sql-func
Add account_state view
2021-03-22 16:37:46 +01:00
arnaucube
d9741da43b Add API versioning 2021-03-22 16:33:38 +01:00
Eduard S
8c51cfb3d2 Add minimum documentation of serveapi command 2021-03-22 15:41:35 +01:00
laisolizq
7b297c77da Add account_state view 2021-03-22 14:43:24 +01:00
Pantani
9a863eadc4 add AGPLv3 License 2021-03-22 09:26:58 -03:00
Eduard S
d80e3a8988 Merge pull request #648 from hermeznetwork/feature/txsel-forgablepriority
Update TxSel selection to prioritize the forgable
2021-03-22 13:14:53 +01:00
arnaucube
2873ce875a Update TxSel selection to prioritize the forgable
Prior to this commit, when len(nonForgableL2Txs) > maxL2Txs and the
nonForgableL2Txs carried higher fees than the forgableL2Txs, neither the
forgable nor the non-forgable txs were ever forged. Now the TxSelector
first forges the forgableTxs (those that are forgable given the initial
state of the accounts, i.e. balances & nonces), and then the
nonForgableL2Txs, which may become forgable once the forgable ones have
been processed.
2021-03-22 13:03:47 +01:00
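A toy illustration of the new ordering, with hypothetical types: forgable txs (valid against the accounts' current balances & nonces) are selected first, ordered by fee, and only the remaining slots go to non-forgable txs, so high-fee non-forgable txs can no longer starve the forgable ones.

```go
package main

import (
	"fmt"
	"sort"
)

// PoolTx is a hypothetical, simplified pool transaction; the real TxSelector
// works on richer types.
type PoolTx struct {
	ID       int
	Fee      float64
	Forgable bool // spendable with the account's current balance & nonce
}

// selectTxs orders forgable txs first (by fee, descending), then non-forgable
// ones, and keeps at most maxTxs of them.
func selectTxs(pool []PoolTx, maxTxs int) []PoolTx {
	sort.Slice(pool, func(i, j int) bool {
		if pool[i].Forgable != pool[j].Forgable {
			return pool[i].Forgable // forgable before non-forgable
		}
		return pool[i].Fee > pool[j].Fee // then higher fee first
	})
	if len(pool) > maxTxs {
		pool = pool[:maxTxs]
	}
	return pool
}

func main() {
	pool := []PoolTx{{1, 0.5, false}, {2, 0.1, true}, {3, 0.9, false}, {4, 0.2, true}}
	// Forgable txs 4 and 2 come first, then the highest-fee non-forgable tx 3.
	fmt.Println(selectTxs(pool, 3))
}
```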
Eduard S
10cfc91250 Merge pull request #651 from hermeznetwork/feature/total-txs-in-pool
API change names and add poolLoad, add maxFeeUSD
2021-03-22 11:59:09 +01:00
Eduard S
f6765f82bf Merge pull request #654 from hermeznetwork/feature/api-pending-l1s
Add pending L1 txs to API
2021-03-22 11:58:48 +01:00
arnaubennassar
90126a03a2 API change names and add poolLoad, add maxFeeUSD 2021-03-22 11:52:23 +01:00
arnaubennassar
3fcec947b4 Add pending L1 txs to API 2021-03-22 11:50:37 +01:00
Pantani
dc198c35ae fix the import key command in the example script 2021-03-19 22:44:51 -03:00
Pantani
ae0858df3d fix the serve api flags 2021-03-18 12:02:18 -03:00
60 changed files with 2036 additions and 471 deletions

.github/workflows/release.yml (new file, +29)

@@ -0,0 +1,29 @@
name: goreleaser
on:
push:
tags:
- '*'
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
-
name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (+1)

@@ -1 +1,2 @@
bin/ bin/
dist/

.goreleaser.yml (new file, +35)

@@ -0,0 +1,35 @@
before:
hooks:
- go mod download
- make migration-pack
builds:
- main: ./cli/node/main.go
binary: node
id: node
goos:
- linux
- darwin
goarch:
- amd64
archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'

LICENSE (new file, +661)

@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

Makefile

@@ -3,8 +3,8 @@
# Project variables. # Project variables.
PACKAGE := github.com/hermeznetwork/hermez-node PACKAGE := github.com/hermeznetwork/hermez-node
VERSION := $(shell git describe --tags --always) VERSION := $(shell git describe --tags --always)
BUILD := $(shell git rev-parse --short HEAD) COMMIT := $(shell git rev-parse --short HEAD)
BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z) DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
PROJECT_NAME := $(shell basename "$(PWD)") PROJECT_NAME := $(shell basename "$(PWD)")
# Go related variables. # Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
POSTGRES_PASS ?= yourpasswordhere POSTGRES_PASS ?= yourpasswordhere
# Use linker flags to provide version/build settings. # Use linker flags to provide version/build settings.
LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)" LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"
# PID file will keep the process id of the server. # PID file will keep the process id of the server.
PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
@@ -94,11 +94,11 @@ install:
@echo " > Checking if there is any missing dependencies..." @echo " > Checking if there is any missing dependencies..."
$(GOENVVARS) go get $(GOCMD)/... $(get) $(GOENVVARS) go get $(GOCMD)/... $(get)
## run: Run Hermez node. ## run-node: Run Hermez node.
run: run-node:
@bash -c "$(MAKE) clean build" @bash -c "$(MAKE) clean build"
@echo " > Running $(PROJECT_NAME)" @echo " > Running $(PROJECT_NAME)"
@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run @$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)
## run-proof-mock: Run proof server mock API. ## run-proof-mock: Run proof server mock API.
run-proof-mock: stop-proof-mock run-proof-mock: stop-proof-mock

README.md

@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
After setting the config, you can build and run the Hermez Node as a synchronizer: After setting the config, you can build and run the Hermez Node as a synchronizer:
```shell ```shell
$ make run $ make run-node
``` ```
Or build and run as a coordinator, and also passing the config file from other location: Or build and run as a coordinator, and also passing the config file from other location:
```shell ```shell
$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run $ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
``` ```
To check the useful make commands: To check the useful make commands:


@@ -5,7 +5,7 @@ import (
"strconv" "strconv"
"testing" "testing"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure" "github.com/mitchellh/copystructure"


@@ -7,7 +7,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )


@@ -7,6 +7,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
@@ -50,38 +51,46 @@ func NewAPI(
hermezAddress: consts.HermezAddress, hermezAddress: consts.HermezAddress,
} }
middleware, err := metric.PrometheusMiddleware()
if err != nil {
return nil, err
}
server.Use(middleware)
v1 := server.Group("/v1")
// Add coordinator endpoints // Add coordinator endpoints
if coordinatorEndpoints { if coordinatorEndpoints {
// Account // Account
server.POST("/account-creation-authorization", a.postAccountCreationAuth) v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
server.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth) v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
// Transaction // Transaction
server.POST("/transactions-pool", a.postPoolTx) v1.POST("/transactions-pool", a.postPoolTx)
server.GET("/transactions-pool/:id", a.getPoolTx) v1.GET("/transactions-pool/:id", a.getPoolTx)
} }
// Add explorer endpoints // Add explorer endpoints
if explorerEndpoints { if explorerEndpoints {
// Account // Account
server.GET("/accounts", a.getAccounts) v1.GET("/accounts", a.getAccounts)
server.GET("/accounts/:accountIndex", a.getAccount) v1.GET("/accounts/:accountIndex", a.getAccount)
server.GET("/exits", a.getExits) v1.GET("/exits", a.getExits)
server.GET("/exits/:batchNum/:accountIndex", a.getExit) v1.GET("/exits/:batchNum/:accountIndex", a.getExit)
// Transaction // Transaction
server.GET("/transactions-history", a.getHistoryTxs) v1.GET("/transactions-history", a.getHistoryTxs)
server.GET("/transactions-history/:id", a.getHistoryTx) v1.GET("/transactions-history/:id", a.getHistoryTx)
// Status // Status
server.GET("/batches", a.getBatches) v1.GET("/batches", a.getBatches)
server.GET("/batches/:batchNum", a.getBatch) v1.GET("/batches/:batchNum", a.getBatch)
server.GET("/full-batches/:batchNum", a.getFullBatch) v1.GET("/full-batches/:batchNum", a.getFullBatch)
server.GET("/slots", a.getSlots) v1.GET("/slots", a.getSlots)
server.GET("/slots/:slotNum", a.getSlot) v1.GET("/slots/:slotNum", a.getSlot)
server.GET("/bids", a.getBids) v1.GET("/bids", a.getBids)
server.GET("/state", a.getState) v1.GET("/state", a.getState)
server.GET("/config", a.getConfig) v1.GET("/config", a.getConfig)
server.GET("/tokens", a.getTokens) v1.GET("/tokens", a.getTokens)
server.GET("/tokens/:id", a.getToken) v1.GET("/tokens/:id", a.getToken)
server.GET("/coordinators", a.getCoordinators) v1.GET("/coordinators", a.getCoordinators)
} }
return a, nil return a, nil


@@ -19,12 +19,12 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
swagger "github.com/getkin/kin-openapi/openapi3filter" swagger "github.com/getkin/kin-openapi/openapi3filter"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/stateapiupdater"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"
"github.com/hermeznetwork/hermez-node/test/til" "github.com/hermeznetwork/hermez-node/test/til"
"github.com/hermeznetwork/hermez-node/test/txsets" "github.com/hermeznetwork/hermez-node/test/txsets"
@@ -40,8 +40,8 @@ type Pendinger interface {
New() Pendinger New() Pendinger
} }
const apiAddr = ":4010" const apiPort = "4010"
const apiURL = "http://localhost" + apiAddr + "/" const apiURL = "http://localhost:" + apiPort + "/v1/"
var SetBlockchain = ` var SetBlockchain = `
Type: Blockchain Type: Blockchain
@@ -209,7 +209,7 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
// L2DB // L2DB
l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DB := l2db.NewL2DB(database, database, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB test.WipeDB(l2DB.DB()) // this will clean HistoryDB and L2DB
// Config (smart contract constants) // Config (smart contract constants)
chainID := uint16(0) chainID := uint16(0)
@@ -240,6 +240,7 @@ func TestMain(m *testing.M) {
nodeConfig := &historydb.NodeConfig{ nodeConfig := &historydb.NodeConfig{
MaxPoolTxs: 10, MaxPoolTxs: 10,
MinFeeUSD: 0, MinFeeUSD: 0,
MaxFeeUSD: 10000000000,
} }
if err := hdb.SetNodeConfig(nodeConfig); err != nil { if err := hdb.SetNodeConfig(nodeConfig); err != nil {
panic(err) panic(err)
@@ -257,7 +258,7 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
// Start server // Start server
listener, err := net.Listen("tcp", apiAddr) //nolint:gosec listener, err := net.Listen("tcp", ":"+apiPort) //nolint:gosec
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -521,11 +522,16 @@ func TestMain(m *testing.M) {
WithdrawalDelay: uint64(3000), WithdrawalDelay: uint64(3000),
} }
stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{ stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
Rollup: rollupVars, Rollup: rollupVars,
Auction: auctionVars, Auction: auctionVars,
WDelayer: wdelayerVars, WDelayer: wdelayerVars,
}, constants) }, constants, &stateapiupdater.RecommendedFeePolicy{
PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
})
if err != nil {
panic(err)
}
// Generate test data, as expected to be received/sended from/to the API // Generate test data, as expected to be received/sended from/to the API
testCoords := genTestCoordinators(commonCoords) testCoords := genTestCoordinators(commonCoords)
@@ -615,13 +621,13 @@ func TestTimeout(t *testing.T) {
hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO) hdbTO := historydb.NewHistoryDB(databaseTO, databaseTO, apiConnConTO)
require.NoError(t, err) require.NoError(t, err)
// L2DB // L2DB
l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 24*time.Hour, apiConnConTO) l2DBTO := l2db.NewL2DB(databaseTO, databaseTO, 10, 1000, 1.0, 1000.0, 24*time.Hour, apiConnConTO)
// API // API
apiGinTO := gin.Default() apiGinTO := gin.Default()
finishWait := make(chan interface{}) finishWait := make(chan interface{})
startWait := make(chan interface{}) startWait := make(chan interface{})
apiGinTO.GET("/wait", func(c *gin.Context) { apiGinTO.GET("/v1/wait", func(c *gin.Context) {
cancel, err := apiConnConTO.Acquire() cancel, err := apiConnConTO.Acquire()
defer cancel() defer cancel()
require.NoError(t, err) require.NoError(t, err)
@@ -649,9 +655,9 @@ func TestTimeout(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
client := &http.Client{} client := &http.Client{}
httpReq, err := http.NewRequest("GET", "http://localhost:4444/tokens", nil) httpReq, err := http.NewRequest("GET", "http://localhost:4444/v1/tokens", nil)
require.NoError(t, err) require.NoError(t, err)
httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/wait", nil) httpReqWait, err := http.NewRequest("GET", "http://localhost:4444/v1/wait", nil)
require.NoError(t, err) require.NoError(t, err)
// Request that will get timed out // Request that will get timed out
var wg sync.WaitGroup var wg sync.WaitGroup


@@ -7,7 +7,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure" "github.com/mitchellh/copystructure"


@@ -4,7 +4,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/mitchellh/copystructure" "github.com/mitchellh/copystructure"


@@ -8,6 +8,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/russross/meddler" "github.com/russross/meddler"
@@ -46,7 +47,9 @@ var (
func retSQLErr(err error, c *gin.Context) { func retSQLErr(err error, c *gin.Context) {
log.Warnw("HTTP API SQL request error", "err", err) log.Warnw("HTTP API SQL request error", "err", err)
errMsg := tracerr.Unwrap(err).Error() unwrapErr := tracerr.Unwrap(err)
metric.CollectError(unwrapErr)
errMsg := unwrapErr.Error()
retDupKey := func(errCode pq.ErrorCode) { retDupKey := func(errCode pq.ErrorCode) {
// https://www.postgresql.org/docs/current/errcodes-appendix.html // https://www.postgresql.org/docs/current/errcodes-appendix.html
if errCode == "23505" { if errCode == "23505" {
@@ -80,6 +83,7 @@ func retSQLErr(err error, c *gin.Context) {
func retBadReq(err error, c *gin.Context) { func retBadReq(err error, c *gin.Context) {
log.Warnw("HTTP API Bad request error", "err", err) log.Warnw("HTTP API Bad request error", "err", err)
metric.CollectError(err)
c.JSON(http.StatusBadRequest, errorMsg{ c.JSON(http.StatusBadRequest, errorMsg{
Message: err.Error(), Message: err.Error(),
}) })


@@ -4,7 +4,7 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -145,8 +145,8 @@ func TestUpdateMetrics(t *testing.T) {
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0)) assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0)) assert.Greater(t, ni.StateAPI.Metrics.BatchFrequency, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0)) assert.Greater(t, ni.StateAPI.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalAccounts, int64(0)) assert.Greater(t, ni.StateAPI.Metrics.TokenAccounts, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.TotalBJJs, int64(0)) assert.Greater(t, ni.StateAPI.Metrics.Wallets, int64(0))
assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0)) assert.Greater(t, ni.StateAPI.Metrics.AvgTransactionFee, float64(0))
} }
@@ -210,8 +210,8 @@ func TestGetState(t *testing.T) {
assert.Greater(t, status.Metrics.TransactionsPerBatch, float64(0)) assert.Greater(t, status.Metrics.TransactionsPerBatch, float64(0))
assert.Greater(t, status.Metrics.BatchFrequency, float64(0)) assert.Greater(t, status.Metrics.BatchFrequency, float64(0))
assert.Greater(t, status.Metrics.TransactionsPerSecond, float64(0)) assert.Greater(t, status.Metrics.TransactionsPerSecond, float64(0))
assert.Greater(t, status.Metrics.TotalAccounts, int64(0)) assert.Greater(t, status.Metrics.TokenAccounts, int64(0))
assert.Greater(t, status.Metrics.TotalBJJs, int64(0)) assert.Greater(t, status.Metrics.Wallets, int64(0))
assert.Greater(t, status.Metrics.AvgTransactionFee, float64(0)) assert.Greater(t, status.Metrics.AvgTransactionFee, float64(0))
// Recommended fee // Recommended fee
// TODO: perform real asserts (not just greater than 0) // TODO: perform real asserts (not just greater than 0)


@@ -2,10 +2,12 @@ package stateapiupdater
import ( import (
"database/sql" "database/sql"
"fmt"
"sync" "sync"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
@@ -17,23 +19,58 @@ type Updater struct {
vars common.SCVariablesPtr vars common.SCVariablesPtr
consts historydb.Constants consts historydb.Constants
rw sync.RWMutex rw sync.RWMutex
rfp *RecommendedFeePolicy
}
// RecommendedFeePolicy describes how the recommended fee is calculated
type RecommendedFeePolicy struct {
PolicyType RecommendedFeePolicyType `validate:"required"`
StaticValue float64
}
// RecommendedFeePolicyType describes the different available recommended fee strategies
type RecommendedFeePolicyType string
const (
// RecommendedFeePolicyTypeStatic always gives the same StaticValue as the recommended fee
RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
// RecommendedFeePolicyTypeAvgLastHour sets the recommended fee using the average fee of the last hour
RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
)
func (rfp *RecommendedFeePolicy) valid() bool {
switch rfp.PolicyType {
case RecommendedFeePolicyTypeStatic:
if rfp.StaticValue == 0 {
log.Warn("RcommendedFee is set to 0 USD, and the policy is static")
}
return true
case RecommendedFeePolicyTypeAvgLastHour:
return true
default:
return false
}
} }
// NewUpdater creates a new Updater // NewUpdater creates a new Updater
func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables, func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
consts *historydb.Constants) *Updater { consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
if ok := rfp.valid(); !ok {
return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
}
u := Updater{ u := Updater{
hdb: hdb, hdb: hdb,
config: *config, config: *config,
consts: *consts, consts: *consts,
state: historydb.StateAPI{ state: historydb.StateAPI{
NodePublicConfig: historydb.NodePublicConfig{ NodePublicInfo: historydb.NodePublicInfo{
ForgeDelay: config.ForgeDelay, ForgeDelay: config.ForgeDelay,
}, },
}, },
rfp: rfp,
} }
u.SetSCVars(vars.AsPtr()) u.SetSCVars(vars.AsPtr())
return &u return &u, nil
} }
// Store the State in the HistoryDB // Store the State in the HistoryDB
@@ -65,13 +102,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {
// UpdateRecommendedFee update Status.RecommendedFee information // UpdateRecommendedFee update Status.RecommendedFee information
func (u *Updater) UpdateRecommendedFee() error { func (u *Updater) UpdateRecommendedFee() error {
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD) switch u.rfp.PolicyType {
if err != nil { case RecommendedFeePolicyTypeStatic:
return tracerr.Wrap(err) u.rw.Lock()
u.state.RecommendedFee = common.RecommendedFee{
ExistingAccount: u.rfp.StaticValue,
CreatesAccount: u.rfp.StaticValue,
CreatesAccountInternal: u.rfp.StaticValue,
}
u.rw.Unlock()
case RecommendedFeePolicyTypeAvgLastHour:
recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
if err != nil {
return tracerr.Wrap(err)
}
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
default:
return tracerr.New("Invalid recommende fee policy")
} }
u.rw.Lock()
u.state.RecommendedFee = *recommendedFee
u.rw.Unlock()
return nil return nil
} }
@@ -84,12 +135,13 @@ func (u *Updater) UpdateMetrics() error {
return nil return nil
} }
lastBatchNum := lastBatch.BatchNum lastBatchNum := lastBatch.BatchNum
metrics, err := u.hdb.GetMetricsInternalAPI(lastBatchNum) metrics, poolLoad, err := u.hdb.GetMetricsInternalAPI(lastBatchNum)
if err != nil { if err != nil {
return tracerr.Wrap(err) return tracerr.Wrap(err)
} }
u.rw.Lock() u.rw.Lock()
u.state.Metrics = *metrics u.state.Metrics = *metrics
u.state.NodePublicInfo.PoolLoad = poolLoad
u.rw.Unlock() u.rw.Unlock()
return nil return nil
} }
@@ -145,11 +197,17 @@ func (u *Updater) UpdateNetworkInfo(
} }
} }
} }
// Update pending L1s
pendingL1s, err := u.hdb.GetUnforgedL1UserTxsCount()
if err != nil {
return tracerr.Wrap(err)
}
u.state.Network.LastSyncBlock = lastSyncBlock.Num u.state.Network.LastSyncBlock = lastSyncBlock.Num
u.state.Network.LastEthBlock = lastEthBlock.Num u.state.Network.LastEthBlock = lastEthBlock.Num
u.state.Network.LastBatch = lastBatch u.state.Network.LastBatch = lastBatch
u.state.Network.CurrentSlot = currentSlot u.state.Network.CurrentSlot = currentSlot
u.state.Network.NextForgers = nextForgers u.state.Network.NextForgers = nextForgers
u.state.Network.PendingL1Txs = pendingL1s
u.rw.Unlock() u.rw.Unlock()
return nil return nil
} }


@@ -60,9 +60,9 @@ externalDocs:
url: 'https://hermez.io' url: 'https://hermez.io'
servers: servers:
- description: Hosted mock up - description: Hosted mock up
url: https://apimock.hermez.network url: https://apimock.hermez.network/v1
- description: Localhost mock Up - description: Localhost mock Up
url: http://localhost:4010 url: http://localhost:4010/v1
tags: tags:
- name: Coordinator - name: Coordinator
description: Endpoints used by the nodes running in coordinator mode. They are used to interact with the network. description: Endpoints used by the nodes running in coordinator mode. They are used to interact with the network.
@@ -2569,9 +2569,9 @@ components:
description: List of next coordinators to forge. description: List of next coordinators to forge.
items: items:
$ref: '#/components/schemas/NextForger' $ref: '#/components/schemas/NextForger'
NodeConfig: Node:
type: object type: object
description: Configuration of the coordinator node. Note that this is specific for each coordinator. description: Configuration and metrics of the coordinator node. Note that this is specific for each coordinator.
properties: properties:
forgeDelay: forgeDelay:
type: number type: number
@@ -2581,9 +2581,14 @@ components:
forge at the maximum rate. Note that this is a configuration parameter of a node, forge at the maximum rate. Note that this is a configuration parameter of a node,
so each coordinator may have a different value. so each coordinator may have a different value.
example: 193.4 example: 193.4
poolLoad:
type: number
description: Number of pending transactions in the pool
example: 23201
additionalProperties: false additionalProperties: false
required: required:
- forgeDelay - forgeDelay
- poolLoad
State: State:
type: object type: object
description: Global variables of the network description: Global variables of the network
@@ -2600,8 +2605,8 @@ components:
$ref: '#/components/schemas/StateWithdrawDelayer' $ref: '#/components/schemas/StateWithdrawDelayer'
recommendedFee: recommendedFee:
$ref: '#/components/schemas/RecommendedFee' $ref: '#/components/schemas/RecommendedFee'
nodeConfig: node:
$ref: '#/components/schemas/NodeConfig' $ref: '#/components/schemas/Node'
additionalProperties: false additionalProperties: false
required: required:
- network - network
@@ -2610,7 +2615,7 @@ components:
- auction - auction
- withdrawalDelayer - withdrawalDelayer
- recommendedFee - recommendedFee
- nodeConfig - node
StateNetwork: StateNetwork:
type: object type: object
description: Global statistics of the network description: Global statistics of the network
@@ -2634,6 +2639,10 @@ components:
- example: 2334 - example: 2334
nextForgers: nextForgers:
$ref: '#/components/schemas/NextForgers' $ref: '#/components/schemas/NextForgers'
pendingL1Transactions:
type: number
description: Number of pending L1 transactions (added in the smart contract queue but not forged).
example: 22
additionalProperties: false additionalProperties: false
required: required:
- lastEthereumBlock - lastEthereumBlock
@@ -2809,11 +2818,11 @@ components:
type: number type: number
description: Average transactions per second in the last 24 hours. description: Average transactions per second in the last 24 hours.
example: 302.3 example: 302.3
totalAccounts: tokenAccounts:
type: integer type: integer
description: Number of created accounts. description: Number of created accounts.
example: 90473 example: 90473
totalBJJs: wallets:
type: integer type: integer
description: Number of different registered BJJs. description: Number of different registered BJJs.
example: 23067 example: 23067
@@ -2830,8 +2839,8 @@ components:
- transactionsPerBatch - transactionsPerBatch
- batchFrequency - batchFrequency
- transactionsPerSecond - transactionsPerSecond
- totalAccounts - tokenAccounts
- totalBJJs - wallets
- avgTransactionFee - avgTransactionFee
- estimatedTimeToForgeL1 - estimatedTimeToForgeL1
PendingItems: PendingItems:


@@ -8,7 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/historydb"
"github.com/hermeznetwork/hermez-node/test" "github.com/hermeznetwork/hermez-node/test"


@@ -8,7 +8,7 @@ import (
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"


@@ -24,6 +24,7 @@ COMMANDS:
genbjj Generate a new BabyJubJub key genbjj Generate a new BabyJubJub key
wipesql Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state wipesql Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state
run Run the hermez-node in the indicated mode run Run the hermez-node in the indicated mode
serveapi Serve the API only
discard Discard blocks up to a specified block number discard Discard blocks up to a specified block number
help, h Shows a list of commands or help for one command help, h Shows a list of commands or help for one command
@@ -54,6 +55,10 @@ To read the documentation of each configuration parameter, please check the
with `Coordinator` are only used in coord mode, and don't need to be defined with `Coordinator` are only used in coord mode, and don't need to be defined
when running the coordinator in sync mode when running the coordinator in sync mode
When running the API in standalone mode, the required configuration is a subset
of the node configuration. Please check the `type APIServer` in
[config/config.go](../../config/config.go) to learn about all the parameters.
### Notes ### Notes
- The private key corresponding to the parameter `Coordinator.ForgerAddress` needs to be imported in the ethereum keystore - The private key corresponding to the parameter `Coordinator.ForgerAddress` needs to be imported in the ethereum keystore
@@ -68,6 +73,9 @@ when running the coordinator in sync mode
monitor the size of the folder to avoid running out of space. monitor the size of the folder to avoid running out of space.
- The node requires a PostgreSQL database. The parameters of the server and - The node requires a PostgreSQL database. The parameters of the server and
database must be set in the `PostgreSQL` section. database must be set in the `PostgreSQL` section.
- The node requires a web3 RPC server to work. The node has only been tested
with geth and may not work correctly with other ethereum node
implementations.
## Building ## Building
@@ -107,6 +115,14 @@ Run the node in mode coordinator:
./node run --mode coord --cfg cfg.buidler.toml ./node run --mode coord --cfg cfg.buidler.toml
``` ```
Serve the API in standalone mode. This command allows serving the API with
access only to the PostgreSQL database that a node is using. Several instances
of `serveapi` can be running at the same time with a single PostgreSQL
database:
```shell
./node serveapi --mode coord --cfg cfg.buidler.toml
```
Import an ethereum private key into the keystore: Import an ethereum private key into the keystore:
```shell ```shell
./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde ./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde


@@ -35,7 +35,7 @@ Symbol = "SUSHI"
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2" Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
[Debug] [Debug]
APIAddress = "localhost:12345" APIAddress = "0.0.0.0:12345"
MeddlerLogs = true MeddlerLogs = true
GinDebugMode = true GinDebugMode = true
@@ -99,6 +99,7 @@ BJJ = "0x1b176232f78ba0d388ecc5f4896eca2d3b3d4f272092469f559247297f5c0c13"
SafetyPeriod = 10 SafetyPeriod = 10
MaxTxs = 512 MaxTxs = 512
MinFeeUSD = 0.0 MinFeeUSD = 0.0
MaxFeeUSD = 50.0
TTL = "24h" TTL = "24h"
PurgeBatchDelay = 10 PurgeBatchDelay = 10
InvalidateBatchDelay = 20 InvalidateBatchDelay = 20
@@ -144,3 +145,11 @@ Coordinator = true
BatchPath = "/tmp/iden3-test/hermez/batchesdebug" BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
LightScrypt = true LightScrypt = true
# RollupVerifierIndex = 0 # RollupVerifierIndex = 0
[RecommendedFeePolicy]
# Strategy used to calculate the recommended fee that the API will expose.
# Available options:
# - Static: always return the same value (StaticValue) in USD
# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
PolicyType = "Static"
StaticValue = 0.99
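Note: with this example policy the state API updater fills all three recommended fee values (existing account, account creation and internal account creation) with the same 0.99 USD, regardless of recent network activity. Switching `PolicyType` to `"AvgLastHour"` makes the node derive the fee from the transactions forged during the last hour, clamped between `MinFeeUSD` and `MaxFeeUSD`.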


@@ -1,10 +1,10 @@
#!/bin/sh #!/bin/sh
# Non-Boot Coordinator # Non-Boot Coordinator
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3 go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3
# Boot Coordinator # Boot Coordinator
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563 go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563
# FeeAccount # FeeAccount
go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd


@@ -35,18 +35,18 @@ const (
) )
var ( var (
// Version represents the program based on the git tag // version represents the program based on the git tag
Version = "v0.1.0" version = "v0.1.0"
// Build represents the program based on the git commit // commit represents the program based on the git commit
Build = "dev" commit = "dev"
// Date represents the date the application was built // date represents the date the application was built
Date = "" date = ""
) )
func cmdVersion(c *cli.Context) error { func cmdVersion(c *cli.Context) error {
fmt.Printf("Version = \"%v\"\n", Version) fmt.Printf("Version = \"%v\"\n", version)
fmt.Printf("Build = \"%v\"\n", Build) fmt.Printf("Build = \"%v\"\n", commit)
fmt.Printf("Date = \"%v\"\n", Date) fmt.Printf("Date = \"%v\"\n", date)
return nil return nil
} }
@@ -318,6 +318,7 @@ func cmdDiscard(c *cli.Context) error {
cfg.Coordinator.L2DB.SafetyPeriod, cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD, cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration, cfg.Coordinator.L2DB.TTL.Duration,
nil, nil,
) )
@@ -420,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
func main() { func main() {
app := cli.NewApp() app := cli.NewApp()
app.Name = "hermez-node" app.Name = "hermez-node"
app.Version = Version app.Version = version
flags := []cli.Flag{ flags := []cli.Flag{
&cli.StringFlag{ &cli.StringFlag{
Name: flagMode, Name: flagMode,
@@ -484,6 +485,7 @@ func main() {
Aliases: []string{}, Aliases: []string{},
Usage: "Serve the API only", Usage: "Serve the API only",
Action: cmdServeAPI, Action: cmdServeAPI,
Flags: flags,
}, },
{ {
Name: "discard", Name: "discard",


@@ -22,7 +22,7 @@ type L1Tx struct {
// - L1UserTx: 0 // - L1UserTx: 0
// - L1CoordinatorTx: 1 // - L1CoordinatorTx: 1
TxID TxID `meddler:"id"` TxID TxID `meddler:"id"`
// ToForgeL1TxsNum indicates in which the tx was forged / will be forged // ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged
ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"` ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
Position int `meddler:"position"` Position int `meddler:"position"`
// UserOrigin is set to true if the tx was originated by a user, false if it was // UserOrigin is set to true if the tx was originated by a user, false if it was


@@ -8,6 +8,7 @@ import (
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/priceupdater" "github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
@@ -144,6 +145,10 @@ type Coordinator struct {
// order to be accepted into the pool. Txs with lower than // order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level. // minimum fee will be rejected at the API level.
MinFeeUSD float64 MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx is allowed to pay in
// order to be accepted into the pool. Txs with a fee greater than the
// maximum will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
// TTL is the Time To Live for L2Txs in the pool. Once MaxTxs // TTL is the Time To Live for L2Txs in the pool. Once MaxTxs
// L2Txs is reached, L2Txs older than TTL will be deleted. // L2Txs is reached, L2Txs older than TTL will be deleted.
TTL Duration `validate:"required"` TTL Duration `validate:"required"`
@@ -295,7 +300,8 @@ type Node struct {
} `validate:"required"` } `validate:"required"`
PostgreSQL PostgreSQL `validate:"required"` PostgreSQL PostgreSQL `validate:"required"`
Web3 struct { Web3 struct {
// URL is the URL of the web3 ethereum-node RPC server // URL is the URL of the web3 ethereum-node RPC server. Only
// geth is officially supported.
URL string `validate:"required"` URL string `validate:"required"`
} `validate:"required"` } `validate:"required"`
Synchronizer struct { Synchronizer struct {
@@ -342,8 +348,9 @@ type Node struct {
// can wait to establish a SQL connection // can wait to establish a SQL connection
SQLConnectionTimeout Duration SQLConnectionTimeout Duration
} `validate:"required"` } `validate:"required"`
Debug NodeDebug `validate:"required"` RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
Coordinator Coordinator `validate:"-"` Debug NodeDebug `validate:"required"`
Coordinator Coordinator `validate:"-"`
} }
// APIServer is the api server configuration parameters // APIServer is the api server configuration parameters
@@ -376,6 +383,10 @@ type APIServer struct {
// order to be accepted into the pool. Txs with lower than // order to be accepted into the pool. Txs with lower than
// minimum fee will be rejected at the API level. // minimum fee will be rejected at the API level.
MinFeeUSD float64 MinFeeUSD float64
// MaxFeeUSD is the maximum fee in USD that a tx is allowed to pay in
// order to be accepted into the pool. Txs with a fee greater than the
// maximum will be rejected at the API level.
MaxFeeUSD float64 `validate:"required"`
} `validate:"required"` } `validate:"required"`
} }
Debug NodeDebug `validate:"required"` Debug NodeDebug `validate:"required"`


@@ -80,6 +80,7 @@ type BatchInfo struct {
PipelineNum int PipelineNum int
BatchNum common.BatchNum BatchNum common.BatchNum
ServerProof prover.Client ServerProof prover.Client
ProofStart time.Time
ZKInputs *common.ZKInputs ZKInputs *common.ZKInputs
Proof *prover.Proof Proof *prover.Proof
PublicInputs []*big.Int PublicInputs []*big.Int


@@ -1,3 +1,43 @@
/*
Package coordinator handles all the logic related to forging batches as a
coordinator in the hermez network.
The forging of batches is done with a pipeline in order to allow multiple
batches being forged in parallel. The maximum number of batches that can be
forged in parallel is determined by the number of available proof servers.
The Coordinator begins with the pipeline stopped. The main Coordinator
goroutine keeps listening for synchronizer events sent by the node package,
which allow the coordinator to determine if the configured forger address is
allowed to forge at the current block or not. When the forger address becomes
allowed to forge, the pipeline is started, and when it stops being allowed to
forge, the pipeline is stopped.
The Pipeline consists of two goroutines. The first one is in charge of
preparing a batch internally, which involves making a selection of transactions
and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
an idle proof server. This goroutine will keep preparing batches while there
are idle proof servers, if the forging policy determines that a batch should be
forged in the current state. The second goroutine is in charge of waiting for
the proof server to finish computing the proof, retrieving it, preparing the
arguments for the `forgeBatch` Rollup transaction, and sending the result to
the TxManager. All the batch information moves between functions and
goroutines via the BatchInfo struct.
Finally, the TxManager contains a single goroutine that makes forgeBatch
ethereum transactions for the batches sent by the Pipeline, and keeps them in a
list to check them periodically. In the periodic checks, the ethereum
transaction is checked for success, and it is only forgotten after a number of
confirmation blocks have passed since it was successfully mined. If at any
point a transaction failure is detected, the TxManager can signal the
Coordinator to reset the Pipeline in order to reforge the failed batches.
The Coordinator goroutine acts as a manager. The synchronizer events (which
notify about new blocks and associated new state) that it receives are
broadcasted to the Pipeline and the TxManager. This allows the Coordinator,
Pipeline and TxManager to have a copy of the current hermez network state
required to perform their duties.
*/
package coordinator package coordinator
import ( import (
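To make the data flow described in the package documentation above easier to picture, here is a minimal, self-contained sketch of two pipeline stages connected by a channel. It is a conceptual illustration only, not the actual coordinator code: `batchInfo`, `prepareBatch`, `waitProof` and `sendToTxManager` are hypothetical stand-ins for the real types and functions.

```go
// Conceptual sketch only, not the actual coordinator implementation.
package main

import (
	"fmt"
	"time"
)

// batchInfo is a hypothetical stand-in for the real BatchInfo struct.
type batchInfo struct{ batchNum int }

func prepareBatch(n int) batchInfo { return batchInfo{batchNum: n} } // tx selection + ZKInputs
func waitProof(b batchInfo) batchInfo {
	time.Sleep(10 * time.Millisecond) // simulate the proof server working
	return b
}
func sendToTxManager(b batchInfo) { fmt.Println("forgeBatch tx prepared for batch", b.batchNum) }

func main() {
	prepared := make(chan batchInfo)

	// Stage 1: prepare batches and hand them to an idle proof server.
	go func() {
		defer close(prepared)
		for n := 1; n <= 3; n++ {
			prepared <- prepareBatch(n)
		}
	}()

	// Stage 2: wait for each proof and pass the result on to the TxManager.
	for b := range prepared {
		sendToTxManager(waitProof(b))
	}
}
```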


@@ -105,7 +105,7 @@ func newTestModules(t *testing.T) modules {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil) historyDB := historydb.NewHistoryDB(db, db, nil)
txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB") txSelDBPath, err = ioutil.TempDir("", "tmpTxSelDB")


@@ -5,6 +5,7 @@ import (
"database/sql" "database/sql"
"fmt" "fmt"
"math/big" "math/big"
"strconv"
"sync" "sync"
"time" "time"
@@ -14,6 +15,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/prover" "github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/txselector" "github.com/hermeznetwork/hermez-node/txselector"
@@ -246,6 +248,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,
// 3. Send the ZKInputs to the proof server // 3. Send the ZKInputs to the proof server
batchInfo.ServerProof = serverProof batchInfo.ServerProof = serverProof
batchInfo.ProofStart = time.Now()
if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil { if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
return nil, ctx.Err() return nil, ctx.Err()
} else if err != nil { } else if err != nil {
@@ -520,15 +523,30 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
// l1UserFutureTxs are the l1UserTxs that will not be forged
// in the next batch, but that are already in the queue for
// future batches
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1)
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs) p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
} else { } else {
// get l1UserFutureTxs, which are all the pending L1 user txs
// in all the queues
l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd
if err != nil {
return nil, nil, tracerr.Wrap(err)
}
// 2b: only L2 txs // 2b: only L2 txs
coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err = coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig) p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
@@ -602,6 +620,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
// waitServerProof gets the generated zkProof & sends it to the SmartContract // waitServerProof gets the generated zkProof & sends it to the SmartContract
func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error { func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart,
batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum))
proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call, proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
// until not resolved don't continue. Returns when the proof server has calculated the proof // until not resolved don't continue. Returns when the proof server has calculated the proof
if err != nil { if err != nil {


@@ -21,7 +21,7 @@ func newL2DB(t *testing.T) *l2db.L2DB {
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
test.WipeDB(db) test.WipeDB(db)
return l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) return l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
} }
func newStateDB(t *testing.T) *statedb.LocalStateDB { func newStateDB(t *testing.T) *statedb.LocalStateDB {


@@ -8,7 +8,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/db" "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
@@ -1083,12 +1083,8 @@ func (hdb *HistoryDB) GetNextForgersInternalAPI(auctionVars *common.AuctionVaria
} }
// GetMetricsInternalAPI returns the MetricsAPI // GetMetricsInternalAPI returns the MetricsAPI
func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*MetricsAPI, error) { func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (metrics *MetricsAPI, poolLoad int64, err error) {
var metrics MetricsAPI metrics = &MetricsAPI{}
// Get the first and last batch of the last 24h and their timestamps
// if u.state.Network.LastBatch == nil {
// return &metrics, nil
// }
type period struct { type period struct {
FromBatchNum common.BatchNum `meddler:"from_batch_num"` FromBatchNum common.BatchNum `meddler:"from_batch_num"`
FromTimestamp time.Time `meddler:"from_timestamp"` FromTimestamp time.Time `meddler:"from_timestamp"`
@@ -1106,7 +1102,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num FROM batch INNER JOIN block ON batch.eth_block_num = block.eth_block_num
WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`, WHERE block.timestamp >= NOW() - INTERVAL '24 HOURS';`,
); err != nil { ); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// Get the amount of txs of that period // Get the amount of txs of that period
row := hdb.dbRead.QueryRow( row := hdb.dbRead.QueryRow(
@@ -1115,7 +1111,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
) )
var nTxs int var nTxs int
if err := row.Scan(&nTxs); err != nil { if err := row.Scan(&nTxs); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// Set txs/s // Set txs/s
seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds() seconds := p.ToTimestamp.Sub(p.FromTimestamp).Seconds()
@@ -1141,7 +1137,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
) )
var totalFee float64 var totalFee float64
if err := row.Scan(&totalFee); err != nil { if err := row.Scan(&totalFee); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
// Set batch frequency // Set batch frequency
metrics.BatchFrequency = seconds / float64(nBatches) metrics.BatchFrequency = seconds / float64(nBatches)
@@ -1152,7 +1148,7 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
) )
var nL2Txs int var nL2Txs int
if err := row.Scan(&nL2Txs); err != nil { if err := row.Scan(&nL2Txs); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
if nL2Txs > 0 { if nL2Txs > 0 {
metrics.AvgTransactionFee = totalFee / float64(nL2Txs) metrics.AvgTransactionFee = totalFee / float64(nL2Txs)
@@ -1161,18 +1157,18 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
} }
// Get and set amount of registered accounts // Get and set amount of registered accounts
type registeredAccounts struct { type registeredAccounts struct {
TotalIdx int64 `meddler:"total_idx"` TokenAccounts int64 `meddler:"token_accounts"`
TotalBJJ int64 `meddler:"total_bjj"` Wallets int64 `meddler:"wallets"`
} }
ra := &registeredAccounts{} ra := &registeredAccounts{}
if err := meddler.QueryRow( if err := meddler.QueryRow(
hdb.dbRead, ra, hdb.dbRead, ra,
`SELECT COUNT(*) AS total_bjj, COUNT(DISTINCT(bjj)) AS total_idx FROM account;`, `SELECT COUNT(*) AS token_accounts, COUNT(DISTINCT(bjj)) AS wallets FROM account;`,
); err != nil { ); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
metrics.TotalAccounts = ra.TotalIdx metrics.TokenAccounts = ra.TokenAccounts
metrics.TotalBJJs = ra.TotalBJJ metrics.Wallets = ra.Wallets
// Get and set estimated time to forge L1 tx // Get and set estimated time to forge L1 tx
row = hdb.dbRead.QueryRow( row = hdb.dbRead.QueryRow(
`SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx `SELECT COALESCE (AVG(EXTRACT(EPOCH FROM (forged.timestamp - added.timestamp))), 0) FROM tx
@@ -1184,10 +1180,18 @@ func (hdb *HistoryDB) GetMetricsInternalAPI(lastBatchNum common.BatchNum) (*Metr
) )
var timeToForgeL1 float64 var timeToForgeL1 float64
if err := row.Scan(&timeToForgeL1); err != nil { if err := row.Scan(&timeToForgeL1); err != nil {
return nil, tracerr.Wrap(err) return nil, 0, tracerr.Wrap(err)
} }
metrics.EstimatedTimeToForgeL1 = timeToForgeL1 metrics.EstimatedTimeToForgeL1 = timeToForgeL1
return &metrics, nil // Get amount of txs in the pool
row = hdb.dbRead.QueryRow(
`SELECT COUNT(*) FROM tx_pool WHERE state = $1 AND NOT external_delete;`,
common.PoolL2TxStatePending,
)
if err := row.Scan(&poolLoad); err != nil {
return nil, 0, tracerr.Wrap(err)
}
return metrics, poolLoad, nil
} }
// GetStateAPI returns the StateAPI // GetStateAPI returns the StateAPI


@@ -751,6 +751,24 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err) return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
} }
// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch
// with toForgeL1TxsNum (in one of the future batches, not in the next one).
func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
var txs []*common.L1Tx
err := meddler.QueryAll(
hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
tx.amount, NULL AS effective_amount,
tx.deposit_amount, NULL AS effective_deposit_amount,
tx.eth_block_num, tx.type, tx.batch_num
FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1
ORDER BY position;`,
toForgeL1TxsNum,
)
return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
}
// GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in // GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
// open or frozen queues that are not yet forged) // open or frozen queues that are not yet forged)
func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) { func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {
@@ -1181,7 +1199,7 @@ const (
) )
// GetRecommendedFee returns the RecommendedFee information // GetRecommendedFee returns the RecommendedFee information
func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedFee, error) { func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD, maxFeeUSD float64) (*common.RecommendedFee, error) {
var recommendedFee common.RecommendedFee var recommendedFee common.RecommendedFee
// Get total txs and the batch of the first selected tx of the last hour // Get total txs and the batch of the first selected tx of the last hour
type totalTxsSinceBatchNum struct { type totalTxsSinceBatchNum struct {
@@ -1217,11 +1235,11 @@ func (hdb *HistoryDB) GetRecommendedFee(minFeeUSD float64) (*common.RecommendedF
} else { } else {
avgTransactionFee = 0 avgTransactionFee = 0
} }
recommendedFee.ExistingAccount = recommendedFee.ExistingAccount = math.Min(maxFeeUSD,
math.Max(avgTransactionFee, minFeeUSD) math.Max(avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccount = recommendedFee.CreatesAccount = math.Min(maxFeeUSD,
math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD) math.Max(CreateAccountExtraFeePercentage*avgTransactionFee, minFeeUSD))
recommendedFee.CreatesAccountInternal = recommendedFee.CreatesAccountInternal = math.Min(maxFeeUSD,
math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD) math.Max(CreateAccountInternalExtraFeePercentage*avgTransactionFee, minFeeUSD))
return &recommendedFee, nil return &recommendedFee, nil
} }
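As a quick sanity check of the new clamping (a standalone sketch, not the actual historydb code), using the `MinFeeUSD = 0.0` and `MaxFeeUSD = 50.0` values from cfg.buidler.toml above: an average fee of 1.25 USD passes through unchanged, while an average fee of 120 USD is capped at 50 USD.

```go
package main

import (
	"fmt"
	"math"
)

// clamp mirrors the expression now used for each recommended fee field:
// math.Min(maxFeeUSD, math.Max(avgFeeUSD, minFeeUSD)).
func clamp(avgFeeUSD, minFeeUSD, maxFeeUSD float64) float64 {
	return math.Min(maxFeeUSD, math.Max(avgFeeUSD, minFeeUSD))
}

func main() {
	fmt.Println(clamp(1.25, 0.0, 50.0))  // 1.25 -> inside the range, unchanged
	fmt.Println(clamp(120.0, 0.0, 50.0)) // 50   -> capped at MaxFeeUSD
	fmt.Println(clamp(0.0, 0.0, 50.0))   // 0    -> floored at MinFeeUSD
}
```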


@@ -11,7 +11,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
dbUtils "github.com/hermeznetwork/hermez-node/db" dbUtils "github.com/hermeznetwork/hermez-node/db"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
@@ -699,34 +699,55 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
CreateAccountDeposit(1) B: 5 CreateAccountDeposit(1) B: 5
CreateAccountDeposit(1) C: 5 CreateAccountDeposit(1) C: 5
CreateAccountDeposit(1) D: 5 CreateAccountDeposit(1) D: 5
> block > block
> batchL1
> block
CreateAccountDeposit(1) E: 5
CreateAccountDeposit(1) F: 5
> block
` `
tc := til.NewContext(uint16(0), 128) tc := til.NewContext(uint16(0), 128)
blocks, err := tc.GenerateBlocks(set) blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err) require.NoError(t, err)
// Sanity check // Sanity check
require.Equal(t, 1, len(blocks)) require.Equal(t, 3, len(blocks))
require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs)) require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs))
toForgeL1TxsNum := int64(1)
for i := range blocks { for i := range blocks {
err = historyDB.AddBlockSCData(&blocks[i]) err = historyDB.AddBlockSCData(&blocks[i])
require.NoError(t, err) require.NoError(t, err)
} }
l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum) l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0)
require.NoError(t, err)
assert.Equal(t, 7, len(l1UserTxs))
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 5, len(l1UserTxs)) assert.Equal(t, 5, len(l1UserTxs))
assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs) assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1)
require.NoError(t, err)
assert.Equal(t, 2, len(l1UserTxs))
count, err := historyDB.GetUnforgedL1UserTxsCount() count, err := historyDB.GetUnforgedL1UserTxsCount()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 5, count) assert.Equal(t, 7, count)
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
require.NoError(t, err)
assert.Equal(t, 2, len(l1UserTxs))
l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2)
require.NoError(t, err)
assert.Equal(t, 0, len(l1UserTxs))
// No l1UserTxs for this toForgeL1TxsNum // No l1UserTxs for this toForgeL1TxsNum
l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2) l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 0, len(l1UserTxs)) assert.Equal(t, 0, len(l1UserTxs))
} }
@@ -1177,7 +1198,7 @@ func TestGetMetricsAPI(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches)) res, _, err := historyDB.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch) assert.Equal(t, float64(numTx)/float64(numBatches), res.TransactionsPerBatch)
@@ -1186,8 +1207,8 @@ func TestGetMetricsAPI(t *testing.T) {
// There is a -2 as time for first and last batch is not taken into account // There is a -2 as time for first and last batch is not taken into account
assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01) assert.InEpsilon(t, float64(frequency)*float64(numBatches-2)/float64(numBatches), res.BatchFrequency, 0.01)
assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01) assert.InEpsilon(t, float64(numTx)/float64(frequency*blockNum-frequency), res.TransactionsPerSecond, 0.01)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.Wallets)
// Til does not set fees // Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee) assert.Equal(t, float64(0), res.AvgTransactionFee)
} }
@@ -1255,22 +1276,22 @@ func TestGetMetricsAPIMoreThan24Hours(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
res, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches)) res, _, err := historyDBWithACC.GetMetricsInternalAPI(common.BatchNum(numBatches))
assert.NoError(t, err) assert.NoError(t, err)
assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1) assert.InEpsilon(t, 1.0, res.TransactionsPerBatch, 0.1)
assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1) assert.InEpsilon(t, res.BatchFrequency, float64(blockTime/time.Second), 0.1)
assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1) assert.InEpsilon(t, 1.0/float64(blockTime/time.Second), res.TransactionsPerSecond, 0.1)
assert.Equal(t, int64(3), res.TotalAccounts) assert.Equal(t, int64(3), res.TokenAccounts)
assert.Equal(t, int64(3), res.TotalBJJs) assert.Equal(t, int64(3), res.Wallets)
// Til does not set fees // Til does not set fees
assert.Equal(t, float64(0), res.AvgTransactionFee) assert.Equal(t, float64(0), res.AvgTransactionFee)
} }
func TestGetMetricsAPIEmpty(t *testing.T) { func TestGetMetricsAPIEmpty(t *testing.T) {
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
_, err := historyDBWithACC.GetMetricsInternalAPI(0) _, _, err := historyDBWithACC.GetMetricsInternalAPI(0)
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -1485,7 +1506,7 @@ func TestNodeInfo(t *testing.T) {
addr := ethCommon.HexToAddress("0x1234") addr := ethCommon.HexToAddress("0x1234")
hash := ethCommon.HexToHash("0x5678") hash := ethCommon.HexToHash("0x5678")
stateAPI := &StateAPI{ stateAPI := &StateAPI{
NodePublicConfig: NodePublicConfig{ NodePublicInfo: NodePublicInfo{
ForgeDelay: 3.1, ForgeDelay: 3.1,
}, },
Network: NetworkAPI{ Network: NetworkAPI{
@@ -1544,7 +1565,7 @@ func TestNodeInfo(t *testing.T) {
}, },
Metrics: MetricsAPI{ Metrics: MetricsAPI{
TransactionsPerBatch: 1.1, TransactionsPerBatch: 1.1,
TotalAccounts: 42, TokenAccounts: 42,
}, },
Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables), Rollup: *NewRollupVariablesAPI(clientSetup.RollupVariables),
Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables), Auction: *NewAuctionVariablesAPI(clientSetup.AuctionVariables),


@@ -4,7 +4,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/russross/meddler" "github.com/russross/meddler"
@@ -32,18 +32,20 @@ type NetworkAPI struct {
LastBatch *BatchAPI `json:"lastBatch"` LastBatch *BatchAPI `json:"lastBatch"`
CurrentSlot int64 `json:"currentSlot"` CurrentSlot int64 `json:"currentSlot"`
NextForgers []NextForgerAPI `json:"nextForgers"` NextForgers []NextForgerAPI `json:"nextForgers"`
PendingL1Txs int `json:"pendingL1Transactions"`
} }
// NodePublicConfig is the configuration of the node that is exposed via API // NodePublicInfo is the configuration and metrics of the node that is exposed via API
type NodePublicConfig struct { type NodePublicInfo struct {
// ForgeDelay in seconds // ForgeDelay in seconds
ForgeDelay float64 `json:"forgeDelay"` ForgeDelay float64 `json:"forgeDelay"`
// PoolLoad is the number of transactions in the pool
PoolLoad int64 `json:"poolLoad"`
} }
// StateAPI is an object representing the node and network state exposed via the API // StateAPI is an object representing the node and network state exposed via the API
type StateAPI struct { type StateAPI struct {
// NodePublicConfig is the configuration of the node that is exposed via API NodePublicInfo NodePublicInfo `json:"node"`
NodePublicConfig NodePublicConfig `json:"nodeConfig"`
Network NetworkAPI `json:"network"` Network NetworkAPI `json:"network"`
Metrics MetricsAPI `json:"metrics"` Metrics MetricsAPI `json:"metrics"`
Rollup RollupVariablesAPI `json:"rollup"` Rollup RollupVariablesAPI `json:"rollup"`
@@ -63,6 +65,7 @@ type Constants struct {
type NodeConfig struct { type NodeConfig struct {
MaxPoolTxs uint32 MaxPoolTxs uint32
MinFeeUSD float64 MinFeeUSD float64
MaxFeeUSD float64
ForgeDelay float64 ForgeDelay float64
} }


@@ -6,7 +6,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
"github.com/iden3/go-merkletree" "github.com/iden3/go-merkletree"
@@ -314,8 +314,8 @@ type MetricsAPI struct {
TransactionsPerBatch float64 `json:"transactionsPerBatch"` TransactionsPerBatch float64 `json:"transactionsPerBatch"`
BatchFrequency float64 `json:"batchFrequency"` BatchFrequency float64 `json:"batchFrequency"`
TransactionsPerSecond float64 `json:"transactionsPerSecond"` TransactionsPerSecond float64 `json:"transactionsPerSecond"`
TotalAccounts int64 `json:"totalAccounts" meddler:"total_accounts"` TokenAccounts int64 `json:"tokenAccounts"`
TotalBJJs int64 `json:"totalBJJs" meddler:"total_bjjs"` Wallets int64 `json:"wallets"`
AvgTransactionFee float64 `json:"avgTransactionFee"` AvgTransactionFee float64 `json:"avgTransactionFee"`
EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"` EstimatedTimeToForgeL1 float64 `json:"estimatedTimeToForgeL1" meddler:"estimated_time_to_forge_l1"`
} }


@@ -62,6 +62,10 @@ func (l2db *L2DB) AddTxAPI(tx *PoolL2TxWrite) error {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)", return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) < minFeeUSD (%v)",
feeUSD, l2db.minFeeUSD)) feeUSD, l2db.minFeeUSD))
} }
if feeUSD > l2db.maxFeeUSD {
return tracerr.Wrap(fmt.Errorf("tx.feeUSD (%v) > maxFeeUSD (%v)",
feeUSD, l2db.maxFeeUSD))
}
// Prepare insert SQL query argument parameters // Prepare insert SQL query argument parameters
namesPart, err := meddler.Default.ColumnsQuoted(tx, false) namesPart, err := meddler.Default.ColumnsQuoted(tx, false)


@@ -27,6 +27,7 @@ type L2DB struct {
ttl time.Duration ttl time.Duration
maxTxs uint32 // limit of txs that are accepted in the pool maxTxs uint32 // limit of txs that are accepted in the pool
minFeeUSD float64 minFeeUSD float64
maxFeeUSD float64
apiConnCon *db.APIConnectionController apiConnCon *db.APIConnectionController
} }
@@ -38,6 +39,7 @@ func NewL2DB(
safetyPeriod common.BatchNum, safetyPeriod common.BatchNum,
maxTxs uint32, maxTxs uint32,
minFeeUSD float64, minFeeUSD float64,
maxFeeUSD float64,
TTL time.Duration, TTL time.Duration,
apiConnCon *db.APIConnectionController, apiConnCon *db.APIConnectionController,
) *L2DB { ) *L2DB {
@@ -48,6 +50,7 @@ func NewL2DB(
ttl: TTL, ttl: TTL,
maxTxs: maxTxs, maxTxs: maxTxs,
minFeeUSD: minFeeUSD, minFeeUSD: minFeeUSD,
maxFeeUSD: maxFeeUSD,
apiConnCon: apiConnCon, apiConnCon: apiConnCon,
} }
} }


@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
l2DB = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, nil) l2DB = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, nil)
apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second) apiConnCon := dbUtils.NewAPIConnectionController(1, time.Second)
l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 24*time.Hour, apiConnCon) l2DBWithACC = NewL2DB(db, db, 10, 1000, 0.0, 1000.0, 24*time.Hour, apiConnCon)
test.WipeDB(l2DB.DB()) test.WipeDB(l2DB.DB())
historyDB = historydb.NewHistoryDB(db, db, nil) historyDB = historydb.NewHistoryDB(db, db, nil)
// Run tests // Run tests


@@ -6,7 +6,7 @@ import (
"time" "time"
ethCommon "github.com/ethereum/go-ethereum/common" ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/hermeznetwork/hermez-node/apitypes" "github.com/hermeznetwork/hermez-node/api/apitypes"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
) )


@@ -677,12 +677,22 @@ CREATE TABLE node_info (
); );
INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update INSERT INTO node_info(item_id) VALUES (1); -- Always have a single row that we will update
CREATE VIEW account_state AS SELECT DISTINCT idx,
first_value(nonce) OVER w AS nonce,
first_value(balance) OVER w AS balance,
first_value(eth_block_num) OVER w AS eth_block_num,
first_value(batch_num) OVER w AS batch_num
FROM account_update
window w AS (partition by idx ORDER BY item_id desc);
-- +migrate Down -- +migrate Down
-- triggers -- triggers
DROP TRIGGER IF EXISTS trigger_token_usd_update ON token; DROP TRIGGER IF EXISTS trigger_token_usd_update ON token;
DROP TRIGGER IF EXISTS trigger_set_tx ON tx; DROP TRIGGER IF EXISTS trigger_set_tx ON tx;
DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch; DROP TRIGGER IF EXISTS trigger_forge_l1_txs ON batch;
DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool; DROP TRIGGER IF EXISTS trigger_set_pool_tx ON tx_pool;
-- drop views IF EXISTS
DROP VIEW IF EXISTS account_state;
-- functions -- functions
DROP FUNCTION IF EXISTS hez_idx; DROP FUNCTION IF EXISTS hez_idx;
DROP FUNCTION IF EXISTS set_token_usd_update; DROP FUNCTION IF EXISTS set_token_usd_update;


@@ -260,7 +260,7 @@ type AuctionInterface interface {
AuctionConstants() (*common.AuctionConstants, error) AuctionConstants() (*common.AuctionConstants, error)
AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error) AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error)
AuctionEventInit() (*AuctionEventInitialize, int64, error) AuctionEventInit(genesisBlockNum int64) (*AuctionEventInitialize, int64, error)
} }
// //
@@ -809,12 +809,14 @@ var (
) )
// AuctionEventInit returns the initialize event with its corresponding block number // AuctionEventInit returns the initialize event with its corresponding block number
func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, error) { func (c *AuctionClient) AuctionEventInit(genesisBlockNum int64) (*AuctionEventInitialize, int64, error) {
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
Topics: [][]ethCommon.Hash{{logAuctionInitialize}}, FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
ToBlock: big.NewInt(genesisBlockNum),
Topics: [][]ethCommon.Hash{{logAuctionInitialize}},
} }
logs, err := c.client.client.FilterLogs(context.Background(), query) logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil { if err != nil {


@@ -28,7 +28,7 @@ func TestAuctionGetCurrentSlotNumber(t *testing.T) {
} }
func TestAuctionEventInit(t *testing.T) { func TestAuctionEventInit(t *testing.T) {
auctionInit, blockNum, err := auctionClientTest.AuctionEventInit() auctionInit, blockNum, err := auctionClientTest.AuctionEventInit(genesisBlock)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(18), blockNum) assert.Equal(t, int64(18), blockNum)
assert.Equal(t, donationAddressConst, auctionInit.DonationAddress) assert.Equal(t, donationAddressConst, auctionInit.DonationAddress)


@@ -12,6 +12,17 @@ import (
var errTODO = fmt.Errorf("TODO: Not implemented yet") var errTODO = fmt.Errorf("TODO: Not implemented yet")
const (
blocksPerDay = (3600 * 24) / 15
)
func max(x, y int64) int64 {
if x > y {
return x
}
return y
}
// ClientInterface is the eth Client interface used by hermez-node modules to // ClientInterface is the eth Client interface used by hermez-node modules to
// interact with Ethereum Blockchain and smart contracts. // interact with Ethereum Blockchain and smart contracts.
type ClientInterface interface { type ClientInterface interface {
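For reference, `blocksPerDay = (3600 * 24) / 15` works out to 5760, i.e. roughly one day of Ethereum blocks assuming an average block time of about 15 seconds. The `EventInit` filters in this changeset therefore scan at most about one day of blocks before the genesis block (`FromBlock = genesisBlockNum - blocksPerDay`, `ToBlock = genesisBlockNum`) instead of the whole chain.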


@@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c
if number == -1 { if number == -1 {
blockNum = nil blockNum = nil
} }
block, err := c.client.BlockByNumber(ctx, blockNum) header, err := c.client.HeaderByNumber(ctx, blockNum)
if err != nil { if err != nil {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
b := &common.Block{ b := &common.Block{
Num: block.Number().Int64(), Num: header.Number.Int64(),
Timestamp: time.Unix(int64(block.Time()), 0), Timestamp: time.Unix(int64(header.Time), 0),
ParentHash: block.ParentHash(), ParentHash: header.ParentHash,
Hash: block.Hash(), Hash: header.Hash(),
} }
return b, nil return b, nil
} }
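Fetching only the header is enough here: `common.Block` only needs the block number, timestamp, parent hash and hash, all of which are available in the header, so the full block body (transaction list) no longer has to be downloaded from the RPC node during synchronization.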


@@ -273,7 +273,7 @@ type RollupInterface interface {
RollupConstants() (*common.RollupConstants, error) RollupConstants() (*common.RollupConstants, error)
RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error) RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error)
RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error) RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
RollupEventInit() (*RollupEventInitialize, int64, error) RollupEventInit(genesisBlockNum int64) (*RollupEventInitialize, int64, error)
} }
// //
@@ -749,12 +749,14 @@ var (
) )
// RollupEventInit returns the initialize event with its corresponding block number // RollupEventInit returns the initialize event with its corresponding block number
func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error) { func (c *RollupClient) RollupEventInit(genesisBlockNum int64) (*RollupEventInitialize, int64, error) {
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
Topics: [][]ethCommon.Hash{{logHermezInitialize}}, FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
ToBlock: big.NewInt(genesisBlockNum),
Topics: [][]ethCommon.Hash{{logHermezInitialize}},
} }
logs, err := c.client.client.FilterLogs(context.Background(), query) logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil { if err != nil {


@@ -56,7 +56,7 @@ func genKeysBjj(i int64) *keys {
} }
func TestRollupEventInit(t *testing.T) { func TestRollupEventInit(t *testing.T) {
rollupInit, blockNum, err := rollupClient.RollupEventInit() rollupInit, blockNum, err := rollupClient.RollupEventInit(genesisBlock)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(19), blockNum) assert.Equal(t, int64(19), blockNum)
assert.Equal(t, uint8(10), rollupInit.ForgeL1L2BatchTimeout) assert.Equal(t, uint8(10), rollupInit.ForgeL1L2BatchTimeout)


@@ -137,7 +137,7 @@ type WDelayerInterface interface {
WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error) WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
WDelayerConstants() (*common.WDelayerConstants, error) WDelayerConstants() (*common.WDelayerConstants, error)
WDelayerEventInit() (*WDelayerEventInitialize, int64, error) WDelayerEventInit(genesisBlockNum int64) (*WDelayerEventInitialize, int64, error)
} }
// //
@@ -415,12 +415,14 @@ var (
) )
// WDelayerEventInit returns the initialize event with its corresponding block number // WDelayerEventInit returns the initialize event with its corresponding block number
func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, error) { func (c *WDelayerClient) WDelayerEventInit(genesisBlockNum int64) (*WDelayerEventInitialize, int64, error) {
query := ethereum.FilterQuery{ query := ethereum.FilterQuery{
Addresses: []ethCommon.Address{ Addresses: []ethCommon.Address{
c.address, c.address,
}, },
Topics: [][]ethCommon.Hash{{logWDelayerInitialize}}, FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
ToBlock: big.NewInt(genesisBlockNum),
Topics: [][]ethCommon.Hash{{logWDelayerInitialize}},
} }
logs, err := c.client.client.FilterLogs(context.Background(), query) logs, err := c.client.client.FilterLogs(context.Background(), query)
if err != nil { if err != nil {

View File

@@ -18,7 +18,7 @@ var maxEmergencyModeTime = time.Hour * 24 * 7 * 26
var maxWithdrawalDelay = time.Hour * 24 * 7 * 2 var maxWithdrawalDelay = time.Hour * 24 * 7 * 2
func TestWDelayerInit(t *testing.T) { func TestWDelayerInit(t *testing.T) {
wDelayerInit, blockNum, err := wdelayerClientTest.WDelayerEventInit() wDelayerInit, blockNum, err := wdelayerClientTest.WDelayerEventInit(genesisBlock)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(16), blockNum) assert.Equal(t, int64(16), blockNum)
assert.Equal(t, uint64(initWithdrawalDelay), wDelayerInit.InitialWithdrawalDelay) assert.Equal(t, uint64(initWithdrawalDelay), wDelayerInit.InitialWithdrawalDelay)

metric/metric.go (new file, 192 lines)
View File

@@ -0,0 +1,192 @@
package metric
import (
"time"
"github.com/hermeznetwork/hermez-node/log"
"github.com/prometheus/client_golang/prometheus"
)
type (
// Metric represents the metric type
Metric string
)
const (
namespaceError = "error"
namespaceSync = "synchronizer"
namespaceTxSelector = "txselector"
namespaceAPI = "api"
)
var (
// Errors errors count metric.
Errors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceError,
Name: "errors",
Help: "",
}, []string{"error"})
// WaitServerProof duration time to get the calculated
// proof from the server.
WaitServerProof = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceSync,
Name: "wait_server_proof",
Help: "",
}, []string{"batch_number", "pipeline_number"})
// Reorgs block reorg count
Reorgs = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceSync,
Name: "reorgs",
Help: "",
})
// LastBlockNum last block synced
LastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_block_num",
Help: "",
})
// EthLastBlockNum last eth block synced
EthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_block_num",
Help: "",
})
// LastBatchNum last batch synced
LastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "synced_last_batch_num",
Help: "",
})
// EthLastBatchNum last eth batch synced
EthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceSync,
Name: "eth_last_batch_num",
Help: "",
})
// GetL2TxSelection L2 tx selection count
GetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l2_txselection_total",
Help: "",
})
// GetL1L2TxSelection L1L2 tx selection count
GetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: namespaceTxSelector,
Name: "get_l1_l2_txselection_total",
Help: "",
})
// SelectedL1CoordinatorTxs selected L1 coordinator tx count
SelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_coordinator_txs",
Help: "",
})
// SelectedL1UserTxs selected L1 user tx count
SelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l1_user_txs",
Help: "",
})
// SelectedL2Txs selected L2 tx count
SelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "selected_l2_txs",
Help: "",
})
// DiscardedL2Txs discarded L2 tx count
DiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespaceTxSelector,
Name: "discarded_l2_txs",
Help: "",
})
)
func init() {
if err := registerCollectors(); err != nil {
log.Error(err)
}
}
func registerCollectors() error {
if err := registerCollector(Errors); err != nil {
return err
}
if err := registerCollector(WaitServerProof); err != nil {
return err
}
if err := registerCollector(Reorgs); err != nil {
return err
}
if err := registerCollector(LastBlockNum); err != nil {
return err
}
if err := registerCollector(LastBatchNum); err != nil {
return err
}
if err := registerCollector(EthLastBlockNum); err != nil {
return err
}
if err := registerCollector(EthLastBatchNum); err != nil {
return err
}
if err := registerCollector(GetL2TxSelection); err != nil {
return err
}
if err := registerCollector(GetL1L2TxSelection); err != nil {
return err
}
if err := registerCollector(SelectedL1CoordinatorTxs); err != nil {
return err
}
if err := registerCollector(SelectedL1UserTxs); err != nil {
return err
}
return registerCollector(DiscardedL2Txs)
}
func registerCollector(collector prometheus.Collector) error {
err := prometheus.Register(collector)
if err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
return err
}
}
return nil
}
// MeasureDuration measures the method execution duration
// and saves it into a histogram metric
func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) {
duration := time.Since(start)
histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds()))
}
// CollectError collects the error message and increments
// the error count
func CollectError(err error) {
Errors.With(map[string]string{"error": err.Error()}).Inc()
}
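A hedged usage sketch of the two helpers above, e.g. from coordinator code that waits for a server proof; the call site, `serverProofClient`, `batchNum` and `pipelineNum` are illustrative, not copied from the repository:

// Time how long the proof wait took (labelled by batch and pipeline) and
// count failures in the Errors counter.
start := time.Now()
proof, err := serverProofClient.GetProof(ctx) // hypothetical prover call
if err != nil {
	metric.CollectError(err)
	return err
}
_ = proof
metric.MeasureDuration(metric.WaitServerProof, start,
	strconv.Itoa(batchNum), strconv.Itoa(pipelineNum))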

metric/request.go (new file, 78 lines)
View File

@@ -0,0 +1,78 @@
package metric
import (
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
)
const (
favicon = "/favicon.ico"
)
// Prometheus contains the metrics gathered by the instance and its path
type Prometheus struct {
reqCnt *prometheus.CounterVec
reqDur *prometheus.HistogramVec
}
// NewPrometheus generates a new set of metrics with a certain subsystem name
func NewPrometheus() (*Prometheus, error) {
reqCnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespaceAPI,
Name: "requests_total",
Help: "How many HTTP requests processed, partitioned by status code and HTTP method",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqCnt); err != nil {
return nil, err
}
reqDur := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespaceAPI,
Name: "request_duration_seconds",
Help: "The HTTP request latencies in seconds",
},
[]string{"code", "method", "path"},
)
if err := registerCollector(reqDur); err != nil {
return nil, err
}
return &Prometheus{
reqCnt: reqCnt,
reqDur: reqDur,
}, nil
}
// PrometheusMiddleware creates the prometheus collector and
// defines status handler function for the middleware
func PrometheusMiddleware() (gin.HandlerFunc, error) {
p, err := NewPrometheus()
if err != nil {
return nil, err
}
return p.Middleware(), nil
}
// Middleware defines status handler function for middleware
func (p *Prometheus) Middleware() gin.HandlerFunc {
return func(c *gin.Context) {
if c.Request.URL.Path == favicon {
c.Next()
return
}
start := time.Now()
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
fullPath := c.FullPath()
p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed)
p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc()
}
}
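A sketch of how this middleware could be wired into a gin engine together with a Prometheus scrape endpoint. The engine setup and the "/metrics" path are assumptions for illustration; `gin.WrapH` and `promhttp.Handler` are standard library calls from gin and client_golang:

package main // sketch only

import (
	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/metric"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newEngine() (*gin.Engine, error) {
	engine := gin.New()
	apiMetrics, err := metric.PrometheusMiddleware()
	if err != nil {
		return nil, err
	}
	engine.Use(apiMetrics) // counts + times every request except /favicon.ico
	// Expose the default registry; the actual path used by the node may differ.
	engine.GET("/metrics", gin.WrapH(promhttp.Handler()))
	return engine, nil
}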

View File

@@ -1,3 +1,18 @@
/*
Package node does the initialization of all the required objects to either run
as a synchronizer or as a coordinator.
The Node contains several goroutines that run in the background or that
periodically perform tasks. One of these goroutines periodically calls the
`Synchronizer.Sync` function, allowing the synchronization of one block at a
time. After every call to `Synchronizer.Sync`, the Node sends a message to the
Coordinator to notify it about the newly synced block (and associated state) or
about a reorg (and the reset state) in case one happens.
Other goroutines perform tasks such as updating the token prices, updating the
metrics stored in the historyDB, updating the recommended fee stored in the
historyDB, running the http API server, and running the debug http API server.
*/
package node package node
import ( import (
@@ -15,6 +30,7 @@ import (
"github.com/gin-contrib/cors" "github.com/gin-contrib/cors"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/hermeznetwork/hermez-node/api" "github.com/hermeznetwork/hermez-node/api"
"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
"github.com/hermeznetwork/hermez-node/batchbuilder" "github.com/hermeznetwork/hermez-node/batchbuilder"
"github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/common"
"github.com/hermeznetwork/hermez-node/config" "github.com/hermeznetwork/hermez-node/config"
@@ -27,7 +43,6 @@ import (
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/priceupdater" "github.com/hermeznetwork/hermez-node/priceupdater"
"github.com/hermeznetwork/hermez-node/prover" "github.com/hermeznetwork/hermez-node/prover"
"github.com/hermeznetwork/hermez-node/stateapiupdater"
"github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/synchronizer"
"github.com/hermeznetwork/hermez-node/test/debugapi" "github.com/hermeznetwork/hermez-node/test/debugapi"
"github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/hermez-node/txprocessor"
@@ -230,6 +245,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
cfg.Coordinator.L2DB.SafetyPeriod, cfg.Coordinator.L2DB.SafetyPeriod,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD, cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
cfg.Coordinator.L2DB.TTL.Duration, cfg.Coordinator.L2DB.TTL.Duration,
apiConnCon, apiConnCon,
) )
@@ -253,6 +269,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
hdbNodeCfg := historydb.NodeConfig{ hdbNodeCfg := historydb.NodeConfig{
MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs, MaxPoolTxs: cfg.Coordinator.L2DB.MaxTxs,
MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD, MinFeeUSD: cfg.Coordinator.L2DB.MinFeeUSD,
MaxFeeUSD: cfg.Coordinator.L2DB.MaxFeeUSD,
ForgeDelay: cfg.Coordinator.ForgeDelay.Duration.Seconds(), ForgeDelay: cfg.Coordinator.ForgeDelay.Duration.Seconds(),
} }
if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil { if err := historyDB.SetNodeConfig(&hdbNodeCfg); err != nil {
@@ -271,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
return nil, tracerr.Wrap(err) return nil, tracerr.Wrap(err)
} }
stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts) stateAPIUpdater, err := stateapiupdater.NewUpdater(
historyDB,
&hdbNodeCfg,
initSCVars,
&hdbConsts,
&cfg.RecommendedFeePolicy,
)
if err != nil {
return nil, tracerr.Wrap(err)
}
var coord *coordinator.Coordinator var coord *coordinator.Coordinator
if mode == ModeCoordinator { if mode == ModeCoordinator {
@@ -522,6 +548,7 @@ func NewAPIServer(mode Mode, cfg *config.APIServer) (*APIServer, error) {
0, 0,
cfg.Coordinator.L2DB.MaxTxs, cfg.Coordinator.L2DB.MaxTxs,
cfg.Coordinator.L2DB.MinFeeUSD, cfg.Coordinator.L2DB.MinFeeUSD,
cfg.Coordinator.L2DB.MaxFeeUSD,
0, 0,
apiConnCon, apiConnCon,
) )
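As described in the package documentation added above, the Node runs a goroutine that repeatedly drives the Synchronizer. A rough, assumed sketch of that loop; the `Sync` parameters beyond `ctx`, the `notifyCoordinator*` helpers and the sleep intervals are illustrative, not the node's actual code:

for {
	if ctx.Err() != nil {
		return
	}
	blockData, discarded, err := syncer.Sync(ctx, lastSavedBlock)
	switch {
	case err != nil:
		metric.CollectError(err)
		time.Sleep(retryInterval) // hypothetical back-off
	case discarded != nil:
		// A reorg was detected: *discarded blocks were dropped and the
		// state was reset; tell the coordinator before syncing again.
		notifyCoordinatorReorg(*discarded)
		lastSavedBlock = nil
	case blockData != nil:
		lastSavedBlock = &blockData.Block // assuming BlockData carries the synced block
		notifyCoordinatorBlock(blockData)
	default:
		// Fully synced for now; wait before polling for the next block.
		time.Sleep(waitInterval)
	}
}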

View File

@@ -1,44 +0,0 @@
package synchronizer
import "github.com/prometheus/client_golang/prometheus"
var (
metricReorgsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "sync_reorgs",
Help: "",
},
)
metricSyncedLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_block_num",
Help: "",
},
)
metricEthLastBlockNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_block_num",
Help: "",
},
)
metricSyncedLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_synced_last_batch_num",
Help: "",
},
)
metricEthLastBatchNum = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "sync_eth_last_batch_num",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricReorgsCount)
prometheus.MustRegister(metricSyncedLastBlockNum)
prometheus.MustRegister(metricEthLastBlockNum)
prometheus.MustRegister(metricSyncedLastBatchNum)
prometheus.MustRegister(metricEthLastBatchNum)
}
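This per-package registry (and the matching one in txselector below) is dropped in favour of the shared metric package shown earlier. One practical difference worth noting: `prometheus.MustRegister` panics if a collector with the same name is registered twice, while the new `registerCollector` tolerates that by checking for `prometheus.AlreadyRegisteredError`. A small standalone illustration of the pattern, not tied to any specific metric in the repository:

// Registering the same collector name twice: Register returns an
// AlreadyRegisteredError, which the metric package treats as non-fatal.
c := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_total"})
if err := prometheus.Register(c); err != nil {
	if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
		panic(err) // only unexpected registration errors are fatal
	}
}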

View File

@@ -1,3 +1,35 @@
/*
Package synchronizer synchronizes the hermez network state by querying events
emitted by the three smart contracts: `Hermez.sol` (referred to as Rollup here),
`HermezAuctionProtocol.sol` (referred to as Auction here) and
`WithdrawalDelayer.sol` (referred to as WDelayer here).
The main entry point for synchronization is the `Sync` function, which will
synchronize at most one ethereum block, together with all the hermez events that
happened in that block. During a `Sync` call, a reorg can be detected; in such a
case, the uncle blocks are discarded and the correct blocks will only be synced
in a future `Sync` call.
The synchronization of the events of each smart contract is done
in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
use the interface code found in "github.com/hermeznetwork/hermez-node/eth" to
read each smart contract's state and events. After these three methods are
called, an object of type `common.BlockData` is built containing all the
updates and events that happened in that block, and it is inserted in the
HistoryDB in a single SQL transaction.
`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
state is updated in the StateDB by processing all transactions that have been
forged in that batch.
The consistency of the stored data is guaranteed by the HistoryDB: all the
block information is inserted in a single SQL transaction at the end of the
`Sync` method, once the StateDB has been updated. Every time the
Synchronizer starts, it continues from the last block in the HistoryDB. The
StateDB stores updates organized by checkpoints for every batch, and each batch
is only accessed if it appears in the HistoryDB.
*/
package synchronizer package synchronizer
import ( import (
@@ -15,6 +47,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/eth"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
) )
@@ -549,6 +582,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
return nil, nil, tracerr.Wrap(err) return nil, nil, tracerr.Wrap(err)
} }
discarded := lastSavedBlock.Num - lastDBBlockNum discarded := lastSavedBlock.Num - lastDBBlockNum
metric.Reorgs.Inc()
return nil, &discarded, nil return nil, &discarded, nil
} }
} }
@@ -641,16 +675,16 @@ func (s *Synchronizer) Sync(ctx context.Context,
} }
for _, batchData := range rollupData.Batches { for _, batchData := range rollupData.Batches {
metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum)) metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum))
metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum)) metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
log.Debugw("Synced batch", log.Debugw("Synced batch",
"syncLastBatch", batchData.Batch.BatchNum, "syncLastBatch", batchData.Batch.BatchNum,
"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum), "syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
"ethLastBatch", s.stats.Eth.LastBatchNum, "ethLastBatch", s.stats.Eth.LastBatchNum,
) )
} }
metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num)) metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num)) metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
log.Debugw("Synced block", log.Debugw("Synced block",
"syncLastBlockNum", s.stats.Sync.LastBlock.Num, "syncLastBlockNum", s.stats.Sync.LastBlock.Num,
"syncBlocksPerc", s.stats.blocksPerc(), "syncBlocksPerc", s.stats.blocksPerc(),
@@ -704,15 +738,15 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
func getInitialVariables(ethClient eth.ClientInterface, func getInitialVariables(ethClient eth.ClientInterface,
consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) { consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
rollupInit, rollupInitBlock, err := ethClient.RollupEventInit() rollupInit, rollupInitBlock, err := ethClient.RollupEventInit(consts.Auction.GenesisBlockNum)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err)) return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
} }
auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit() auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit(consts.Auction.GenesisBlockNum)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err)) return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
} }
wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit() wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit(consts.Auction.GenesisBlockNum)
if err != nil { if err != nil {
return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err)) return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
} }

View File

@@ -323,7 +323,7 @@ func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db
test.WipeDB(historyDB.DB()) test.WipeDB(historyDB.DB())
// Init L2 DB // Init L2 DB
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
return stateDB, historyDB, l2DB return stateDB, historyDB, l2DB
} }
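The extra `1000.0` argument threaded through these test helpers corresponds to the `MaxFeeUSD` limit that the node changes above now read from `cfg.Coordinator.L2DB.MaxFeeUSD`. The parameter order below is inferred from the call sites in this diff rather than from the l2db package itself:

// read DB, write DB, safety period, max pool txs, min fee (USD),
// max fee (USD), pool tx TTL, API connection semaphore
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, apiConnCon)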

View File

@@ -1152,7 +1152,7 @@ func (c *Client) RollupEventsByBlock(blockNum int64,
} }
// RollupEventInit returns the initialize event with its corresponding block number // RollupEventInit returns the initialize event with its corresponding block number
func (c *Client) RollupEventInit() (*eth.RollupEventInitialize, int64, error) { func (c *Client) RollupEventInit(genesisBlockNum int64) (*eth.RollupEventInitialize, int64, error) {
vars := c.blocks[0].Rollup.Vars vars := c.blocks[0].Rollup.Vars
return &eth.RollupEventInitialize{ return &eth.RollupEventInitialize{
ForgeL1L2BatchTimeout: uint8(vars.ForgeL1L2BatchTimeout), ForgeL1L2BatchTimeout: uint8(vars.ForgeL1L2BatchTimeout),
@@ -1628,7 +1628,7 @@ func (c *Client) AuctionEventsByBlock(blockNum int64,
} }
// AuctionEventInit returns the initialize event with its corresponding block number // AuctionEventInit returns the initialize event with its corresponding block number
func (c *Client) AuctionEventInit() (*eth.AuctionEventInitialize, int64, error) { func (c *Client) AuctionEventInit(genesisBlockNum int64) (*eth.AuctionEventInitialize, int64, error) {
vars := c.blocks[0].Auction.Vars vars := c.blocks[0].Auction.Vars
return &eth.AuctionEventInitialize{ return &eth.AuctionEventInitialize{
DonationAddress: vars.DonationAddress, DonationAddress: vars.DonationAddress,
@@ -1863,7 +1863,7 @@ func (c *Client) WDelayerConstants() (*common.WDelayerConstants, error) {
} }
// WDelayerEventInit returns the initialize event with its corresponding block number // WDelayerEventInit returns the initialize event with its corresponding block number
func (c *Client) WDelayerEventInit() (*eth.WDelayerEventInitialize, int64, error) { func (c *Client) WDelayerEventInit(genesisBlockNum int64) (*eth.WDelayerEventInitialize, int64, error) {
vars := c.blocks[0].WDelayer.Vars vars := c.blocks[0].WDelayer.Vars
return &eth.WDelayerEventInitialize{ return &eth.WDelayerEventInitialize{
InitialWithdrawalDelay: vars.WithdrawalDelay, InitialWithdrawalDelay: vars.WithdrawalDelay,

View File

@@ -77,7 +77,7 @@ func initTxSelector(t *testing.T, chainID uint16, hermezContractAddr ethCommon.A
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
dir, err := ioutil.TempDir("", "tmpSyncDB") dir, err := ioutil.TempDir("", "tmpSyncDB")
require.NoError(t, err) require.NoError(t, err)
@@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
} }
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum]) l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum]) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum]) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum]) l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err = coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
// TxSelector select the transactions for the next Batch // TxSelector select the transactions for the next Batch
l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum]) l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err := coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs) txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
require.NoError(t, err) require.NoError(t, err)
// BatchBuilder build Batch // BatchBuilder build Batch
zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs) zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err := coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(txprocConfig, nil) txsel.GetL1L2TxSelection(txprocConfig, nil, nil)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 1, len(coordIdxs)) assert.Equal(t, 1, len(coordIdxs))
assert.Equal(t, 0, len(oL1UserTxs)) assert.Equal(t, 0, len(oL1UserTxs))

View File

@@ -1,53 +0,0 @@
package txselector
import "github.com/prometheus/client_golang/prometheus"
var (
metricGetL2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l2_txselecton_total",
Help: "",
},
)
metricGetL1L2TxSelection = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "txsel_get_l1_l2_txselecton_total",
Help: "",
},
)
metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_coordinator_txs",
Help: "",
},
)
metricSelectedL1UserTxs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l1_user_txs",
Help: "",
},
)
metricSelectedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_selected_l2_txs",
Help: "",
},
)
metricDiscardedL2Txs = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "txsel_discarded_l2_txs",
Help: "",
},
)
)
func init() {
prometheus.MustRegister(metricGetL2TxSelection)
prometheus.MustRegister(metricGetL1L2TxSelection)
prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
prometheus.MustRegister(metricSelectedL1UserTxs)
prometheus.MustRegister(metricSelectedL2Txs)
prometheus.MustRegister(metricDiscardedL2Txs)
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/l2db"
"github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/db/statedb"
"github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/log"
"github.com/hermeznetwork/hermez-node/metric"
"github.com/hermeznetwork/hermez-node/txprocessor" "github.com/hermeznetwork/hermez-node/txprocessor"
"github.com/hermeznetwork/tracerr" "github.com/hermeznetwork/tracerr"
"github.com/iden3/go-iden3-crypto/babyjub" "github.com/iden3/go-iden3-crypto/babyjub"
@@ -84,7 +85,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx, func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) { tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
// check if CoordinatorAccount for TokenID is already pending to create // check if CoordinatorAccount for TokenID is already pending to create
if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID, if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID,
txsel.coordAccount.Addr, txsel.coordAccount.BJJ) { txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
return nil, positionL1, nil return nil, positionL1, nil
} }
@@ -121,11 +122,12 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
// but there is a transactions to them and the authorization of account // but there is a transactions to them and the authorization of account
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx, func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx,
[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { [][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL2TxSelection.Inc() metric.GetL2TxSelection.Inc()
coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs, coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{}) discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig,
[]common.L1Tx{}, l1UserFutureTxs)
return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs, return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err) discardedL2Txs, tracerr.Wrap(err)
} }
@@ -139,11 +141,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config, func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
metricGetL1L2TxSelection.Inc() metric.GetL1L2TxSelection.Inc()
coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs) discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs)
return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs, return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
discardedL2Txs, tracerr.Wrap(err) discardedL2Txs, tracerr.Wrap(err)
} }
@@ -157,37 +159,40 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
// creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
// included in the next batch. // included in the next batch.
func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config, func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx, l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) { []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
// WIP.0: the TxSelector is not optimized and will need a redesign. The // WIP.0: the TxSelector is not optimized and will need a redesign. The
// current version is implemented in order to have a functional // current version is implemented in order to have a functional
// implementation that can be used ASAP. // implementation that can be used ASAP.
// Steps of this method: // Steps of this method:
// - getPendingTxs // - ProcessL1Txs (User txs)
// - ProcessL1Txs // - getPendingTxs (forgable directly with current state & not forgable
// - getProfitable (sort by fee & nonce) // yet)
// - loop over l2Txs // - split between l2TxsForgable & l2TxsNonForgable, where:
// - Fill tx.TokenID tx.Nonce // - l2TxsForgable are the txs that are directly forgable with the
// - Check enough Balance on sender // current state
// - Check Nonce // - l2TxsNonForgable are the txs that are not directly forgable
// - Create CoordAccount L1CoordTx for TokenID if needed // with the current state, but that may be forgable once the
// - & ProcessL1Tx of L1CoordTx // l2TxsForgable ones are processed
// - Check validity of receiver Account for ToEthAddr / ToBJJ // - for l2TxsForgable, and if needed, for l2TxsNonForgable:
// - Create UserAccount L1CoordTx if needed (and possible) // - sort by Fee & Nonce
// - If everything is fine, store l2Tx to validTxs & update NoncesMap // - loop over l2Txs (txsel.processL2Txs)
// - Fill tx.TokenID tx.Nonce
// - Check enough Balance on sender
// - Check Nonce
// - Create CoordAccount L1CoordTx for TokenID if needed
// - & ProcessL1Tx of L1CoordTx
// - Check validity of receiver Account for ToEthAddr / ToBJJ
// - Create UserAccount L1CoordTx if needed (and possible)
// - If everything is fine, store l2Tx to validTxs & update NoncesMap
// - Prepare coordIdxsMap & AccumulatedFees // - Prepare coordIdxsMap & AccumulatedFees
// - Distribute AccumulatedFees to CoordIdxs // - Distribute AccumulatedFees to CoordIdxs
// - MakeCheckpoint // - MakeCheckpoint
// get pending l2-tx from tx-pool
l2TxsRaw, err := txsel.l2db.GetPendingTxs()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
txselStateDB := txsel.localAccountsDB.StateDB txselStateDB := txsel.localAccountsDB.StateDB
tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig) tp := txprocessor.NewTxProcessor(txselStateDB, selectionConfig)
tp.AccumulatedFees = make(map[common.Idx]*big.Int)
// Process L1UserTxs // Process L1UserTxs
for i := 0; i < len(l1UserTxs); i++ { for i := 0; i < len(l1UserTxs); i++ {
@@ -198,21 +203,141 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
} }
} }
var l1CoordinatorTxs []common.L1Tx l2TxsFromDB, err := txsel.l2db.GetPendingTxs()
positionL1 := len(l1UserTxs) if err != nil {
var accAuths [][]byte return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
l2TxsForgable, l2TxsNonForgable := splitL2ForgableAndNonForgable(tp, l2TxsFromDB)
// Sort l2TxsRaw (cropping at MaxTx at this point). // in case that length of l2TxsForgable is 0, no need to continue, there
// discardedL2Txs contains an array of the L2Txs that have not been // is no L2Txs to forge at all
// selected in this Batch. if len(l2TxsForgable) == 0 {
l2Txs, discardedL2Txs := txsel.getL2Profitable(l2TxsRaw, selectionConfig.MaxTx-uint32(len(l1UserTxs))) var discardedL2Txs []common.PoolL2Tx
for i := range discardedL2Txs { for i := 0; i < len(l2TxsNonForgable); i++ {
discardedL2Txs[i].Info = l2TxsNonForgable[i].Info =
"Tx not selected due to low absolute fee (does not fit inside the profitable set)" "Tx not selected due impossibility to be forged with the current state"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metric.SelectedL1CoordinatorTxs.Set(0)
metric.SelectedL2Txs.Set(0)
metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
} }
var validTxs []common.PoolL2Tx var accAuths [][]byte
tp.AccumulatedFees = make(map[common.Idx]*big.Int) var l1CoordinatorTxs []common.L1Tx
var validTxs, discardedL2Txs []common.PoolL2Tx
l2TxsForgable = sortL2Txs(l2TxsForgable)
accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs,
l2TxsForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
// if there is space for more txs get also the NonForgable txs, that may
// be unblocked once the Forgable ones are processed
if len(validTxs) < int(selectionConfig.MaxTx)-(len(l1UserTxs)+len(l1CoordinatorTxs)) {
l2TxsNonForgable = sortL2Txs(l2TxsNonForgable)
var accAuths2 [][]byte
var l1CoordinatorTxs2 []common.L1Tx
accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
txsel.processL2Txs(tp, selectionConfig,
len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs,
l2TxsNonForgable, validTxs, discardedL2Txs)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accAuths = append(accAuths, accAuths2...)
l1CoordinatorTxs = append(l1CoordinatorTxs, l1CoordinatorTxs2...)
} else {
// if there is no space for NonForgable txs, put them at the
// discardedL2Txs array
for i := 0; i < len(l2TxsNonForgable); i++ {
l2TxsNonForgable[i].Info =
"Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2TxsNonForgable[i])
}
}
// get CoordIdxsMap for the TokenIDs
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
coordIdxsMap[tokenID] = coordIdx
}
var coordIdxs []common.Idx
for _, idx := range coordIdxsMap {
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metric.SelectedL2Txs.Set(float64(len(validTxs)))
metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
}
func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx,
l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx,
[]common.PoolL2Tx, []common.PoolL2Tx, error) {
var l1CoordinatorTxs []common.L1Tx
positionL1 := nL1Txs
var accAuths [][]byte
// Iterate over l2Txs // Iterate over l2Txs
// - check Nonces // - check Nonces
// - check enough Balance for the Amount+Fee // - check enough Balance for the Amount+Fee
@@ -221,20 +346,22 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// - put the valid txs into validTxs array // - put the valid txs into validTxs array
for i := 0; i < len(l2Txs); i++ { for i := 0; i < len(l2Txs); i++ {
// Check if there is space for more L2Txs in the selection // Check if there is space for more L2Txs in the selection
maxL2Txs := int(selectionConfig.MaxTx) - maxL2Txs := int(selectionConfig.MaxTx) - nL1Txs - len(l1CoordinatorTxs)
len(l1UserTxs) - len(l1CoordinatorTxs)
if len(validTxs) >= maxL2Txs { if len(validTxs) >= maxL2Txs {
// no more available slots for L2Txs // no more available slots for L2Txs, so mark this tx
l2Txs[i].Info = // but also the rest of remaining txs as discarded
"Tx not selected due not available slots for L2Txs" for j := i; j < len(l2Txs); j++ {
discardedL2Txs = append(discardedL2Txs, l2Txs[i]) l2Txs[j].Info =
continue "Tx not selected due not available slots for L2Txs"
discardedL2Txs = append(discardedL2Txs, l2Txs[j])
}
break
} }
// get Nonce & TokenID from the Account by l2Tx.FromIdx // get Nonce & TokenID from the Account by l2Tx.FromIdx
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx) accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil { if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, nil, tracerr.Wrap(err)
} }
l2Txs[i].TokenID = accSender.TokenID l2Txs[i].TokenID = accSender.TokenID
@@ -272,13 +399,13 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
txsel.coordAccountForTokenID(l1CoordinatorTxs, txsel.coordAccountForTokenID(l1CoordinatorTxs,
accSender.TokenID, positionL1) accSender.TokenID, positionL1)
if err != nil { if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, nil, tracerr.Wrap(err)
} }
if newL1CoordTx != nil { if newL1CoordTx != nil {
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space // if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx // for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) || if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) { len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// discard L2Tx, and update Info parameter of // discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " + l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
@@ -294,7 +421,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// process the L1CoordTx // process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx) _, _, _, _, err := tp.ProcessL1Tx(nil, newL1CoordTx)
if err != nil { if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, nil, tracerr.Wrap(err)
} }
} }
@@ -309,7 +436,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
validL2Tx, l1CoordinatorTx, accAuth, err := validL2Tx, l1CoordinatorTx, accAuth, err :=
txsel.processTxToEthAddrBJJ(validTxs, selectionConfig, txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
len(l1UserTxs), l1CoordinatorTxs, positionL1, l2Txs[i]) nL1Txs, l1UserFutureTxs, l1CoordinatorTxs,
positionL1, l2Txs[i])
if err != nil { if err != nil {
log.Debugw("txsel.processTxToEthAddrBJJ", "err", err) log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
// Discard L2Tx, and update Info parameter of // Discard L2Tx, and update Info parameter of
@@ -321,8 +449,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
} }
// if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space // if there is no space for the L1CoordinatorTx as MaxL1Tx, or no space
// for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx // for L1CoordinatorTx + L2Tx as MaxTx, discard the L2Tx
if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-len(l1UserTxs) || if len(l1CoordinatorTxs) >= int(selectionConfig.MaxL1Tx)-nL1Txs ||
len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-len(l1UserTxs) { len(l1CoordinatorTxs)+1 >= int(selectionConfig.MaxTx)-nL1Txs {
// discard L2Tx, and update Info parameter of // discard L2Tx, and update Info parameter of
// the tx, and add it to the discardedTxs array // the tx, and add it to the discardedTxs array
l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " + l2Txs[i].Info = "Tx not selected because the L2Tx depends on a " +
@@ -351,7 +479,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// process the L1CoordTx // process the L1CoordTx
_, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx) _, _, _, _, err := tp.ProcessL1Tx(nil, l1CoordinatorTx)
if err != nil { if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err) return nil, nil, nil, nil, tracerr.Wrap(err)
} }
} }
if validL2Tx == nil { if validL2Tx == nil {
@@ -413,7 +541,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// if err is db.ErrNotFound, should not happen, as all // if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx // the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point // created in the DB at this point
return nil, nil, nil, nil, nil, nil, return nil, nil, nil, nil,
tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+ tracerr.Wrap(fmt.Errorf("Could not get CoordIdx for TokenID=%d, "+
"due: %s", tokenID, err)) "due: %s", tokenID, err))
} }
@@ -439,68 +567,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
validTxs = append(validTxs, l2Txs[i]) validTxs = append(validTxs, l2Txs[i])
} // after this loop, no checks to discard txs should be done } // after this loop, no checks to discard txs should be done
// get CoordIdxsMap for the TokenIDs return accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
coordIdxsMap := make(map[common.TokenID]common.Idx)
for i := 0; i < len(validTxs); i++ {
// get TokenID from tx.Sender
accSender, err := tp.StateDB().GetAccount(validTxs[i].FromIdx)
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
tokenID := accSender.TokenID
coordIdx, err := txsel.getCoordIdx(tokenID)
if err != nil {
// if err is db.ErrNotFound, should not happen, as all
// the validTxs.TokenID should have a CoordinatorIdx
// created in the DB at this point
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
coordIdxsMap[tokenID] = coordIdx
}
var coordIdxs []common.Idx
for _, idx := range coordIdxsMap {
coordIdxs = append(coordIdxs, idx)
}
// sort CoordIdxs
sort.SliceStable(coordIdxs, func(i, j int) bool {
return coordIdxs[i] < coordIdxs[j]
})
// distribute the AccumulatedFees from the processed L2Txs into the
// Coordinator Idxs
for idx, accumulatedFee := range tp.AccumulatedFees {
cmp := accumulatedFee.Cmp(big.NewInt(0))
if cmp == 1 { // accumulatedFee>0
// send the fee to the Idx of the Coordinator for the TokenID
accCoord, err := txsel.localAccountsDB.GetAccount(idx)
if err != nil {
log.Errorw("Can not distribute accumulated fees to coordinator "+
"account: No coord Idx to receive fee", "idx", idx)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
accCoord.Balance = new(big.Int).Add(accCoord.Balance, accumulatedFee)
_, err = txsel.localAccountsDB.UpdateAccount(idx, accCoord)
if err != nil {
log.Error(err)
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
}
}
err = tp.StateDB().MakeCheckpoint()
if err != nil {
return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
}
metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
metricSelectedL2Txs.Set(float64(len(validTxs)))
metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
// return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
} }
// processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where // processTxsToEthAddrBJJ process the common.PoolL2Tx in the case where
@@ -510,18 +577,35 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
// l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs // l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
// array. // array.
func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx, func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx, selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs,
positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx, l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) (
*common.AccountCreationAuth, error) { *common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) {
// if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a // if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
// previous L2Tx in the current process already created a // previous L2Tx in the current process already created a
// L1CoordinatorTx of this type, in the DB there still seem that needs // L1CoordinatorTx of this type, in the DB there still seem that needs
// to create a new L1CoordinatorTx, but as is already created, the tx // to create a new L1CoordinatorTx, but as is already created, the tx
// is valid // is valid
if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) { if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
return &l2Tx, nil, nil, nil return &l2Tx, nil, nil, nil
} }
// check if L2Tx receiver account will be created by a L1UserFutureTxs
// (in the next batch, the current frozen queue). In that case, the L2Tx
// will be discarded at the current batch, even if there is an
// AccountCreationAuth for the account, as there is a L1UserTx in the
// frozen queue that will create the receiver Account. The L2Tx is
// discarded to avoid the Coordinator creating a new L1CoordinatorTx to
// create the receiver account, which will be also created in the next
// batch from the L1UserFutureTx, ending with the user having 2
// different accounts for the same TokenID. The double account creation
// is supported by the Hermez zkRollup specification, but it was decided
// to mitigate it at the TxSelector level for the explained cases.
if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" +
" receiver account does not exist yet, and there is a L1UserTx that" +
" will create that account in a future batch.")
}
var l1CoordinatorTx *common.L1Tx var l1CoordinatorTx *common.L1Tx
var accAuth *common.AccountCreationAuth var accAuth *common.AccountCreationAuth
if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr { if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
@@ -624,7 +708,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
return &l2Tx, l1CoordinatorTx, accAuth, nil return &l2Tx, l1CoordinatorTx, accAuth, nil
} }
func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID, func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool { addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
for i := 0; i < len(l1CoordinatorTxs); i++ { for i := 0; i < len(l1CoordinatorTxs); i++ {
if l1CoordinatorTxs[i].FromEthAddr == addr && if l1CoordinatorTxs[i].FromEthAddr == addr &&
@@ -636,26 +720,31 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
return false return false
} }
// getL2Profitable returns the profitable selection of L2Txssorted by Nonce func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID,
func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([]common.PoolL2Tx, addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
[]common.PoolL2Tx) { for i := 0; i < len(l1UserFutureTxs); i++ {
// First sort by nonce so that txs from the same account are sorted so if l1UserFutureTxs[i].FromEthAddr == addr &&
// that they could be applied in succession. l1UserFutureTxs[i].TokenID == tokenID &&
sort.Slice(l2Txs, func(i, j int) bool { l1UserFutureTxs[i].FromBJJ == bjj {
return l2Txs[i].Nonce < l2Txs[j].Nonce return true
}) }
if l1UserFutureTxs[i].FromEthAddr == addr &&
l1UserFutureTxs[i].TokenID == tokenID &&
common.EmptyBJJComp == bjj {
return true
}
}
return false
}
// sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
// Sort by absolute fee with SliceStable, so that txs with same // Sort by absolute fee with SliceStable, so that txs with same
// AbsoluteFee are not rearranged and nonce order is kept in such case // AbsoluteFee are not rearranged and nonce order is kept in such case
sort.SliceStable(l2Txs, func(i, j int) bool { sort.SliceStable(l2Txs, func(i, j int) bool {
return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee return l2Txs[i].AbsoluteFee > l2Txs[j].AbsoluteFee
}) })
discardedL2Txs := []common.PoolL2Tx{}
if len(l2Txs) > int(max) {
discardedL2Txs = l2Txs[max:]
l2Txs = l2Txs[:max]
}
// sort l2Txs by Nonce. This can be done in many different ways, what // sort l2Txs by Nonce. This can be done in many different ways, what
// is needed is to output the l2Txs where the Nonce of l2Txs for each // is needed is to output the l2Txs where the Nonce of l2Txs for each
// Account is sorted, but the l2Txs can not be grouped by sender Account // Account is sorted, but the l2Txs can not be grouped by sender Account
@@ -665,5 +754,29 @@ func (txsel *TxSelector) getL2Profitable(l2Txs []common.PoolL2Tx, max uint32) ([
return l2Txs[i].Nonce < l2Txs[j].Nonce return l2Txs[i].Nonce < l2Txs[j].Nonce
}) })
return l2Txs, discardedL2Txs return l2Txs
}
func splitL2ForgableAndNonForgable(tp *txprocessor.TxProcessor,
l2Txs []common.PoolL2Tx) ([]common.PoolL2Tx, []common.PoolL2Tx) {
var l2TxsForgable, l2TxsNonForgable []common.PoolL2Tx
for i := 0; i < len(l2Txs); i++ {
accSender, err := tp.StateDB().GetAccount(l2Txs[i].FromIdx)
if err != nil {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
if l2Txs[i].Nonce != accSender.Nonce {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
enoughBalance, _, _ := tp.CheckEnoughBalance(l2Txs[i])
if !enoughBalance {
l2TxsNonForgable = append(l2TxsNonForgable, l2Txs[i])
continue
}
l2TxsForgable = append(l2TxsForgable, l2Txs[i])
}
return l2TxsForgable, l2TxsNonForgable
} }
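With the new parameter, callers pass both the L1UserTxs being forged in the current batch and the L1UserTxs of the frozen queue. A sketch of the call from the coordinator side; how the frozen queue is fetched is outside this diff and left out:

// l1UserTxs: queue forged in this batch; l1UserFutureTxs: next (frozen)
// queue, used only to avoid creating receiver accounts that an L1UserTx
// will create in the following batch anyway.
coordIdxs, accAuths, l1UserTxs, l1CoordTxs, l2Txs, discardedL2Txs, err :=
	txsel.GetL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs)
if err != nil {
	return tracerr.Wrap(err)
}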

View File

@@ -26,11 +26,13 @@ import (
) )
func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address, func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address,
coordUser *til.User) *TxSelector { coordUser *til.User) (*TxSelector, *historydb.HistoryDB) {
pass := os.Getenv("POSTGRES_PASS") pass := os.Getenv("POSTGRES_PASS")
db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez") db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
require.NoError(t, err) require.NoError(t, err)
l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 24*time.Hour, nil) l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil)
historyDB := historydb.NewHistoryDB(db, db, nil)
dir, err := ioutil.TempDir("", "tmpdb") dir, err := ioutil.TempDir("", "tmpdb")
require.NoError(t, err) require.NoError(t, err)
@@ -65,7 +67,7 @@ func initTest(t *testing.T, chainID uint16, hermezContractAddr ethCommon.Address
test.WipeDB(txsel.l2db.DB()) test.WipeDB(txsel.l2db.DB())
return txsel return txsel, historyDB
} }
func addAccCreationAuth(t *testing.T, tc *til.Context, txsel *TxSelector, chainID uint16,
@@ -157,7 +159,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -180,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:1")
l1UserTxs := []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -191,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:2")
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -202,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:3")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -215,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:4")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -229,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:5")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -243,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
log.Debug("block:0 batch:6")
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -277,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
@@ -326,7 +328,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@@ -370,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, []common.Idx{263}, coordIdxs)
assert.Equal(t, 0, len(accAuths))
@@ -417,7 +419,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -432,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
}
// batch1
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// 1st TransferToEthAddr
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
@@ -454,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
@@ -470,11 +472,6 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
tc.RestartNonces()
// batch3
// NOTE: this batch will result with 1 L2Tx, as the PoolExit tx is not
// possible, as the PoolTransferToEthAddr is not processed yet when
// checking availability of PoolExit. This, in a near-future iteration
// of the TxSelector will return the 2 transactions as valid and
// selected, as the TxSelector will handle this kind of combinations.
batchPoolL2 = `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 50 (126)`
@@ -484,15 +481,15 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
-assert.Equal(t, 1, len(oL2Txs)) // see 'NOTE' at the beginning of 'batch3' of this test
+assert.Equal(t, 2, len(oL2Txs))
-assert.Equal(t, 2, len(discardedL2Txs))
+assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, expectedTxID2, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, oL2Txs[1].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID1, discardedL2Txs[1].TxID.String())
assert.Equal(t, common.TxTypeTransferToEthAddr, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
@@ -503,16 +500,12 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
// initial PoolExit, which now is valid as B has enough Balance
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
assert.Equal(t, 0, len(oL1CoordTxs))
-assert.Equal(t, 1, len(oL2Txs))
+assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
// the Exit that was not accepted at the batch2
assert.Equal(t, expectedTxID1, oL2Txs[0].TxID.String())
assert.Equal(t, expectedTxID0, discardedL2Txs[0].TxID.String())
assert.Equal(t, common.TxTypeExit, oL2Txs[0].Type)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
@@ -539,7 +532,7 @@ func TestTransferToBjj(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -557,7 +550,7 @@ func TestTransferToBjj(t *testing.T) {
// batch1 to freeze L1UserTxs that will create some accounts with
// positive balance
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
@@ -575,7 +568,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 4, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
@@ -602,7 +595,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
@@ -630,7 +623,7 @@ func TestTransferToBjj(t *testing.T) {
l1UserTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
// We expect the coordinator to add an L1CoordTx to create an account
@@ -670,7 +663,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -685,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// 8 transfers from the same account
@@ -717,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
// transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -752,7 +745,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -767,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
batchPoolL2 := `
@@ -801,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -816,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 3
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -832,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
// batch 4
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
@@ -865,7 +858,7 @@ func TestProcessL2Selection(t *testing.T) {
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
-txsel := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
@@ -880,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) {
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
-_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
-// 8 transfers from the same account
+// 3 transfers from the same account
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) A-B: 10 (126)
@@ -896,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) {
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
-// batch 2 to create some accounts with positive balance, and do 8 L2Tx transfers from account A
+// batch 2 to create some accounts with positive balance, and do 3 L2Tx transfers from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
assert.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
@@ -921,3 +914,236 @@ func TestProcessL2Selection(t *testing.T) {
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
// This test recreates the case where the pool contains valid txs with a low
// fee and invalid txs (insufficient sender balance) with a higher fee
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
CreateAccountDeposit(0) B: 0
> batchL1 // Batch1: freeze L1User{3}
> batchL1 // Batch2: forge L1User{3}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
tilCfgExtra := til.ConfigExtra{
BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
CoordUser: "Coord",
}
blocks, err := tc.GenerateBlocks(set)
require.NoError(t, err)
err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
require.NoError(t, err)
err = tc.FillBlocksForgedL1UserTxs(blocks)
require.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, historyDB := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// Insert blocks into DB
for i := range blocks {
err = historyDB.AddBlockSCData(&blocks[i])
assert.NoError(t, err)
}
err = historyDB.UpdateTokenValue(common.EmptyAddr, 1000)
require.NoError(t, err)
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 5,
MaxTx: 5,
MaxL1Tx: 3,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
// batch 2 to create the accounts (from L1UserTxs)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
// select L1 & L2 txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 3, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 0, len(oL2Txs))
require.Equal(t, 0, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 3. The A-B txs have a lower fee, but are the only ones possible
// with the current account balances, as the B-A tx of amount 40 will
// not be included: it is processed before B has received enough
// balance (the TxSelector sorts by Fee and then by Nonce).
batchPoolL2 := `
Type: PoolL2
PoolTransfer(0) B-A: 40 (130) // B-A txs are only possible once A-B txs are processed
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) B-A: 1 (126)
PoolTransfer(0) A-B: 20 (20)
PoolTransfer(0) A-B: 25 (150)
PoolTransfer(0) A-B: 20 (20)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 11, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
l1UserTxs = []common.L1Tx{}
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 3, len(oL2Txs)) // the 3 txs A-B
require.Equal(t, 8, len(discardedL2Txs)) // the 8 txs B-A
require.Equal(t, 0, len(accAuths))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// batch 4. In this Batch, account B has enough balance to send the txs
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
require.NoError(t, err)
require.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
require.Equal(t, 5, len(oL2Txs))
require.Equal(t, 3, len(discardedL2Txs))
require.Equal(t, 0, len(accAuths))
}
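The counts asserted above follow from processing the sorted pool against an in-memory copy of the balances: a transfer from a sender that has not yet been credited is discarded at that point, even if its fee is higher than transfers selected later. A toy sketch of such a greedy pass (hypothetical types, not the TxSelector API):

package main

import "fmt"

// transfer is a minimal stand-in for a pool L2 transfer.
type transfer struct {
	From, To string
	Amount   uint64
}

// selectTransfers applies already-sorted transfers against a balance map and
// discards the ones that cannot be covered at the moment they are evaluated.
func selectTransfers(balances map[string]uint64, sorted []transfer) (selected, discarded []transfer) {
	for _, tx := range sorted {
		if balances[tx.From] < tx.Amount {
			discarded = append(discarded, tx)
			continue
		}
		balances[tx.From] -= tx.Amount
		balances[tx.To] += tx.Amount
		selected = append(selected, tx)
	}
	return selected, discarded
}

func main() {
	balances := map[string]uint64{"A": 100, "B": 0}
	// The order stands in for the fee/nonce-sorted pool: the expensive B->A
	// transfer is evaluated before B has received anything from A.
	sorted := []transfer{
		{"B", "A", 40}, // discarded: B still has balance 0 at this point
		{"A", "B", 25},
		{"A", "B", 20},
		{"B", "A", 1},
	}
	sel, disc := selectTransfers(balances, sorted)
	fmt.Println(len(sel), len(disc)) // 3 1
}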
func TestL1UserFutureTxs(t *testing.T) {
set := `
Type: Blockchain
CreateAccountDeposit(0) Coord: 0
CreateAccountDeposit(0) A: 100
> batchL1 // freeze L1User{2}
CreateAccountDeposit(0) B: 18
> batchL1 // forge L1User{2}, freeze L1User{1}
> batchL1 // forge L1User{1}
> block
`
chainID := uint16(0)
tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
blocks, err := tc.GenerateBlocks(set)
assert.NoError(t, err)
hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
// restart nonces of TilContext, as will be set by generating directly
// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
tc.RestartNonces()
tpc := txprocessor.Config{
NLevels: 16,
MaxFeeTx: 10,
MaxTx: 10,
MaxL1Tx: 10,
ChainID: chainID,
}
// batch1 to freeze L1UserTxs
l1UserTxs := []common.L1Tx{}
l1UserFutureTxs := []common.L1Tx{}
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
batchPoolL2 := `
Type: PoolL2
PoolTransferToEthAddr(0) A-B: 10 (126)
`
poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add AccountCreationAuth for B
_ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
// batch 2 to create some accounts with positive balance, and do 1 L2Tx transfer from account A
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
l1UserFutureTxs =
til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
require.Equal(t, 2, len(l1UserTxs))
require.Equal(t, 1, len(l1UserFutureTxs))
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 2, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// no L2Tx selected due to the L1UserFutureTx; the L2Tx will be processed
// at the next batch once the L1UserTx of CreateAccount B is processed,
// even though there is an AccountCreationAuth for Account B.
assert.Equal(t, 0, len(oL2Txs))
assert.Equal(t, 1, len(discardedL2Txs))
assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+
" discarded at the current batch, as the receiver account does"+
" not exist yet, and there is a L1UserTx that will create that"+
" account in a future batch.",
discardedL2Txs[0].Info)
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
l1UserFutureTxs = []common.L1Tx{}
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
require.NoError(t, err)
assert.Equal(t, 1, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
// L2Tx selected as now the L1UserTx of CreateAccount B is processed
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
// generate a new L2Tx A-B and check that is processed
poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
require.NoError(t, err)
require.Equal(t, 1, len(poolL2Txs))
// add the PoolL2Txs to the l2DB
addL2Txs(t, txsel, poolL2Txs)
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
txsel.GetL1L2TxSelection(tpc, nil, nil)
require.NoError(t, err)
assert.Equal(t, 0, len(oL1UserTxs))
require.Equal(t, 0, len(oL1CoordTxs))
assert.Equal(t, 1, len(oL2Txs))
assert.Equal(t, 0, len(discardedL2Txs))
err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
txsel.localAccountsDB.CurrentBatch())
require.NoError(t, err)
}
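The discard message checked in this test comes from the frozen-queue lookup: before adding an L1CoordinatorTx to create the receiver of a TransferToEthAddr/TransferToBJJ, the selector scans the L1UserTxs frozen for the next batch and, if one of them will already create that account, leaves the L2Tx in the pool for the following batch instead of creating the account twice. A simplified sketch of that lookup, with stand-in types rather than the real common.L1Tx:

package main

import "fmt"

// l1Tx keeps only the fields the lookup needs; it is a stand-in for common.L1Tx.
type l1Tx struct {
	FromEthAddr string
	FromBJJ     string
	TokenID     uint32
}

// pendingAccountCreation reports whether one of the L1UserTxs frozen for the
// next batch will already create the (addr, bjj, tokenID) account, in which
// case the L2Tx can be discarded for now instead of adding an L1CoordinatorTx.
func pendingAccountCreation(futureTxs []l1Tx, tokenID uint32, addr, bjj string) bool {
	for _, tx := range futureTxs {
		if tx.FromEthAddr == addr && tx.TokenID == tokenID &&
			(tx.FromBJJ == bjj || bjj == "") { // "" stands in for an empty BJJ
			return true
		}
	}
	return false
}

func main() {
	frozenQueue := []l1Tx{{FromEthAddr: "0xB", FromBJJ: "bjjB", TokenID: 0}}
	// A TransferToEthAddr to 0xB with TokenID 0: the account will be created
	// by the frozen L1UserTx, so the coordinator should not create it again.
	fmt.Println(pendingAccountCreation(frozenQueue, 0, "0xB", "")) // true
}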