Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 11:26:44 +01:00)

Compare commits: fix/mockse...develop (37 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 9e40265226 |  |
|  | 7f971fb72b |  |
|  | e9b80d0c6d |  |
|  | 2dba819696 |  |
|  | 629c67a62e |  |
|  | bb71a94e22 |  |
|  | 6074f2d7fb |  |
|  | 6f1a44df02 |  |
|  | 4c99640b8c |  |
|  | 3fedcb7023 |  |
|  | 3f643f022a |  |
|  | b8d339d568 |  |
|  | 01ec1ca395 |  |
|  | 561f491d53 |  |
|  | 9245247ee4 |  |
|  | 6c1c157bc3 |  |
|  | f9ddf88c93 |  |
|  | 4dc44e70c4 |  |
|  | 7b6dd0899e |  |
|  | 14ead3ddf1 |  |
|  | a1eea43443 |  |
|  | 2125812e90 |  |
|  | f07fd82822 |  |
|  | d465d51e78 |  |
|  | e23d0a07d2 |  |
|  | 6d84d143a2 |  |
|  | 3b3d96e07c |  |
|  | e9be904c2f |  |
|  | 88b17cbe99 |  |
|  | 1ffe437538 |  |
|  | e2f9a1d7eb |  |
|  | 999dde5621 |  |
|  | aa82efc868 |  |
|  | 8c51cfb3d2 |  |
|  | 9a863eadc4 |  |
|  | dc198c35ae |  |
|  | ae0858df3d |  |
.github/workflows/release.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+name: goreleaser
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  goreleaser:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      -
+        name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.16
+      -
+        name: Run GoReleaser
+        uses: goreleaser/goreleaser-action@v2
+        with:
+          version: latest
+          args: release --rm-dist
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore (vendored, 1 changed line)

@@ -1 +1,2 @@
 bin/
+dist/
.goreleaser.yml (new file, 35 lines)

@@ -0,0 +1,35 @@
+before:
+  hooks:
+    - go mod download
+    - make migration-pack
+
+builds:
+  - main: ./cli/node/main.go
+    binary: node
+    id: node
+    goos:
+      - linux
+      - darwin
+    goarch:
+      - amd64
+
+archives:
+  - replacements:
+      darwin: Darwin
+      linux: Linux
+      windows: Windows
+      386: i386
+      amd64: x86_64
+
+checksum:
+  name_template: 'checksums.txt'
+
+snapshot:
+  name_template: "{{ .Tag }}-next"
+
+changelog:
+  sort: asc
+  filters:
+    exclude:
+      - '^docs:'
+      - '^test:'
LICENSE (new file, 661 lines)

@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[standard, unmodified text of the GNU AGPL v3: preamble, terms and conditions sections 0-17, and the "How to Apply These Terms to Your New Programs" appendix, ending with <https://www.gnu.org/licenses/>]
Makefile (12 changed lines)

@@ -3,8 +3,8 @@
 # Project variables.
 PACKAGE := github.com/hermeznetwork/hermez-node
 VERSION := $(shell git describe --tags --always)
-BUILD := $(shell git rev-parse --short HEAD)
-BUILD_DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
+COMMIT := $(shell git rev-parse --short HEAD)
+DATE := $(shell date +%Y-%m-%dT%H:%M:%S%z)
 PROJECT_NAME := $(shell basename "$(PWD)")

 # Go related variables.
@@ -23,7 +23,7 @@ CONFIG ?= $(GOBASE)/cli/node/cfg.buidler.toml
 POSTGRES_PASS ?= yourpasswordhere

 # Use linker flags to provide version/build settings.
-LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -X=main.Date=$(BUILD_DATE)"
+LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE)"

 # PID file will keep the process id of the server.
 PID_PROOF_MOCK := /tmp/.$(PROJECT_NAME).proof.pid
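The renamed linker flags only have an effect if cli/node/main.go declares matching package-level variables. That file is not part of this comparison, so the following is only a minimal sketch under the assumption that lowercase version, commit and date variables exist in package main (the same naming convention GoReleaser injects by default):

```go
// Minimal sketch (assumed names): package-level variables that the new
// -X main.version / main.commit / main.date linker flags would populate.
package main

import "fmt"

var (
	version = "dev"     // overridden by -X main.version=$(VERSION)
	commit  = "none"    // overridden by -X main.commit=$(COMMIT)
	date    = "unknown" // overridden by -X main.date=$(DATE)
)

func main() {
	fmt.Printf("hermez-node %s (commit %s, built %s)\n", version, commit, date)
}
```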
@@ -94,11 +94,11 @@ install:
 	@echo " > Checking if there is any missing dependencies..."
 	$(GOENVVARS) go get $(GOCMD)/... $(get)

-## run: Run Hermez node.
-run:
+## run-node: Run Hermez node.
+run-node:
 	@bash -c "$(MAKE) clean build"
 	@echo " > Running $(PROJECT_NAME)"
-	@$(GOBIN)/$(GOBINARY) --mode $(MODE) --cfg $(CONFIG) run
+	@$(GOBIN)/$(GOBINARY) run --mode $(MODE) --cfg $(CONFIG)

 ## run-proof-mock: Run proof server mock API.
 run-proof-mock: stop-proof-mock
@@ -25,13 +25,13 @@ there are more information about the config file into [cli/node/README.md](cli/n
 After setting the config, you can build and run the Hermez Node as a synchronizer:

 ```shell
-$ make run
+$ make run-node
 ```

 Or build and run as a coordinator, and also passing the config file from other location:

 ```shell
-$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run
+$ MODE=sync CONFIG=cli/node/cfg.buidler.toml make run-node
 ```

 To check the useful make commands:
@@ -21,7 +21,7 @@ func (a *API) postAccountCreationAuth(c *gin.Context) {
 	}
 	// API to common + verify signature
 	commonAuth := accountCreationAuthAPIToCommon(&apiAuth)
-	if !commonAuth.VerifySignature(a.chainID, a.hermezAddress) {
+	if !commonAuth.VerifySignature(a.cg.ChainID, a.hermezAddress) {
 		retBadReq(errors.New("invalid signature"), c)
 		return
 	}
api/api.go (47 changed lines)

@@ -1,3 +1,27 @@
+/*
+Package api implements the public interface of the hermez-node using a HTTP REST API.
+There are two subsets of endpoints:
+- coordinatorEndpoints: used to receive L2 transactions and account creation authorizations. Targeted for wallets.
+- explorerEndpoints: used to provide all sorts of information about the network. Targeted for explorers and similar services.
+
+About the configuration of the API:
+- The API is supposed to be launched using the cli found at the package cli/node, and configured through the configuration file.
+- The mentioned configuration file allows exposing any combination of the endpoint subsets.
+- Although the API can run in a "standalone" manner using the serveapi command, it won't work properly
+unless another process acting as a coord or sync is filling the HistoryDB.
+
+Design principles and considerations:
+- In order to decouple the API process from the rest of the node, all the communication between this package and the rest of
+the system is done through the SQL database. As a matter of fact, the only public function of the package is the constructor NewAPI.
+All the information needed for the API to work should be obtained through the configuration file of the cli or the database.
+- The format of the requests / responses doesn't match directly with the common types, and for this reason, the package api/apitypes is used
+to facilitate the format conversion. Most of the time, this is done directly at the db level.
+- The API endpoints are fully documented using OpenAPI aka Swagger. All the endpoints are tested against the spec to ensure consistency
+between implementation and specification. To get a sense of which endpoints exist and how they work, it's strongly recommended to check this specification.
+The specification can be found at api/swagger.yml.
+- In general, all the API endpoints produce queries to the SQL database in order to retrieve / insert the requested information. The most notable exceptions to this are
+the /config endpoint, which returns a static object generated at construction time, and the /state, which also is retrieved from the database, but it's generated by API/stateapiupdater package.
+*/
 package api

 import (
@@ -7,6 +31,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/db/l2db"
+	"github.com/hermeznetwork/hermez-node/metric"
 	"github.com/hermeznetwork/tracerr"
 )

@@ -15,7 +40,6 @@ type API struct {
 	h             *historydb.HistoryDB
 	cg            *configAPI
 	l2            *l2db.L2DB
-	chainID       uint16
 	hermezAddress ethCommon.Address
 }

@@ -27,7 +51,6 @@ func NewAPI(
 	l2db *l2db.L2DB,
 ) (*API, error) {
 	// Check input
-	// TODO: is stateDB only needed for explorer endpoints or for both?
 	if coordinatorEndpoints && l2db == nil {
 		return nil, tracerr.Wrap(errors.New("cannot serve Coordinator endpoints without L2DB"))
 	}
@@ -44,17 +67,25 @@ func NewAPI(
 			RollupConstants:   *newRollupConstants(consts.Rollup),
 			AuctionConstants:  consts.Auction,
 			WDelayerConstants: consts.WDelayer,
+			ChainID:           consts.ChainID,
 		},
 		l2:            l2db,
-		chainID:       consts.ChainID,
 		hermezAddress: consts.HermezAddress,
 	}

+	middleware, err := metric.PrometheusMiddleware()
+	if err != nil {
+		return nil, err
+	}
+	server.Use(middleware)
+
+	server.NoRoute(a.noRoute)
+
 	v1 := server.Group("/v1")

 	// Add coordinator endpoints
 	if coordinatorEndpoints {
-		// Account
+		// Account creation authorization
 		v1.POST("/account-creation-authorization", a.postAccountCreationAuth)
 		v1.GET("/account-creation-authorization/:hezEthereumAddress", a.getAccountCreationAuth)
 		// Transaction
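The metric package itself is not part of this comparison, so the behavior of metric.PrometheusMiddleware() is not shown here. Purely as an illustrative sketch under that caveat, a gin Prometheus middleware of this kind usually records per-route request durations and status codes, roughly like this (all names and metric labels are assumptions):

```go
package metric

import (
	"strconv"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
)

// PrometheusMiddleware is a hypothetical sketch of a gin middleware that
// observes request duration, labelled by route and HTTP status code.
func PrometheusMiddleware() (gin.HandlerFunc, error) {
	durations := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "hermez",
		Name:      "http_request_duration_seconds",
		Help:      "Duration of HTTP requests handled by the API.",
	}, []string{"path", "status"})
	if err := prometheus.Register(durations); err != nil {
		return nil, err
	}
	return func(c *gin.Context) {
		start := time.Now()
		c.Next() // run the actual handler chain first
		durations.WithLabelValues(
			c.FullPath(),
			strconv.Itoa(c.Writer.Status()),
		).Observe(time.Since(start).Seconds())
	}, nil
}
```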
@@ -72,17 +103,23 @@ func NewAPI(
 		// Transaction
 		v1.GET("/transactions-history", a.getHistoryTxs)
 		v1.GET("/transactions-history/:id", a.getHistoryTx)
-		// Status
+		// Batches
 		v1.GET("/batches", a.getBatches)
 		v1.GET("/batches/:batchNum", a.getBatch)
 		v1.GET("/full-batches/:batchNum", a.getFullBatch)
+		// Slots
 		v1.GET("/slots", a.getSlots)
 		v1.GET("/slots/:slotNum", a.getSlot)
+		// Bids
 		v1.GET("/bids", a.getBids)
+		// State
 		v1.GET("/state", a.getState)
+		// Config
 		v1.GET("/config", a.getConfig)
+		// Tokens
 		v1.GET("/tokens", a.getTokens)
 		v1.GET("/tokens/:id", a.getToken)
+		// Coordinators
 		v1.GET("/coordinators", a.getCoordinators)
 	}

@@ -40,8 +40,11 @@ type Pendinger interface {
 	New() Pendinger
 }

-const apiPort = "4010"
-const apiURL = "http://localhost:" + apiPort + "/v1/"
+const (
+	apiPort = "4010"
+	apiIP   = "http://localhost:"
+	apiURL  = apiIP + apiPort + "/v1/"
+)

 var SetBlockchain = `
 Type: Blockchain
@@ -215,6 +218,7 @@ func TestMain(m *testing.M) {
 	chainID := uint16(0)
 	_config := getConfigTest(chainID)
 	config = configAPI{
+		ChainID:           chainID,
 		RollupConstants:   *newRollupConstants(_config.RollupConstants),
 		AuctionConstants:  _config.AuctionConstants,
 		WDelayerConstants: _config.WDelayerConstants,
@@ -522,11 +526,16 @@ func TestMain(m *testing.M) {
 		WithdrawalDelay: uint64(3000),
 	}

-	stateAPIUpdater = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
+	stateAPIUpdater, err = stateapiupdater.NewUpdater(hdb, nodeConfig, &common.SCVariables{
 		Rollup:   rollupVars,
 		Auction:  auctionVars,
 		WDelayer: wdelayerVars,
-	}, constants)
+	}, constants, &stateapiupdater.RecommendedFeePolicy{
+		PolicyType: stateapiupdater.RecommendedFeePolicyTypeAvgLastHour,
+	})
+	if err != nil {
+		panic(err)
+	}

 	// Generate test data, as expected to be received/sended from/to the API
 	testCoords := genTestCoordinators(commonCoords)
@@ -841,6 +850,25 @@ func doBadReq(method, path string, reqBody io.Reader, expectedResponseCode int)
 	return swagger.ValidateResponse(ctx, responseValidationInput)
 }

+func doSimpleReq(method, endpoint string) (string, error) {
+	client := &http.Client{}
+	httpReq, err := http.NewRequest(method, endpoint, nil)
+	if err != nil {
+		return "", tracerr.Wrap(err)
+	}
+	resp, err := client.Do(httpReq)
+	if err != nil {
+		return "", tracerr.Wrap(err)
+	}
+	//nolint
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", tracerr.Wrap(err)
+	}
+	return string(body), nil
+}
+
 // test helpers

 func getTimestamp(blockNum int64, blocks []common.Block) time.Time {
@@ -1,3 +1,11 @@
+/*
+Package apitypes is used to map the common types used across the node with the format expected by the API.
+
+This is done using different strategies:
+- Marshallers: they get triggered when the API marshals the response structs into JSONs
+- Scanners/Valuers: they get triggered when a struct is sent/received to/from the SQL database
+- Ad hoc functions: when the already mentioned strategies are not suitable, functions are added to the structs to facilitate the conversions
+*/
 package apitypes

 import (
@@ -57,6 +57,7 @@ type Config struct {
 }

 type configAPI struct {
+	ChainID           uint16                   `json:"chainId"`
 	RollupConstants   rollupConstants          `json:"hermez"`
 	AuctionConstants  common.AuctionConstants  `json:"auction"`
 	WDelayerConstants common.WDelayerConstants `json:"withdrawalDelayer"`
@@ -8,6 +8,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
 	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/metric"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/lib/pq"
 	"github.com/russross/meddler"

@@ -46,7 +47,9 @@ var (

 func retSQLErr(err error, c *gin.Context) {
 	log.Warnw("HTTP API SQL request error", "err", err)
-	errMsg := tracerr.Unwrap(err).Error()
+	unwrapErr := tracerr.Unwrap(err)
+	metric.CollectError(unwrapErr)
+	errMsg := unwrapErr.Error()
 	retDupKey := func(errCode pq.ErrorCode) {
 		// https://www.postgresql.org/docs/current/errcodes-appendix.html
 		if errCode == "23505" {

@@ -80,6 +83,7 @@ func retSQLErr(err error, c *gin.Context) {

 func retBadReq(err error, c *gin.Context) {
 	log.Warnw("HTTP API Bad request error", "err", err)
+	metric.CollectError(err)
 	c.JSON(http.StatusBadRequest, errorMsg{
 		Message: err.Error(),
 	})
api/noroute.go (new file, 21 lines)
@@ -0,0 +1,21 @@
+package api
+
+import (
+	"net/http"
+	"regexp"
+
+	"github.com/gin-gonic/gin"
+)
+
+func (a *API) noRoute(c *gin.Context) {
+	matched, _ := regexp.MatchString(`^/v[0-9]+/`, c.Request.URL.Path)
+	if !matched {
+		c.JSON(http.StatusNotFound, gin.H{
+			"error": "Version not provided, please provide a valid version in the path such as v1",
+		})
+		return
+	}
+	c.JSON(http.StatusNotFound, gin.H{
+		"error": "404 page not found",
+	})
+}
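Editor's note: the handler above is plain gin. A minimal Go sketch (not part of the diff) of how such a handler is typically registered on the engine; the wiring inside api.NewAPI is not shown in this compare view, so the setup and port below are assumptions for illustration only:

package main

import (
	"net/http"
	"regexp"

	"github.com/gin-gonic/gin"
)

// noRoute mirrors api/noroute.go: versioned paths get a plain 404,
// everything else gets a hint about the missing /v1 prefix.
func noRoute(c *gin.Context) {
	matched, _ := regexp.MatchString(`^/v[0-9]+/`, c.Request.URL.Path)
	if !matched {
		c.JSON(http.StatusNotFound, gin.H{
			"error": "Version not provided, please provide a valid version in the path such as v1",
		})
		return
	}
	c.JSON(http.StatusNotFound, gin.H{"error": "404 page not found"})
}

func main() {
	engine := gin.Default()
	engine.NoRoute(noRoute)  // gin calls this for any unmatched route
	_ = engine.Run(":4010")  // hypothetical port, matching the apiPort test constant
}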
api/noroute_test.go (new file, 29 lines)
@@ -0,0 +1,29 @@
+package api
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNoRouteVersionNotProvided(t *testing.T) {
+	endpoint := apiIP + apiPort + "/"
+	// not using doGoodReq, because internally
+	// there is a method FindRoute that checks the route and returns an error
+	resp, err := doSimpleReq("GET", endpoint)
+	assert.NoError(t, err)
+	assert.Equal(t,
+		"{\"error\":\"Version not provided, please provide a valid version in the path such as v1\"}\n",
+		resp)
+}
+
+func TestNoRoute(t *testing.T) {
+	endpoint := apiURL
+	// not using doGoodReq, because internally
+	// there is a method FindRoute that checks the route and returns an error
+	resp, err := doSimpleReq("GET", endpoint)
+	assert.NoError(t, err)
+	assert.Equal(t,
+		"{\"error\":\"404 page not found\"}\n",
+		resp)
+}
@@ -1,11 +1,20 @@
+/*
+Package stateapiupdater is responsible for generating and storing the object response of the GET /state endpoint exposed through the api package.
+This object is extensively defined at the OpenAPI spec located at api/swagger.yml.
+
+Deployment considerations: in a setup where multiple processes are used (dedicated api process, separated coord / sync, ...), only one process should take care
+of using this package.
+*/
 package stateapiupdater

 import (
 	"database/sql"
+	"fmt"
 	"sync"

 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/db/historydb"
+	"github.com/hermeznetwork/hermez-node/log"
 	"github.com/hermeznetwork/tracerr"
 )

@@ -17,11 +26,45 @@ type Updater struct {
 	vars   common.SCVariablesPtr
 	consts historydb.Constants
 	rw     sync.RWMutex
+	rfp    *RecommendedFeePolicy
+}
+
+// RecommendedFeePolicy describes how the recommended fee is calculated
+type RecommendedFeePolicy struct {
+	PolicyType  RecommendedFeePolicyType `validate:"required"`
+	StaticValue float64
+}
+
+// RecommendedFeePolicyType describes the different available recommended fee strategies
+type RecommendedFeePolicyType string
+
+const (
+	// RecommendedFeePolicyTypeStatic always gives the same StaticValue as recommended fee
+	RecommendedFeePolicyTypeStatic RecommendedFeePolicyType = "Static"
+	// RecommendedFeePolicyTypeAvgLastHour sets the recommended fee using the average fee of the last hour
+	RecommendedFeePolicyTypeAvgLastHour RecommendedFeePolicyType = "AvgLastHour"
+)
+
+func (rfp *RecommendedFeePolicy) valid() bool {
+	switch rfp.PolicyType {
+	case RecommendedFeePolicyTypeStatic:
+		if rfp.StaticValue == 0 {
+			log.Warn("RecommendedFee is set to 0 USD, and the policy is static")
+		}
+		return true
+	case RecommendedFeePolicyTypeAvgLastHour:
+		return true
+	default:
+		return false
+	}
 }

 // NewUpdater creates a new Updater
 func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *common.SCVariables,
-	consts *historydb.Constants) *Updater {
+	consts *historydb.Constants, rfp *RecommendedFeePolicy) (*Updater, error) {
+	if ok := rfp.valid(); !ok {
+		return nil, tracerr.Wrap(fmt.Errorf("Invalid recommended fee policy: %v", rfp.PolicyType))
+	}
 	u := Updater{
 		hdb:    hdb,
 		config: *config,

@@ -31,9 +74,10 @@ func NewUpdater(hdb *historydb.HistoryDB, config *historydb.NodeConfig, vars *co
 			ForgeDelay: config.ForgeDelay,
 		},
 		},
+		rfp: rfp,
 	}
 	u.SetSCVars(vars.AsPtr())
-	return &u
+	return &u, nil
 }

 // Store the State in the HistoryDB

@@ -65,13 +109,27 @@ func (u *Updater) SetSCVars(vars *common.SCVariablesPtr) {

 // UpdateRecommendedFee update Status.RecommendedFee information
 func (u *Updater) UpdateRecommendedFee() error {
-	recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
-	if err != nil {
-		return tracerr.Wrap(err)
+	switch u.rfp.PolicyType {
+	case RecommendedFeePolicyTypeStatic:
+		u.rw.Lock()
+		u.state.RecommendedFee = common.RecommendedFee{
+			ExistingAccount:        u.rfp.StaticValue,
+			CreatesAccount:         u.rfp.StaticValue,
+			CreatesAccountInternal: u.rfp.StaticValue,
+		}
+		u.rw.Unlock()
+	case RecommendedFeePolicyTypeAvgLastHour:
+		recommendedFee, err := u.hdb.GetRecommendedFee(u.config.MinFeeUSD, u.config.MaxFeeUSD)
+		if err != nil {
+			return tracerr.Wrap(err)
+		}
+		u.rw.Lock()
+		u.state.RecommendedFee = *recommendedFee
+		u.rw.Unlock()
+	default:
+		return tracerr.New("Invalid recommended fee policy")
 	}
-	u.rw.Lock()
-	u.state.RecommendedFee = *recommendedFee
-	u.rw.Unlock()
 	return nil
 }
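Editor's note: a minimal Go sketch (not part of the diff) of how the new constructor and policy fit together, assuming the HistoryDB, node config, SC variables and constants are already built as in the surrounding test setup; the helper name and the 0.99 USD value are illustrative only:

package example

import (
	"log"

	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/db/historydb"
)

// newStaticFeeUpdater is a hypothetical helper: it only shows the policy
// argument added by this change, leaving DB/config/vars to the caller.
func newStaticFeeUpdater(hdb *historydb.HistoryDB, cfg *historydb.NodeConfig,
	vars *common.SCVariables, consts *historydb.Constants) *stateapiupdater.Updater {
	rfp := &stateapiupdater.RecommendedFeePolicy{
		PolicyType:  stateapiupdater.RecommendedFeePolicyTypeStatic,
		StaticValue: 0.99, // USD, illustrative value taken from cfg.buidler.toml
	}
	u, err := stateapiupdater.NewUpdater(hdb, cfg, vars, consts, rfp)
	if err != nil {
		log.Fatal(err) // the constructor only fails on an unknown PolicyType
	}
	// With the Static policy this fills RecommendedFee without querying the DB;
	// with AvgLastHour it falls back to hdb.GetRecommendedFee.
	if err := u.UpdateRecommendedFee(); err != nil {
		log.Fatal(err)
	}
	return u
}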
@@ -3040,10 +3040,15 @@ components:
       - maxEmergencyModeTime
       - hermezRollup
       additionalProperties: false
+    chainId:
+      type: integer
+      description: Id of the chain
+      example: 27
     required:
       - hermez
       - auction
       - withdrawalDelayer
+      - chainId
     additionalProperties: false
     Error:
       type: object
@@ -187,7 +187,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
 			poolTx.TokenID, account.TokenID))
 	}
 	// Check signature
-	if !poolTx.VerifySignature(a.chainID, account.BJJ) {
+	if !poolTx.VerifySignature(a.cg.ChainID, account.BJJ) {
 		return tracerr.Wrap(errors.New("wrong signature"))
 	}
 	return nil
@@ -24,6 +24,7 @@ COMMANDS:
    genbjj    Generate a new BabyJubJub key
    wipesql   Wipe the SQL DB (HistoryDB and L2DB) and the StateDBs, leaving the DB in a clean state
    run       Run the hermez-node in the indicated mode
+   serveapi  Serve the API only
    discard   Discard blocks up to a specified block number
    help, h   Shows a list of commands or help for one command

@@ -54,6 +55,10 @@ To read the documentation of each configuration parameter, please check the
 with `Coordinator` are only used in coord mode, and don't need to be defined
 when running the coordinator in sync mode
+
+When running the API in standalone mode, the required configuration is a subset
+of the node configuration. Please check the `type APIServer` at
+[config/config.go](../../config/config.go) to learn about all the parameters.

 ### Notes

 - The private key corresponding to the parameter `Coordinator.ForgerAddress` needs to be imported in the ethereum keystore

@@ -68,6 +73,9 @@ when running the coordinator in sync mode
   monitor the size of the folder to avoid running out of space.
 - The node requires a PostgreSQL database. The parameters of the server and
   database must be set in the `PostgreSQL` section.
+- The node requires a web3 RPC server to work. The node has only been tested
+  with geth and may not work correctly with other ethereum node
+  implementations.

 ## Building

@@ -107,6 +115,14 @@ Run the node in mode coordinator:
 ./node run --mode coord --cfg cfg.buidler.toml
 ```

+Serve the API in standalone mode. This command allows serving the API just
+with access to the PostgreSQL database that a node is using. Several instances
+of `serveapi` can be running at the same time with a single PostgreSQL
+database:
+```shell
+./node serveapi --mode coord --cfg cfg.buidler.toml
+```
+
 Import an ethereum private key into the keystore:
 ```shell
 ./node importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x618b35096c477aab18b11a752be619f0023a539bb02dd6c813477a6211916cde
@@ -35,7 +35,7 @@ Symbol = "SUSHI"
|
|||||||
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
|
Addr = "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2"
|
||||||
|
|
||||||
[Debug]
|
[Debug]
|
||||||
APIAddress = "localhost:12345"
|
APIAddress = "0.0.0.0:12345"
|
||||||
MeddlerLogs = true
|
MeddlerLogs = true
|
||||||
GinDebugMode = true
|
GinDebugMode = true
|
||||||
|
|
||||||
@@ -145,3 +145,11 @@ Coordinator = true
|
|||||||
BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
|
BatchPath = "/tmp/iden3-test/hermez/batchesdebug"
|
||||||
LightScrypt = true
|
LightScrypt = true
|
||||||
# RollupVerifierIndex = 0
|
# RollupVerifierIndex = 0
|
||||||
|
|
||||||
|
[RecommendedFeePolicy]
|
||||||
|
# Strategy used to calculate the recommended fee that the API will expose.
|
||||||
|
# Available options:
|
||||||
|
# - Static: always return the same value (StaticValue) in USD
|
||||||
|
# - AvgLastHour: calculate using the average fee of the forged transactions during the last hour
|
||||||
|
PolicyType = "Static"
|
||||||
|
StaticValue = 0.99
|
||||||
@@ -1,10 +1,10 @@
 #!/bin/sh

 # Non-Boot Coordinator
-go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3
+go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x30f5fddb34cd4166adb2c6003fa6b18f380fd2341376be42cf1c7937004ac7a3

 # Boot Coordinator
-go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563
+go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0xa8a54b2d8197bc0b19bb8a084031be71835580a01e70a45a13babd16c9bc1563

 # FeeAccount
-go run . --mode coord --cfg cfg.buidler.toml importkey --privatekey 0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd
+go run . importkey --mode coord --cfg cfg.buidler.toml --privatekey 0x3a9270c020e169097808da4b02e8d9100be0f8a38cfad3dcfc0b398076381fdd
@@ -35,18 +35,18 @@ const (
 )

 var (
-	// Version represents the program based on the git tag
-	Version = "v0.1.0"
-	// Build represents the program based on the git commit
-	Build = "dev"
-	// Date represents the date of application was built
-	Date = ""
+	// version represents the program based on the git tag
+	version = "v0.1.0"
+	// commit represents the program based on the git commit
+	commit = "dev"
+	// date represents the date the application was built
+	date = ""
 )

 func cmdVersion(c *cli.Context) error {
-	fmt.Printf("Version = \"%v\"\n", Version)
-	fmt.Printf("Build = \"%v\"\n", Build)
-	fmt.Printf("Date = \"%v\"\n", Date)
+	fmt.Printf("Version = \"%v\"\n", version)
+	fmt.Printf("Build = \"%v\"\n", commit)
+	fmt.Printf("Date = \"%v\"\n", date)
 	return nil
 }

@@ -421,7 +421,7 @@ func getConfigAPIServer(c *cli.Context) (*ConfigAPIServer, error) {
 func main() {
 	app := cli.NewApp()
 	app.Name = "hermez-node"
-	app.Version = Version
+	app.Version = version
 	flags := []cli.Flag{
 		&cli.StringFlag{
 			Name: flagMode,

@@ -485,6 +485,7 @@ func main() {
 			Aliases: []string{},
 			Usage:   "Serve the API only",
 			Action:  cmdServeAPI,
+			Flags:   flags,
 		},
 		{
 			Name: "discard",
@@ -22,7 +22,7 @@ type L1Tx struct {
 	// - L1UserTx: 0
 	// - L1CoordinatorTx: 1
 	TxID TxID `meddler:"id"`
-	// ToForgeL1TxsNum indicates in which the tx was forged / will be forged
+	// ToForgeL1TxsNum indicates in which L1UserTx queue the tx was forged / will be forged
 	ToForgeL1TxsNum *int64 `meddler:"to_forge_l1_txs_num"`
 	Position int `meddler:"position"`
 	// UserOrigin is set to true if the tx was originated by a user, false if it was
@@ -8,6 +8,7 @@ import (

 	"github.com/BurntSushi/toml"
 	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/hermeznetwork/hermez-node/api/stateapiupdater"
 	"github.com/hermeznetwork/hermez-node/common"
 	"github.com/hermeznetwork/hermez-node/priceupdater"
 	"github.com/hermeznetwork/tracerr"

@@ -299,7 +300,8 @@ type Node struct {
 	} `validate:"required"`
 	PostgreSQL PostgreSQL `validate:"required"`
 	Web3 struct {
-		// URL is the URL of the web3 ethereum-node RPC server
+		// URL is the URL of the web3 ethereum-node RPC server. Only
+		// geth is officially supported.
 		URL string `validate:"required"`
 	} `validate:"required"`
 	Synchronizer struct {

@@ -346,8 +348,9 @@ type Node struct {
 		// can wait to establish a SQL connection
 		SQLConnectionTimeout Duration
 	} `validate:"required"`
-	Debug       NodeDebug   `validate:"required"`
-	Coordinator Coordinator `validate:"-"`
+	RecommendedFeePolicy stateapiupdater.RecommendedFeePolicy `validate:"required"`
+	Debug                NodeDebug                            `validate:"required"`
+	Coordinator          Coordinator                          `validate:"-"`
 }

 // APIServer is the api server configuration parameters
@@ -80,6 +80,7 @@ type BatchInfo struct {
 	PipelineNum  int
 	BatchNum     common.BatchNum
 	ServerProof  prover.Client
+	ProofStart   time.Time
 	ZKInputs     *common.ZKInputs
 	Proof        *prover.Proof
 	PublicInputs []*big.Int
@@ -1,3 +1,43 @@
+/*
+Package coordinator handles all the logic related to forging batches as a
+coordinator in the hermez network.
+
+The forging of batches is done with a pipeline in order to allow multiple
+batches to be forged in parallel. The maximum number of batches that can be
+forged in parallel is determined by the number of available proof servers.
+
+The Coordinator begins with the pipeline stopped. The main Coordinator
+goroutine keeps listening for synchronizer events sent by the node package,
+which allow the coordinator to determine if the configured forger address is
+allowed to forge at the current block or not. When the forger address becomes
+allowed to forge, the pipeline is started, and when it stops being allowed
+to forge, the pipeline is stopped.
+
+The Pipeline consists of two goroutines. The first one is in charge of
+preparing a batch internally, which involves making a selection of transactions
+and calculating the ZKInputs for the batch proof, and sending these ZKInputs to
+an idle proof server. This goroutine will keep preparing batches while there
+are idle proof servers, if the forging policy determines that a batch should be
+forged in the current state. The second goroutine is in charge of waiting for
+the proof server to finish computing the proof, retrieving it, preparing the
+arguments for the `forgeBatch` Rollup transaction, and sending the result to
+the TxManager. All the batch information moves between functions and
+goroutines via the BatchInfo struct.
+
+Finally, the TxManager contains a single goroutine that makes forgeBatch
+ethereum transactions for the batches sent by the Pipeline, and keeps them in a
+list to check them periodically. In the periodic checks, the ethereum
+transaction is checked for success, and it's only forgotten after a
+number of confirmation blocks have passed after being successfully mined. At
+any point if a transaction failure is detected, the TxManager can signal the
+Coordinator to reset the Pipeline in order to reforge the failed batches.
+
+The Coordinator goroutine acts as a manager. The synchronizer events (which
+notify about new blocks and associated new state) that it receives are
+broadcast to the Pipeline and the TxManager. This allows the Coordinator,
+Pipeline and TxManager to have a copy of the current hermez network state
+required to perform their duties.
+*/
 package coordinator

 import (
@@ -5,6 +5,7 @@ import (
 	"database/sql"
 	"fmt"
 	"math/big"
+	"strconv"
 	"sync"
 	"time"

@@ -14,6 +15,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/eth"
 	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/metric"
 	"github.com/hermeznetwork/hermez-node/prover"
 	"github.com/hermeznetwork/hermez-node/synchronizer"
 	"github.com/hermeznetwork/hermez-node/txselector"

@@ -246,6 +248,7 @@ func (p *Pipeline) handleForgeBatch(ctx context.Context,

 	// 3. Send the ZKInputs to the proof server
 	batchInfo.ServerProof = serverProof
+	batchInfo.ProofStart = time.Now()
 	if err := p.sendServerProof(ctx, batchInfo); ctx.Err() != nil {
 		return nil, ctx.Err()
 	} else if err != nil {

@@ -520,15 +523,30 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,
 		if err != nil {
 			return nil, nil, tracerr.Wrap(err)
 		}
+		// l1UserFutureTxs are the l1UserTxs that are not being forged
+		// in the next batch, but that are also in the queue for the
+		// future batches
+		l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum + 1)
+		if err != nil {
+			return nil, nil, tracerr.Wrap(err)
+		}
+
 		coordIdxs, auths, l1UserTxs, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
-			p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs)
+			p.txSelector.GetL1L2TxSelection(p.cfg.TxProcessorConfig, _l1UserTxs, l1UserFutureTxs)
 		if err != nil {
 			return nil, nil, tracerr.Wrap(err)
 		}
 	} else {
+		// get l1UserFutureTxs which are all the l1 pending in all the
+		// queues
+		l1UserFutureTxs, err := p.historyDB.GetUnforgedL1UserFutureTxs(p.state.lastForgeL1TxsNum) //nolint:gomnd
+		if err != nil {
+			return nil, nil, tracerr.Wrap(err)
+		}
+
 		// 2b: only L2 txs
 		coordIdxs, auths, l1CoordTxs, poolL2Txs, discardedL2Txs, err =
-			p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig)
+			p.txSelector.GetL2TxSelection(p.cfg.TxProcessorConfig, l1UserFutureTxs)
 		if err != nil {
 			return nil, nil, tracerr.Wrap(err)
 		}

@@ -602,6 +620,9 @@ func (p *Pipeline) forgeBatch(batchNum common.BatchNum) (batchInfo *BatchInfo,

 // waitServerProof gets the generated zkProof & sends it to the SmartContract
 func (p *Pipeline) waitServerProof(ctx context.Context, batchInfo *BatchInfo) error {
+	defer metric.MeasureDuration(metric.WaitServerProof, batchInfo.ProofStart,
+		batchInfo.BatchNum.BigInt().String(), strconv.Itoa(batchInfo.PipelineNum))
+
 	proof, pubInputs, err := batchInfo.ServerProof.GetProof(ctx) // blocking call,
 	// until not resolved don't continue. Returns when the proof server has calculated the proof
 	if err != nil {
@@ -1,3 +1,24 @@
+/*
+Package historydb is responsible for storing and retrieving the historic data of the Hermez network.
+It's mostly but not exclusively used by the API and the synchronizer.
+
+Apart from the logic defined in this package, it's important to notice that there are some triggers defined in the
+migration files that have to be taken into consideration to understand the results of some queries. This is especially true
+for reorgs: all the data is directly or indirectly related to a block, which makes handling reorgs as easy as deleting the
+reorged blocks from the block table, and all related items will be dropped in cascade. This is not the only case; in general,
+functions defined in this package that are somehow affected by the SQL-level logic have a special mention in the function description.
+
+Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.
+
+This package is split in different files following these ideas:
+- historydb.go: constructor and functions used by packages other than the api.
+- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
+to restrict the maximum concurrent connections to the database.
+- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however
+most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
+In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
+- nodeinfo.go: used to handle the interfaces and structs that allow communication across instances running in different machines/processes but sharing the same database.
+*/
 package historydb

 import (

@@ -751,6 +772,24 @@ func (hdb *HistoryDB) GetUnforgedL1UserTxs(toForgeL1TxsNum int64) ([]common.L1Tx
 	return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
 }

+// GetUnforgedL1UserFutureTxs gets L1 User Txs to be forged after the L1Batch
+// with toForgeL1TxsNum (in one of the future batches, not in the next one).
+func (hdb *HistoryDB) GetUnforgedL1UserFutureTxs(toForgeL1TxsNum int64) ([]common.L1Tx, error) {
+	var txs []*common.L1Tx
+	err := meddler.QueryAll(
+		hdb.dbRead, &txs, // only L1 user txs can have batch_num set to null
+		`SELECT tx.id, tx.to_forge_l1_txs_num, tx.position, tx.user_origin,
+		tx.from_idx, tx.from_eth_addr, tx.from_bjj, tx.to_idx, tx.token_id,
+		tx.amount, NULL AS effective_amount,
+		tx.deposit_amount, NULL AS effective_deposit_amount,
+		tx.eth_block_num, tx.type, tx.batch_num
+		FROM tx WHERE batch_num IS NULL AND to_forge_l1_txs_num > $1
+		ORDER BY position;`,
+		toForgeL1TxsNum,
+	)
+	return db.SlicePtrsToSlice(txs).([]common.L1Tx), tracerr.Wrap(err)
+}
+
 // GetUnforgedL1UserTxsCount returns the count of unforged L1Txs (either in
 // open or frozen queues that are not yet forged)
 func (hdb *HistoryDB) GetUnforgedL1UserTxsCount() (int, error) {
@@ -699,34 +699,55 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
 		CreateAccountDeposit(1) B: 5
 		CreateAccountDeposit(1) C: 5
 		CreateAccountDeposit(1) D: 5

 		> block

+		> batchL1
+		> block
+
+		CreateAccountDeposit(1) E: 5
+		CreateAccountDeposit(1) F: 5
+		> block
+
 	`
 	tc := til.NewContext(uint16(0), 128)
 	blocks, err := tc.GenerateBlocks(set)
 	require.NoError(t, err)
 	// Sanity check
-	require.Equal(t, 1, len(blocks))
+	require.Equal(t, 3, len(blocks))
 	require.Equal(t, 5, len(blocks[0].Rollup.L1UserTxs))

-	toForgeL1TxsNum := int64(1)
-
 	for i := range blocks {
 		err = historyDB.AddBlockSCData(&blocks[i])
 		require.NoError(t, err)
 	}

-	l1UserTxs, err := historyDB.GetUnforgedL1UserTxs(toForgeL1TxsNum)
+	l1UserTxs, err := historyDB.GetUnforgedL1UserFutureTxs(0)
+	require.NoError(t, err)
+	assert.Equal(t, 7, len(l1UserTxs))
+
+	l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(1)
 	require.NoError(t, err)
 	assert.Equal(t, 5, len(l1UserTxs))
 	assert.Equal(t, blocks[0].Rollup.L1UserTxs, l1UserTxs)

+	l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(1)
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(l1UserTxs))
+
 	count, err := historyDB.GetUnforgedL1UserTxsCount()
 	require.NoError(t, err)
-	assert.Equal(t, 5, count)
+	assert.Equal(t, 7, count)
+
+	l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(l1UserTxs))
+
+	l1UserTxs, err = historyDB.GetUnforgedL1UserFutureTxs(2)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(l1UserTxs))
+
 	// No l1UserTxs for this toForgeL1TxsNum
-	l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(2)
+	l1UserTxs, err = historyDB.GetUnforgedL1UserTxs(3)
 	require.NoError(t, err)
 	assert.Equal(t, 0, len(l1UserTxs))
 }
@@ -1,3 +1,20 @@
+/*
+Package l2db is responsible for storing and retrieving the data received by the coordinator through the api.
+Note that this data will be different for each coordinator in the network, as this represents the L2 information.
+
+The data managed by this package is fundamentally PoolL2Tx and AccountCreationAuth. All this data comes from
+the API sent by clients and is used by the txselector to decide which transactions are selected to forge a batch.
+
+Some of the database tooling used in this package, such as meddler and the migration tools, is explained in the db package.
+
+This package is split in different files following these ideas:
+- l2db.go: constructor and functions used by packages other than the api.
+- apiqueries.go: functions used by the API; the queries implemented in these functions use a semaphore
+to restrict the maximum concurrent connections to the database.
+- views.go: structs used to retrieve/store data from/to the database. When possible, the common structs are used, however
+most of the time there is no 1:1 relation between the struct fields and the tables of the schema, especially when joining tables.
+In some cases, some of the structs defined in this file also include custom Marshallers to easily match the expected api formats.
+*/
 package l2db

 import (
@@ -1,3 +1,10 @@
+/*
+Package db has some common utilities shared by db/l2db and db/historydb; the most relevant ones are:
+- SQL connection utilities
+- Managing the SQL schema: this is done using migration files placed under db/migrations. The files are executed in
+order of the file name.
+- Custom meddlers: used to easily transform struct <==> table
+*/
 package db

 import (
@@ -260,7 +260,7 @@ type AuctionInterface interface {

 	AuctionConstants() (*common.AuctionConstants, error)
 	AuctionEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*AuctionEvents, error)
-	AuctionEventInit() (*AuctionEventInitialize, int64, error)
+	AuctionEventInit(genesisBlockNum int64) (*AuctionEventInitialize, int64, error)
 }

 //

@@ -809,12 +809,14 @@ var (
 )

 // AuctionEventInit returns the initialize event with its corresponding block number
-func (c *AuctionClient) AuctionEventInit() (*AuctionEventInitialize, int64, error) {
+func (c *AuctionClient) AuctionEventInit(genesisBlockNum int64) (*AuctionEventInitialize, int64, error) {
 	query := ethereum.FilterQuery{
 		Addresses: []ethCommon.Address{
 			c.address,
 		},
-		Topics: [][]ethCommon.Hash{{logAuctionInitialize}},
+		FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
+		ToBlock:   big.NewInt(genesisBlockNum),
+		Topics:    [][]ethCommon.Hash{{logAuctionInitialize}},
 	}
 	logs, err := c.client.client.FilterLogs(context.Background(), query)
 	if err != nil {
@@ -28,7 +28,7 @@ func TestAuctionGetCurrentSlotNumber(t *testing.T) {
 }

 func TestAuctionEventInit(t *testing.T) {
-	auctionInit, blockNum, err := auctionClientTest.AuctionEventInit()
+	auctionInit, blockNum, err := auctionClientTest.AuctionEventInit(genesisBlock)
 	require.NoError(t, err)
 	assert.Equal(t, int64(18), blockNum)
 	assert.Equal(t, donationAddressConst, auctionInit.DonationAddress)
@@ -12,6 +12,17 @@ import (

 var errTODO = fmt.Errorf("TODO: Not implemented yet")

+const (
+	blocksPerDay = (3600 * 24) / 15
+)
+
+func max(x, y int64) int64 {
+	if x > y {
+		return x
+	}
+	return y
+}
+
 // ClientInterface is the eth Client interface used by hermez-node modules to
 // interact with Ethereum Blockchain and smart contracts.
 type ClientInterface interface {
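Editor's note: the constant and helper above are what the *EventInit functions below use to bound the initialize-event log filter to roughly one day of blocks before the contract genesis block. A minimal Go sketch (not part of the diff) of the resulting window; the 15-second block time is the assumption baked into blocksPerDay:

package example

import "math/big"

const blocksPerDay = (3600 * 24) / 15 // ~5760 blocks, assuming a 15s block time

func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

// initEventWindow returns the FromBlock/ToBlock pair used when filtering for a
// contract initialize event, mirroring the FilterQuery fields added in this diff.
func initEventWindow(genesisBlockNum int64) (from, to *big.Int) {
	from = big.NewInt(max(0, genesisBlockNum-blocksPerDay)) // never below block 0
	to = big.NewInt(genesisBlockNum)
	return from, to
}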
@@ -245,15 +245,15 @@ func (c *EthereumClient) EthBlockByNumber(ctx context.Context, number int64) (*c
 	if number == -1 {
 		blockNum = nil
 	}
-	block, err := c.client.BlockByNumber(ctx, blockNum)
+	header, err := c.client.HeaderByNumber(ctx, blockNum)
 	if err != nil {
 		return nil, tracerr.Wrap(err)
 	}
 	b := &common.Block{
-		Num:        block.Number().Int64(),
-		Timestamp:  time.Unix(int64(block.Time()), 0),
-		ParentHash: block.ParentHash(),
-		Hash:       block.Hash(),
+		Num:        header.Number.Int64(),
+		Timestamp:  time.Unix(int64(header.Time), 0),
+		ParentHash: header.ParentHash,
+		Hash:       header.Hash(),
 	}
 	return b, nil
 }
@@ -273,7 +273,7 @@ type RollupInterface interface {
 	RollupConstants() (*common.RollupConstants, error)
 	RollupEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*RollupEvents, error)
 	RollupForgeBatchArgs(ethCommon.Hash, uint16) (*RollupForgeBatchArgs, *ethCommon.Address, error)
-	RollupEventInit() (*RollupEventInitialize, int64, error)
+	RollupEventInit(genesisBlockNum int64) (*RollupEventInitialize, int64, error)
 }

 //

@@ -749,12 +749,14 @@ var (
 )

 // RollupEventInit returns the initialize event with its corresponding block number
-func (c *RollupClient) RollupEventInit() (*RollupEventInitialize, int64, error) {
+func (c *RollupClient) RollupEventInit(genesisBlockNum int64) (*RollupEventInitialize, int64, error) {
 	query := ethereum.FilterQuery{
 		Addresses: []ethCommon.Address{
 			c.address,
 		},
-		Topics: [][]ethCommon.Hash{{logHermezInitialize}},
+		FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
+		ToBlock:   big.NewInt(genesisBlockNum),
+		Topics:    [][]ethCommon.Hash{{logHermezInitialize}},
 	}
 	logs, err := c.client.client.FilterLogs(context.Background(), query)
 	if err != nil {

@@ -56,7 +56,7 @@ func genKeysBjj(i int64) *keys {
 }

 func TestRollupEventInit(t *testing.T) {
-	rollupInit, blockNum, err := rollupClient.RollupEventInit()
+	rollupInit, blockNum, err := rollupClient.RollupEventInit(genesisBlock)
 	require.NoError(t, err)
 	assert.Equal(t, int64(19), blockNum)
 	assert.Equal(t, uint8(10), rollupInit.ForgeL1L2BatchTimeout)
@@ -137,7 +137,7 @@ type WDelayerInterface interface {

 	WDelayerEventsByBlock(blockNum int64, blockHash *ethCommon.Hash) (*WDelayerEvents, error)
 	WDelayerConstants() (*common.WDelayerConstants, error)
-	WDelayerEventInit() (*WDelayerEventInitialize, int64, error)
+	WDelayerEventInit(genesisBlockNum int64) (*WDelayerEventInitialize, int64, error)
 }

 //

@@ -415,12 +415,14 @@ var (
 )

 // WDelayerEventInit returns the initialize event with its corresponding block number
-func (c *WDelayerClient) WDelayerEventInit() (*WDelayerEventInitialize, int64, error) {
+func (c *WDelayerClient) WDelayerEventInit(genesisBlockNum int64) (*WDelayerEventInitialize, int64, error) {
 	query := ethereum.FilterQuery{
 		Addresses: []ethCommon.Address{
 			c.address,
 		},
-		Topics: [][]ethCommon.Hash{{logWDelayerInitialize}},
+		FromBlock: big.NewInt(max(0, genesisBlockNum-blocksPerDay)),
+		ToBlock:   big.NewInt(genesisBlockNum),
+		Topics:    [][]ethCommon.Hash{{logWDelayerInitialize}},
 	}
 	logs, err := c.client.client.FilterLogs(context.Background(), query)
 	if err != nil {

@@ -18,7 +18,7 @@ var maxEmergencyModeTime = time.Hour * 24 * 7 * 26
 var maxWithdrawalDelay = time.Hour * 24 * 7 * 2

 func TestWDelayerInit(t *testing.T) {
-	wDelayerInit, blockNum, err := wdelayerClientTest.WDelayerEventInit()
+	wDelayerInit, blockNum, err := wdelayerClientTest.WDelayerEventInit(genesisBlock)
 	require.NoError(t, err)
 	assert.Equal(t, int64(16), blockNum)
 	assert.Equal(t, uint64(initWithdrawalDelay), wDelayerInit.InitialWithdrawalDelay)
metric/metric.go (new file, 192 lines)
@@ -0,0 +1,192 @@
+package metric
+
+import (
+	"time"
+
+	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type (
+	// Metric represents the metric type
+	Metric string
+)
+
+const (
+	namespaceError      = "error"
+	namespaceSync       = "synchronizer"
+	namespaceTxSelector = "txselector"
+	namespaceAPI        = "api"
+)
+
+var (
+	// Errors errors count metric.
+	Errors = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespaceError,
+			Name:      "errors",
+			Help:      "",
+		}, []string{"error"})
+
+	// WaitServerProof duration time to get the calculated
+	// proof from the server.
+	WaitServerProof = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: namespaceSync,
+			Name:      "wait_server_proof",
+			Help:      "",
+		}, []string{"batch_number", "pipeline_number"})
+
+	// Reorgs block reorg count
+	Reorgs = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespaceSync,
+			Name:      "reorgs",
+			Help:      "",
+		})
+
+	// LastBlockNum last block synced
+	LastBlockNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceSync,
+			Name:      "synced_last_block_num",
+			Help:      "",
+		})
+
+	// EthLastBlockNum last eth block synced
+	EthLastBlockNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceSync,
+			Name:      "eth_last_block_num",
+			Help:      "",
+		})
+
+	// LastBatchNum last batch synced
+	LastBatchNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceSync,
+			Name:      "synced_last_batch_num",
+			Help:      "",
+		})
+
+	// EthLastBatchNum last eth batch synced
+	EthLastBatchNum = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceSync,
+			Name:      "eth_last_batch_num",
+			Help:      "",
+		})
+
+	// GetL2TxSelection L2 tx selection count
+	GetL2TxSelection = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "get_l2_txselection_total",
+			Help:      "",
+		})
+
+	// GetL1L2TxSelection L1L2 tx selection count
+	GetL1L2TxSelection = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "get_l1_l2_txselection_total",
+			Help:      "",
+		})
+
+	// SelectedL1CoordinatorTxs selected L1 coordinator tx count
+	SelectedL1CoordinatorTxs = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "selected_l1_coordinator_txs",
+			Help:      "",
+		})
+
+	// SelectedL1UserTxs selected L1 user tx count
+	SelectedL1UserTxs = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "selected_l1_user_txs",
+			Help:      "",
+		})
+
+	// SelectedL2Txs selected L2 tx count
+	SelectedL2Txs = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "selected_l2_txs",
+			Help:      "",
+		})
+
+	// DiscardedL2Txs discarded L2 tx count
+	DiscardedL2Txs = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: namespaceTxSelector,
+			Name:      "discarded_l2_txs",
+			Help:      "",
+		})
+)
+
+func init() {
+	if err := registerCollectors(); err != nil {
+		log.Error(err)
+	}
+}
+
+func registerCollectors() error {
+	if err := registerCollector(Errors); err != nil {
+		return err
+	}
+	if err := registerCollector(WaitServerProof); err != nil {
+		return err
+	}
+	if err := registerCollector(Reorgs); err != nil {
+		return err
+	}
+	if err := registerCollector(LastBlockNum); err != nil {
+		return err
+	}
+	if err := registerCollector(LastBatchNum); err != nil {
+		return err
+	}
+	if err := registerCollector(EthLastBlockNum); err != nil {
+		return err
+	}
+	if err := registerCollector(EthLastBatchNum); err != nil {
+		return err
+	}
+	if err := registerCollector(GetL2TxSelection); err != nil {
+		return err
+	}
+	if err := registerCollector(GetL1L2TxSelection); err != nil {
+		return err
+	}
+	if err := registerCollector(SelectedL1CoordinatorTxs); err != nil {
+		return err
+	}
+	if err := registerCollector(SelectedL1UserTxs); err != nil {
+		return err
+	}
+	return registerCollector(DiscardedL2Txs)
+}
+
+func registerCollector(collector prometheus.Collector) error {
+	err := prometheus.Register(collector)
+	if err != nil {
+		if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
+			return err
+		}
+	}
+	return nil
+}
+
+// MeasureDuration measures the method execution duration
+// and saves it into a histogram metric
+func MeasureDuration(histogram *prometheus.HistogramVec, start time.Time, lvs ...string) {
+	duration := time.Since(start)
+	histogram.WithLabelValues(lvs...).Observe(float64(duration.Milliseconds()))
+}
+
+// CollectError collects the error message and increments
+// the error count
+func CollectError(err error) {
+	Errors.With(map[string]string{"error": err.Error()}).Inc()
+}
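Editor's note: a minimal Go sketch (not part of the diff) of how callers are expected to use the two entry points above, mirroring the pipeline change earlier in this compare; the wrapped operation and label values here are hypothetical:

package example

import (
	"time"

	"github.com/hermeznetwork/hermez-node/metric"
)

// doTimedWork times an arbitrary step with MeasureDuration and counts failures
// with CollectError. The labels must match the ones declared for the
// WaitServerProof histogram: "batch_number" and "pipeline_number".
func doTimedWork(batchNum, pipelineNum string, work func() error) error {
	start := time.Now()
	defer metric.MeasureDuration(metric.WaitServerProof, start, batchNum, pipelineNum)

	if err := work(); err != nil {
		metric.CollectError(err) // increments the errors counter, labelled by message
		return err
	}
	return nil
}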
metric/request.go (new file, 78 lines)
@@ -0,0 +1,78 @@
+package metric
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	favicon = "/favicon.ico"
+)
+
+// Prometheus contains the metrics gathered by the instance and its path
+type Prometheus struct {
+	reqCnt *prometheus.CounterVec
+	reqDur *prometheus.HistogramVec
+}
+
+// NewPrometheus generates a new set of metrics with a certain subsystem name
+func NewPrometheus() (*Prometheus, error) {
+	reqCnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespaceAPI,
+			Name:      "requests_total",
+			Help:      "How many HTTP requests processed, partitioned by status code and HTTP method",
+		},
+		[]string{"code", "method", "path"},
+	)
+	if err := registerCollector(reqCnt); err != nil {
+		return nil, err
+	}
+	reqDur := prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: namespaceAPI,
+			Name:      "request_duration_seconds",
+			Help:      "The HTTP request latencies in seconds",
+		},
+		[]string{"code", "method", "path"},
+	)
+	if err := registerCollector(reqDur); err != nil {
+		return nil, err
+	}
+	return &Prometheus{
+		reqCnt: reqCnt,
+		reqDur: reqDur,
+	}, nil
+}
+
+// PrometheusMiddleware creates the prometheus collector and
+// defines the status handler function for the middleware
+func PrometheusMiddleware() (gin.HandlerFunc, error) {
+	p, err := NewPrometheus()
+	if err != nil {
+		return nil, err
+	}
+	return p.Middleware(), nil
+}
+
+// Middleware defines the status handler function for the middleware
+func (p *Prometheus) Middleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		if c.Request.URL.Path == favicon {
+			c.Next()
+			return
+		}
+		start := time.Now()
+		c.Next()
+
+		status := strconv.Itoa(c.Writer.Status())
+		elapsed := float64(time.Since(start)) / float64(time.Second)
+		fullPath := c.FullPath()
+
+		p.reqDur.WithLabelValues(status, c.Request.Method, fullPath).Observe(elapsed)
+		p.reqCnt.WithLabelValues(status, c.Request.Method, fullPath).Inc()
+	}
+}
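Editor's note: a minimal Go sketch (not part of the diff) of how this middleware could be attached to a gin engine and the gathered metrics exposed; the diff itself only adds the middleware, so the /metrics route, the promhttp exposure and the port are assumptions for illustration:

package main

import (
	"log"

	"github.com/gin-gonic/gin"
	"github.com/hermeznetwork/hermez-node/metric"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	engine := gin.Default()

	// Count and time every API request (favicon requests are skipped by the middleware).
	mw, err := metric.PrometheusMiddleware()
	if err != nil {
		log.Fatal(err)
	}
	engine.Use(mw)

	// Expose the default prometheus registry; path and port are illustrative only.
	engine.GET("/metrics", gin.WrapH(promhttp.Handler()))

	log.Fatal(engine.Run(":4010"))
}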
33	node/node.go
@@ -1,3 +1,18 @@
+/*
+Package node does the initialization of all the required objects to either run
+as a synchronizer or as a coordinator.
+
+The Node contains several goroutines that run in the background or that
+periodically perform tasks. One of these goroutines periodically calls the
+`Synchronizer.Sync` function, allowing the synchronization of one block at a
+time. After every call to `Synchronizer.Sync`, the Node sends a message to the
+Coordinator to notify it about the new synced block (and associated state) or
+reorg (and reset state) in case one happens.
+
+Other goroutines perform tasks such as: updating the token prices, updating
+metrics stored in the historyDB, updating the recommended fee stored in the
+historyDB, running the http API server, running the debug http API server, etc.
+*/
 package node
 
 import (
@@ -273,7 +288,16 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
 		return nil, tracerr.Wrap(err)
 	}
 
-	stateAPIUpdater := stateapiupdater.NewUpdater(historyDB, &hdbNodeCfg, initSCVars, &hdbConsts)
+	stateAPIUpdater, err := stateapiupdater.NewUpdater(
+		historyDB,
+		&hdbNodeCfg,
+		initSCVars,
+		&hdbConsts,
+		&cfg.RecommendedFeePolicy,
+	)
+	if err != nil {
+		return nil, tracerr.Wrap(err)
+	}
 
 	var coord *coordinator.Coordinator
 	if mode == ModeCoordinator {
@@ -592,12 +616,6 @@ type NodeAPI struct { //nolint:golint
 	addr string
 }
 
-func handleNoRoute(c *gin.Context) {
-	c.JSON(http.StatusNotFound, gin.H{
-		"error": "404 page not found",
-	})
-}
-
 // NewNodeAPI creates a new NodeAPI (which internally calls api.NewAPI)
 func NewNodeAPI(
 	addr string,
@@ -607,7 +625,6 @@ func NewNodeAPI(
 	l2db *l2db.L2DB,
 ) (*NodeAPI, error) {
 	engine := gin.Default()
-	engine.NoRoute(handleNoRoute)
 	engine.Use(cors.Default())
 	_api, err := api.NewAPI(
 		coordinatorEndpoints, explorerEndpoints,
@@ -173,6 +173,10 @@ func (p *PriceUpdater) UpdatePrices(ctx context.Context) {
 			tokenPrice, err = p.getTokenPriceCoingecko(ctx, token.Addr)
 		case UpdateMethodTypeStatic:
 			tokenPrice = token.StaticValue
+			if tokenPrice == float64(0) {
+				log.Warn("token price is set to 0. Probably StaticValue is not put in the configuration file,",
+					"token", token.Symbol)
+			}
 		case UpdateMethodTypeIgnore:
 			continue
 		}
@@ -1,44 +0,0 @@
-package synchronizer
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
-	metricReorgsCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "sync_reorgs",
-			Help: "",
-		},
-	)
-	metricSyncedLastBlockNum = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "sync_synced_last_block_num",
-			Help: "",
-		},
-	)
-	metricEthLastBlockNum = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "sync_eth_last_block_num",
-			Help: "",
-		},
-	)
-	metricSyncedLastBatchNum = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "sync_synced_last_batch_num",
-			Help: "",
-		},
-	)
-	metricEthLastBatchNum = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "sync_eth_last_batch_num",
-			Help: "",
-		},
-	)
-)
-
-func init() {
-	prometheus.MustRegister(metricReorgsCount)
-	prometheus.MustRegister(metricSyncedLastBlockNum)
-	prometheus.MustRegister(metricEthLastBlockNum)
-	prometheus.MustRegister(metricSyncedLastBatchNum)
-	prometheus.MustRegister(metricEthLastBatchNum)
-}
@@ -1,3 +1,35 @@
+/*
+Package synchronizer synchronizes the hermez network state by querying events
+emitted by the three smart contracts: `Hermez.sol` (referred to as Rollup here),
+`HermezAuctionProtocol.sol` (referred to as Auction here) and
+`WithdrawalDelayer.sol` (referred to as WDelayer here).
+
+The main entry point for synchronization is the `Sync` function, which at most
+will synchronize one ethereum block, and all the hermez events that happened in
+that block. During a `Sync` call, a reorg can be detected; in such a case, uncle
+blocks will be discarded, and only in a future `Sync` call will the correct
+blocks be synced.
+
+The synchronization of the events of each smart contract is done
+in the methods `rollupSync`, `auctionSync` and `wdelayerSync`, which in turn
+use the interface code to read each smart contract state and events found in
+"github.com/hermeznetwork/hermez-node/eth". After these three methods are
+called, an object of type `common.BlockData` is built containing all the
+updates and events that happened in that block, and it is inserted in the
+HistoryDB in a single SQL transaction.
+
+`rollupSync` is the method that synchronizes batches sent via the `forgeBatch`
+transaction in `Hermez.sol`. In `rollupSync`, for every batch, the accounts
+state is updated in the StateDB by processing all transactions that have been
+forged in that batch.
+
+The consistency of the stored data is guaranteed by the HistoryDB: all the
+block information is inserted in a single SQL transaction at the end of the
+`Sync` method, once the StateDB has been updated. And every time the
+Synchronizer starts, it continues from the last block in the HistoryDB. The
+StateDB stores updates organized by checkpoints for every batch, and each batch
+is only accessed if it appears in the HistoryDB.
+*/
 package synchronizer
 
 import (
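A simplified sketch, with hypothetical names, of the polling pattern this package comment describes: one block synced per call, reorgs reported to the caller. Only the return shape of Sync (block data, count of discarded blocks on reorg, error) is taken from the hunks below; the rest is assumed for illustration.

package main

import (
	"context"
	"fmt"
	"time"
)

// blockData stands in for common.BlockData in this sketch.
type blockData struct{ Num int64 }

// syncFn mirrors the return shape used by Sync: the synced block (nil when
// already at the chain tip), the number of discarded blocks when a reorg was
// detected, and an error.
type syncFn func(ctx context.Context) (*blockData, *int64, error)

// syncLoop synchronizes one block per iteration and reports reorgs.
func syncLoop(ctx context.Context, sync syncFn) {
	for ctx.Err() == nil {
		block, discarded, err := sync(ctx)
		switch {
		case err != nil:
			fmt.Println("sync error:", err)
		case discarded != nil:
			fmt.Println("reorg detected, discarded blocks:", *discarded)
		case block != nil:
			fmt.Println("synced block:", block.Num)
		default:
			time.Sleep(500 * time.Millisecond) // up to date; wait before retrying
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	var n int64
	fake := func(ctx context.Context) (*blockData, *int64, error) {
		if n >= 3 {
			return nil, nil, nil // nothing new to sync
		}
		n++
		return &blockData{Num: n}, nil, nil
	}
	syncLoop(ctx, fake)
}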
@@ -15,6 +47,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/eth"
 	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/metric"
 	"github.com/hermeznetwork/hermez-node/txprocessor"
 	"github.com/hermeznetwork/tracerr"
 )
@@ -549,6 +582,7 @@ func (s *Synchronizer) Sync(ctx context.Context,
 				return nil, nil, tracerr.Wrap(err)
 			}
 			discarded := lastSavedBlock.Num - lastDBBlockNum
+			metric.Reorgs.Inc()
 			return nil, &discarded, nil
 		}
 	}
@@ -641,16 +675,16 @@ func (s *Synchronizer) Sync(ctx context.Context,
 	}
 
 	for _, batchData := range rollupData.Batches {
-		metricSyncedLastBatchNum.Set(float64(batchData.Batch.BatchNum))
-		metricEthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
+		metric.LastBatchNum.Set(float64(batchData.Batch.BatchNum))
+		metric.EthLastBatchNum.Set(float64(s.stats.Eth.LastBatchNum))
 		log.Debugw("Synced batch",
 			"syncLastBatch", batchData.Batch.BatchNum,
 			"syncBatchesPerc", s.stats.batchesPerc(batchData.Batch.BatchNum),
 			"ethLastBatch", s.stats.Eth.LastBatchNum,
 		)
 	}
-	metricSyncedLastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
-	metricEthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
+	metric.LastBlockNum.Set(float64(s.stats.Sync.LastBlock.Num))
+	metric.EthLastBlockNum.Set(float64(s.stats.Eth.LastBlock.Num))
 	log.Debugw("Synced block",
 		"syncLastBlockNum", s.stats.Sync.LastBlock.Num,
 		"syncBlocksPerc", s.stats.blocksPerc(),
@@ -704,15 +738,15 @@ func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) {
 
 func getInitialVariables(ethClient eth.ClientInterface,
 	consts *common.SCConsts) (*common.SCVariables, *StartBlockNums, error) {
-	rollupInit, rollupInitBlock, err := ethClient.RollupEventInit()
+	rollupInit, rollupInitBlock, err := ethClient.RollupEventInit(consts.Auction.GenesisBlockNum)
 	if err != nil {
 		return nil, nil, tracerr.Wrap(fmt.Errorf("RollupEventInit: %w", err))
 	}
-	auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit()
+	auctionInit, auctionInitBlock, err := ethClient.AuctionEventInit(consts.Auction.GenesisBlockNum)
 	if err != nil {
 		return nil, nil, tracerr.Wrap(fmt.Errorf("AuctionEventInit: %w", err))
 	}
-	wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit()
+	wDelayerInit, wDelayerInitBlock, err := ethClient.WDelayerEventInit(consts.Auction.GenesisBlockNum)
 	if err != nil {
 		return nil, nil, tracerr.Wrap(fmt.Errorf("WDelayerEventInit: %w", err))
 	}
@@ -1152,7 +1152,7 @@ func (c *Client) RollupEventsByBlock(blockNum int64,
 }
 
 // RollupEventInit returns the initialize event with its corresponding block number
-func (c *Client) RollupEventInit() (*eth.RollupEventInitialize, int64, error) {
+func (c *Client) RollupEventInit(genesisBlockNum int64) (*eth.RollupEventInitialize, int64, error) {
 	vars := c.blocks[0].Rollup.Vars
 	return &eth.RollupEventInitialize{
 		ForgeL1L2BatchTimeout: uint8(vars.ForgeL1L2BatchTimeout),
@@ -1628,7 +1628,7 @@ func (c *Client) AuctionEventsByBlock(blockNum int64,
 }
 
 // AuctionEventInit returns the initialize event with its corresponding block number
-func (c *Client) AuctionEventInit() (*eth.AuctionEventInitialize, int64, error) {
+func (c *Client) AuctionEventInit(genesisBlockNum int64) (*eth.AuctionEventInitialize, int64, error) {
 	vars := c.blocks[0].Auction.Vars
 	return &eth.AuctionEventInitialize{
 		DonationAddress: vars.DonationAddress,
@@ -1863,7 +1863,7 @@ func (c *Client) WDelayerConstants() (*common.WDelayerConstants, error) {
 }
 
 // WDelayerEventInit returns the initialize event with its corresponding block number
-func (c *Client) WDelayerEventInit() (*eth.WDelayerEventInitialize, int64, error) {
+func (c *Client) WDelayerEventInit(genesisBlockNum int64) (*eth.WDelayerEventInitialize, int64, error) {
 	vars := c.blocks[0].WDelayer.Vars
 	return &eth.WDelayerEventInitialize{
 		InitialWithdrawalDelay: vars.WithdrawalDelay,
@@ -156,7 +156,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 	}
 	// TxSelector select the transactions for the next Batch
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -180,7 +180,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 	l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
 	// TxSelector select the transactions for the next Batch
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -209,7 +209,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
 	// TxSelector select the transactions for the next Batch
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -236,7 +236,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
 	// TxSelector select the transactions for the next Batch
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -256,7 +256,7 @@ func TestTxSelectorBatchBuilderZKInputsMinimumFlow0(t *testing.T) {
 	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
 	// TxSelector select the transactions for the next Batch
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err = bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -319,7 +319,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 	// TxSelector select the transactions for the next Batch
 	l1UserTxs := til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
-		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs)
+		txsel.GetL1L2TxSelection(txprocConfig, l1UserTxs, nil)
 	require.NoError(t, err)
 	// BatchBuilder build Batch
 	zki, err := bb.BuildBatch(coordIdxs, configBatch, oL1UserTxs, oL1CoordTxs, oL2Txs)
@@ -342,7 +342,7 @@ func TestZKInputsExitWithFee0(t *testing.T) {
 	require.NoError(t, err)
 	addL2Txs(t, l2DBTxSel, l2Txs) // Add L2s to TxSelector.L2DB
 	coordIdxs, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-		txsel.GetL1L2TxSelection(txprocConfig, nil)
+		txsel.GetL1L2TxSelection(txprocConfig, nil, nil)
 	require.NoError(t, err)
 	assert.Equal(t, 1, len(coordIdxs))
 	assert.Equal(t, 0, len(oL1UserTxs))
@@ -1,53 +0,0 @@
-package txselector
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
-	metricGetL2TxSelection = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "txsel_get_l2_txselecton_total",
-			Help: "",
-		},
-	)
-	metricGetL1L2TxSelection = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "txsel_get_l1_l2_txselecton_total",
-			Help: "",
-		},
-	)
-
-	metricSelectedL1CoordinatorTxs = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "txsel_selected_l1_coordinator_txs",
-			Help: "",
-		},
-	)
-	metricSelectedL1UserTxs = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "txsel_selected_l1_user_txs",
-			Help: "",
-		},
-	)
-	metricSelectedL2Txs = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "txsel_selected_l2_txs",
-			Help: "",
-		},
-	)
-	metricDiscardedL2Txs = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "txsel_discarded_l2_txs",
-			Help: "",
-		},
-	)
-)
-
-func init() {
-	prometheus.MustRegister(metricGetL2TxSelection)
-	prometheus.MustRegister(metricGetL1L2TxSelection)
-
-	prometheus.MustRegister(metricSelectedL1CoordinatorTxs)
-	prometheus.MustRegister(metricSelectedL1UserTxs)
-	prometheus.MustRegister(metricSelectedL2Txs)
-	prometheus.MustRegister(metricDiscardedL2Txs)
-}
@@ -13,6 +13,7 @@ import (
 	"github.com/hermeznetwork/hermez-node/db/l2db"
 	"github.com/hermeznetwork/hermez-node/db/statedb"
 	"github.com/hermeznetwork/hermez-node/log"
+	"github.com/hermeznetwork/hermez-node/metric"
 	"github.com/hermeznetwork/hermez-node/txprocessor"
 	"github.com/hermeznetwork/tracerr"
 	"github.com/iden3/go-iden3-crypto/babyjub"
@@ -84,7 +85,7 @@ func (txsel *TxSelector) getCoordIdx(tokenID common.TokenID) (common.Idx, error)
 func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 	tokenID common.TokenID, positionL1 int) (*common.L1Tx, int, error) {
 	// check if CoordinatorAccount for TokenID is already pending to create
-	if checkAlreadyPendingToCreate(l1CoordinatorTxs, tokenID,
+	if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, tokenID,
 		txsel.coordAccount.Addr, txsel.coordAccount.BJJ) {
 		return nil, positionL1, nil
 	}
@@ -121,11 +122,12 @@ func (txsel *TxSelector) coordAccountForTokenID(l1CoordinatorTxs []common.L1Tx,
 // but there is a transactions to them and the authorization of account
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
-func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([]common.Idx,
+func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config, l1UserFutureTxs []common.L1Tx) ([]common.Idx,
 	[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
-	metricGetL2TxSelection.Inc()
+	metric.GetL2TxSelection.Inc()
 	coordIdxs, accCreationAuths, _, l1CoordinatorTxs, l2Txs,
-		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, []common.L1Tx{})
+		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig,
+			[]common.L1Tx{}, l1UserFutureTxs)
 	return coordIdxs, accCreationAuths, l1CoordinatorTxs, l2Txs,
 		discardedL2Txs, tracerr.Wrap(err)
 }
@@ -139,11 +141,11 @@ func (txsel *TxSelector) GetL2TxSelection(selectionConfig txprocessor.Config) ([
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
 func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
-	l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
+	l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
 	[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
-	metricGetL1L2TxSelection.Inc()
+	metric.GetL1L2TxSelection.Inc()
 	coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
-		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs)
+		discardedL2Txs, err := txsel.getL1L2TxSelection(selectionConfig, l1UserTxs, l1UserFutureTxs)
 	return coordIdxs, accCreationAuths, l1UserTxs, l1CoordinatorTxs, l2Txs,
 		discardedL2Txs, tracerr.Wrap(err)
 }
@@ -157,7 +159,7 @@ func (txsel *TxSelector) GetL1L2TxSelection(selectionConfig txprocessor.Config,
 // creation exists. The L1UserTxs, L1CoordinatorTxs, PoolL2Txs that will be
 // included in the next batch.
 func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
-	l1UserTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
+	l1UserTxs, l1UserFutureTxs []common.L1Tx) ([]common.Idx, [][]byte, []common.L1Tx,
 	[]common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
 	// WIP.0: the TxSelector is not optimized and will need a redesign. The
 	// current version is implemented in order to have a functional
@@ -221,10 +223,11 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
 		return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
 	}
 
-	metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-	metricSelectedL1CoordinatorTxs.Set(0)
-	metricSelectedL2Txs.Set(0)
-	metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+	metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+	metric.SelectedL1CoordinatorTxs.Set(0)
+	metric.SelectedL2Txs.Set(0)
+	metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
 
 	return nil, nil, l1UserTxs, nil, nil, discardedL2Txs, nil
 }
@@ -233,7 +236,7 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
 	var validTxs, discardedL2Txs []common.PoolL2Tx
 	l2TxsForgable = sortL2Txs(l2TxsForgable)
 	accAuths, l1CoordinatorTxs, validTxs, discardedL2Txs, err =
-		txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs),
+		txsel.processL2Txs(tp, selectionConfig, len(l1UserTxs), l1UserFutureTxs,
 			l2TxsForgable, validTxs, discardedL2Txs)
 	if err != nil {
 		return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
@@ -247,8 +250,8 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
 		var l1CoordinatorTxs2 []common.L1Tx
 		accAuths2, l1CoordinatorTxs2, validTxs, discardedL2Txs, err =
 			txsel.processL2Txs(tp, selectionConfig,
-				len(l1UserTxs)+len(l1CoordinatorTxs), l2TxsNonForgable,
-				validTxs, discardedL2Txs)
+				len(l1UserTxs)+len(l1CoordinatorTxs), l1UserFutureTxs,
+				l2TxsNonForgable, validTxs, discardedL2Txs)
 		if err != nil {
 			return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
 		}
@@ -320,17 +323,18 @@ func (txsel *TxSelector) getL1L2TxSelection(selectionConfig txprocessor.Config,
 		return nil, nil, nil, nil, nil, nil, tracerr.Wrap(err)
 	}
 
-	metricSelectedL1UserTxs.Set(float64(len(l1UserTxs)))
-	metricSelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
-	metricSelectedL2Txs.Set(float64(len(validTxs)))
-	metricDiscardedL2Txs.Set(float64(len(discardedL2Txs)))
+	metric.SelectedL1CoordinatorTxs.Set(float64(len(l1CoordinatorTxs)))
+	metric.SelectedL1UserTxs.Set(float64(len(l1UserTxs)))
+	metric.SelectedL2Txs.Set(float64(len(validTxs)))
+	metric.DiscardedL2Txs.Set(float64(len(discardedL2Txs)))
 
 	return coordIdxs, accAuths, l1UserTxs, l1CoordinatorTxs, validTxs, discardedL2Txs, nil
 }
 
 func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
-	selectionConfig txprocessor.Config, nL1Txs int, l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) (
-	[][]byte, []common.L1Tx, []common.PoolL2Tx, []common.PoolL2Tx, error) {
+	selectionConfig txprocessor.Config, nL1Txs int, l1UserFutureTxs []common.L1Tx,
+	l2Txs, validTxs, discardedL2Txs []common.PoolL2Tx) ([][]byte, []common.L1Tx,
+	[]common.PoolL2Tx, []common.PoolL2Tx, error) {
 	var l1CoordinatorTxs []common.L1Tx
 	positionL1 := nL1Txs
 	var accAuths [][]byte
@@ -432,7 +436,8 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
 		if l2Txs[i].ToIdx == 0 { // ToEthAddr/ToBJJ case
 			validL2Tx, l1CoordinatorTx, accAuth, err :=
 				txsel.processTxToEthAddrBJJ(validTxs, selectionConfig,
-					nL1Txs, l1CoordinatorTxs, positionL1, l2Txs[i])
+					nL1Txs, l1UserFutureTxs, l1CoordinatorTxs,
+					positionL1, l2Txs[i])
 			if err != nil {
 				log.Debugw("txsel.processTxToEthAddrBJJ", "err", err)
 				// Discard L2Tx, and update Info parameter of
@@ -572,18 +577,35 @@ func (txsel *TxSelector) processL2Txs(tp *txprocessor.TxProcessor,
 // l1CoordinatorTxs array, and then the PoolL2Tx is added into the validTxs
 // array.
 func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
-	selectionConfig txprocessor.Config, nL1UserTxs int, l1CoordinatorTxs []common.L1Tx,
-	positionL1 int, l2Tx common.PoolL2Tx) (*common.PoolL2Tx, *common.L1Tx,
-	*common.AccountCreationAuth, error) {
+	selectionConfig txprocessor.Config, nL1UserTxs int, l1UserFutureTxs,
+	l1CoordinatorTxs []common.L1Tx, positionL1 int, l2Tx common.PoolL2Tx) (
+	*common.PoolL2Tx, *common.L1Tx, *common.AccountCreationAuth, error) {
 	// if L2Tx needs a new L1CoordinatorTx of CreateAccount type, and a
 	// previous L2Tx in the current process already created a
 	// L1CoordinatorTx of this type, in the DB there still seem that needs
 	// to create a new L1CoordinatorTx, but as is already created, the tx
 	// is valid
-	if checkAlreadyPendingToCreate(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
+	if checkPendingToCreateL1CoordTx(l1CoordinatorTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
 		return &l2Tx, nil, nil, nil
 	}
 
+	// check if L2Tx receiver account will be created by a L1UserFutureTxs
+	// (in the next batch, the current frozen queue). In that case, the L2Tx
+	// will be discarded at the current batch, even if there is an
+	// AccountCreationAuth for the account, as there is a L1UserTx in the
+	// frozen queue that will create the receiver Account. The L2Tx is
+	// discarded to avoid the Coordinator creating a new L1CoordinatorTx to
+	// create the receiver account, which will be also created in the next
+	// batch from the L1UserFutureTx, ending with the user having 2
+	// different accounts for the same TokenID. The double account creation
+	// is supported by the Hermez zkRollup specification, but it was decided
+	// to mitigate it at the TxSelector level for the explained cases.
+	if checkPendingToCreateFutureTxs(l1UserFutureTxs, l2Tx.TokenID, l2Tx.ToEthAddr, l2Tx.ToBJJ) {
+		return nil, nil, nil, fmt.Errorf("L2Tx discarded at the current batch, as the" +
+			" receiver account does not exist yet, and there is a L1UserTx that" +
+			" will create that account in a future batch.")
+	}
+
 	var l1CoordinatorTx *common.L1Tx
 	var accAuth *common.AccountCreationAuth
 	if l2Tx.ToEthAddr != common.EmptyAddr && l2Tx.ToEthAddr != common.FFAddr {
@@ -686,7 +708,7 @@ func (txsel *TxSelector) processTxToEthAddrBJJ(validTxs []common.PoolL2Tx,
 	return &l2Tx, l1CoordinatorTx, accAuth, nil
 }
 
-func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
+func checkPendingToCreateL1CoordTx(l1CoordinatorTxs []common.L1Tx, tokenID common.TokenID,
 	addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
 	for i := 0; i < len(l1CoordinatorTxs); i++ {
 		if l1CoordinatorTxs[i].FromEthAddr == addr &&
@@ -698,6 +720,23 @@ func checkAlreadyPendingToCreate(l1CoordinatorTxs []common.L1Tx, tokenID common.
 	return false
 }
 
+func checkPendingToCreateFutureTxs(l1UserFutureTxs []common.L1Tx, tokenID common.TokenID,
+	addr ethCommon.Address, bjj babyjub.PublicKeyComp) bool {
+	for i := 0; i < len(l1UserFutureTxs); i++ {
+		if l1UserFutureTxs[i].FromEthAddr == addr &&
+			l1UserFutureTxs[i].TokenID == tokenID &&
+			l1UserFutureTxs[i].FromBJJ == bjj {
+			return true
+		}
+		if l1UserFutureTxs[i].FromEthAddr == addr &&
+			l1UserFutureTxs[i].TokenID == tokenID &&
+			common.EmptyBJJComp == bjj {
+			return true
+		}
+	}
+	return false
+}
+
 // sortL2Txs sorts the PoolL2Txs by AbsoluteFee and then by Nonce
 func sortL2Txs(l2Txs []common.PoolL2Tx) []common.PoolL2Tx {
 	// Sort by absolute fee with SliceStable, so that txs with same
@@ -182,7 +182,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:1")
|
log.Debug("block:0 batch:1")
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -193,7 +193,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:2")
|
log.Debug("block:0 batch:2")
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -204,7 +204,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:3")
|
log.Debug("block:0 batch:3")
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 2, len(oL1UserTxs))
|
assert.Equal(t, 2, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -217,7 +217,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:4")
|
log.Debug("block:0 batch:4")
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 1, len(oL1UserTxs))
|
assert.Equal(t, 1, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -231,7 +231,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:5")
|
log.Debug("block:0 batch:5")
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -245,7 +245,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
log.Debug("block:0 batch:6")
|
log.Debug("block:0 batch:6")
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 1, len(oL1UserTxs))
|
assert.Equal(t, 1, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -279,7 +279,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
|
||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
||||||
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
|
assert.Equal(t, txsel.coordAccount.AccountCreationAuth, accAuths[0])
|
||||||
@@ -328,7 +328,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[3].VerifySignature(chainID, tc.Users["A"].BJJ.Public().Compress()))
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
|
||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
assert.Equal(t, []common.Idx{261, 263}, coordIdxs)
|
||||||
assert.Equal(t, 0, len(accAuths))
|
assert.Equal(t, 0, len(accAuths))
|
||||||
@@ -372,7 +372,7 @@ func TestGetL2TxSelectionMinimumFlow0(t *testing.T) {
|
|||||||
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
assert.True(t, l2TxsFromDB[1].VerifySignature(chainID, tc.Users["B"].BJJ.Public().Compress()))
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
|
||||||
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
coordIdxs, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, _, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []common.Idx{263}, coordIdxs)
|
assert.Equal(t, []common.Idx{263}, coordIdxs)
|
||||||
assert.Equal(t, 0, len(accAuths))
|
assert.Equal(t, 0, len(accAuths))
|
||||||
@@ -434,7 +434,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// batch1
|
// batch1
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// 1st TransferToEthAddr
|
// 1st TransferToEthAddr
|
||||||
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
|
expectedTxID0 := "0x028847b86613c0b70be18c8622119ed045b42e4e47d7938fa90bb3d1dc14928965"
|
||||||
@@ -456,7 +456,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
|
|||||||
|
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 3, len(oL1UserTxs))
|
assert.Equal(t, 3, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -481,7 +481,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
|
|||||||
|
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -500,7 +500,7 @@ func TestPoolL2TxsWithoutEnoughBalance(t *testing.T) {
|
|||||||
// initial PoolExit, which now is valid as B has enough Balance
|
// initial PoolExit, which now is valid as B has enough Balance
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
assert.Equal(t, 0, len(oL1CoordTxs))
|
assert.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -550,7 +550,7 @@ func TestTransferToBjj(t *testing.T) {
|
|||||||
// batch1 to freeze L1UserTxs that will create some accounts with
|
// batch1 to freeze L1UserTxs that will create some accounts with
|
||||||
// positive balance
|
// positive balance
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
|
// Transfer is ToBJJ to a BJJ-only account that doesn't exist
|
||||||
@@ -568,7 +568,7 @@ func TestTransferToBjj(t *testing.T) {
|
|||||||
|
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 4, len(oL1UserTxs))
|
assert.Equal(t, 4, len(oL1UserTxs))
|
||||||
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
|
// We expect the coordinator to add an L1CoordTx to create an account for the recipient of the l2tx
|
||||||
@@ -595,7 +595,7 @@ func TestTransferToBjj(t *testing.T) {
|
|||||||
|
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
|
// Since the BJJ-only account B already exists, the coordinator doesn't add any L1CoordTxs
|
||||||
@@ -623,7 +623,7 @@ func TestTransferToBjj(t *testing.T) {
|
|||||||
|
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 0, len(oL1UserTxs))
|
assert.Equal(t, 0, len(oL1UserTxs))
|
||||||
// We expect the coordinator to add an L1CoordTx to create an account
|
// We expect the coordinator to add an L1CoordTx to create an account
|
||||||
@@ -678,7 +678,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// batch1 to freeze L1UserTxs
|
// batch1 to freeze L1UserTxs
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// 8 transfers from the same account
|
// 8 transfers from the same account
|
||||||
@@ -710,7 +710,7 @@ func TestTransferManyFromSameAccount(t *testing.T) {
|
|||||||
// transfers from account A
|
// transfers from account A
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 3, len(oL1UserTxs))
|
assert.Equal(t, 3, len(oL1UserTxs))
|
||||||
require.Equal(t, 0, len(oL1CoordTxs))
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -760,7 +760,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// batch1 to freeze L1UserTxs
|
// batch1 to freeze L1UserTxs
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
batchPoolL2 := `
|
batchPoolL2 := `
|
||||||
@@ -794,7 +794,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
|
|
||||||
// select L1 & L2 txs
|
// select L1 & L2 txs
|
||||||
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 3, len(oL1UserTxs))
|
require.Equal(t, 3, len(oL1UserTxs))
|
||||||
require.Equal(t, 0, len(oL1CoordTxs))
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -809,7 +809,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
// batch 3
|
// batch 3
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 0, len(oL1UserTxs))
|
require.Equal(t, 0, len(oL1UserTxs))
|
||||||
@@ -825,7 +825,7 @@ func TestPoolL2TxInvalidNonces(t *testing.T) {
|
|||||||
// batch 4
|
// batch 4
|
||||||
l1UserTxs = []common.L1Tx{}
|
l1UserTxs = []common.L1Tx{}
|
||||||
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 0, len(oL1UserTxs))
|
require.Equal(t, 0, len(oL1UserTxs))
|
||||||
@@ -873,10 +873,10 @@ func TestProcessL2Selection(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// batch1 to freeze L1UserTxs
|
// batch1 to freeze L1UserTxs
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// 8 transfers from the same account
|
// 3 transfers from the same account
|
||||||
batchPoolL2 := `
|
batchPoolL2 := `
|
||||||
Type: PoolL2
|
Type: PoolL2
|
||||||
PoolTransfer(0) A-B: 10 (126)
|
PoolTransfer(0) A-B: 10 (126)
|
||||||
@@ -889,10 +889,10 @@ func TestProcessL2Selection(t *testing.T) {
|
|||||||
|
|
||||||
// add the PoolL2Txs to the l2DB
|
// add the PoolL2Txs to the l2DB
|
||||||
addL2Txs(t, txsel, poolL2Txs)
|
addL2Txs(t, txsel, poolL2Txs)
|
||||||
// batch 2 to crate some accounts with positive balance, and do 8 L2Tx transfers from account A
|
// batch 2 to crate some accounts with positive balance, and do 3 L2Tx transfers from account A
|
||||||
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
|
||||||
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
|
||||||
txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 3, len(oL1UserTxs))
|
assert.Equal(t, 3, len(oL1UserTxs))
|
||||||
require.Equal(t, 0, len(oL1CoordTxs))
|
require.Equal(t, 0, len(oL1CoordTxs))
|
||||||
@@ -968,7 +968,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// batch1 to freeze L1UserTxs
|
// batch1 to freeze L1UserTxs
|
||||||
l1UserTxs := []common.L1Tx{}
|
l1UserTxs := []common.L1Tx{}
|
||||||
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs)
|
_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// batch 2 to crate the accounts (from L1UserTxs)
|
// batch 2 to crate the accounts (from L1UserTxs)
|
||||||
@@ -976,7 +976,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {

	// select L1 & L2 txs
	_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
-		txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+		txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(oL1UserTxs))
	require.Equal(t, 0, len(oL1CoordTxs))
@@ -1014,7 +1014,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
	addL2Txs(t, txsel, poolL2Txs)
	l1UserTxs = []common.L1Tx{}
	_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-		txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+		txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
	require.NoError(t, err)

	require.Equal(t, 0, len(oL1UserTxs))
@@ -1029,7 +1029,7 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {

	// batch 4. In this Batch, account B has enough balance to send the txs
	_, accAuths, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
-		txsel.GetL1L2TxSelection(tpc, l1UserTxs)
+		txsel.GetL1L2TxSelection(tpc, l1UserTxs, nil)
	require.NoError(t, err)

	require.Equal(t, 0, len(oL1UserTxs))
@@ -1038,3 +1038,112 @@ func TestValidTxsWithLowFeeAndInvalidTxsWithHighFee(t *testing.T) {
	require.Equal(t, 3, len(discardedL2Txs))
	require.Equal(t, 0, len(accAuths))
 }
+
+func TestL1UserFutureTxs(t *testing.T) {
+	set := `
+		Type: Blockchain
+
+		CreateAccountDeposit(0) Coord: 0
+		CreateAccountDeposit(0) A: 100
+
+		> batchL1 // freeze L1User{2}
+		CreateAccountDeposit(0) B: 18
+		> batchL1 // forge L1User{2}, freeze L1User{1}
+		> batchL1 // forge L1User{1}
+		> block
+	`
+
+	chainID := uint16(0)
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	assert.NoError(t, err)
+
+	hermezContractAddr := ethCommon.HexToAddress("0xc344E203a046Da13b0B4467EB7B3629D0C99F6E6")
+	txsel, _ := initTest(t, chainID, hermezContractAddr, tc.Users["Coord"])
+
+	// restart nonces of TilContext, as will be set by generating directly
+	// the PoolL2Txs for each specific batch with tc.GeneratePoolL2Txs
+	tc.RestartNonces()
+
+	tpc := txprocessor.Config{
+		NLevels:  16,
+		MaxFeeTx: 10,
+		MaxTx:    10,
+		MaxL1Tx:  10,
+		ChainID:  chainID,
+	}
+	// batch1 to freeze L1UserTxs
+	l1UserTxs := []common.L1Tx{}
+	l1UserFutureTxs := []common.L1Tx{}
+	_, _, _, _, _, _, err = txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
+	require.NoError(t, err)
+
+	batchPoolL2 := `
+	Type: PoolL2
+	PoolTransferToEthAddr(0) A-B: 10 (126)
+	`
+	poolL2Txs, err := tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(poolL2Txs))
+
+	// add AccountCreationAuth for B
+	_ = addAccCreationAuth(t, tc, txsel, chainID, hermezContractAddr, "B")
+	// add the PoolL2Txs to the l2DB
+	addL2Txs(t, txsel, poolL2Txs)
+	// batch 2 to crate some accounts with positive balance, and do 1 L2Tx transfer from account A
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[1].Batch.ForgeL1TxsNum])
+	l1UserFutureTxs =
+		til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
+	require.Equal(t, 2, len(l1UserTxs))
+	require.Equal(t, 1, len(l1UserFutureTxs))
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err :=
+		txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(oL1UserTxs))
+	require.Equal(t, 0, len(oL1CoordTxs))
+	// no L2Tx selected due the L1UserFutureTx, the L2Tx will be processed
+	// at the next batch once the L1UserTx of CreateAccount B is processed,
+	// despite that there is an AccountCreationAuth for Account B.
+	assert.Equal(t, 0, len(oL2Txs))
+	assert.Equal(t, 1, len(discardedL2Txs))
+	assert.Equal(t, "Tx not selected (in processTxToEthAddrBJJ) due to L2Tx"+
+		" discarded at the current batch, as the receiver account does"+
+		" not exist yet, and there is a L1UserTx that will create that"+
+		" account in a future batch.",
+		discardedL2Txs[0].Info)
+
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
+		txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+
+	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
+	l1UserFutureTxs = []common.L1Tx{}
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
+		txsel.GetL1L2TxSelection(tpc, l1UserTxs, l1UserFutureTxs)
+	require.NoError(t, err)
+	assert.Equal(t, 1, len(oL1UserTxs))
+	require.Equal(t, 0, len(oL1CoordTxs))
+	// L2Tx selected as now the L1UserTx of CreateAccount B is processed
+	assert.Equal(t, 1, len(oL2Txs))
+	assert.Equal(t, 0, len(discardedL2Txs))
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
+		txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+
+	// generate a new L2Tx A-B and check that is processed
+	poolL2Txs, err = tc.GeneratePoolL2Txs(batchPoolL2)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(poolL2Txs))
+	// add the PoolL2Txs to the l2DB
+	addL2Txs(t, txsel, poolL2Txs)
+	_, _, oL1UserTxs, oL1CoordTxs, oL2Txs, discardedL2Txs, err =
+		txsel.GetL1L2TxSelection(tpc, nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(oL1UserTxs))
+	require.Equal(t, 0, len(oL1CoordTxs))
+	assert.Equal(t, 1, len(oL2Txs))
+	assert.Equal(t, 0, len(discardedL2Txs))
+	err = txsel.l2db.StartForging(common.TxIDsFromPoolL2Txs(oL2Txs),
+		txsel.localAccountsDB.CurrentBatch())
+	require.NoError(t, err)
+}
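For orientation only: the diff above changes the selector call sites so that GetL1L2TxSelection takes a third argument, l1UserFutureTxs, holding L1 user txs already frozen for a later batch (callers with none pass nil). The following is a minimal, self-contained Go sketch of that call shape; every type and function in it is a simplified stand-in for illustration, not the node's actual definitions.

package main

import "fmt"

// Simplified stand-ins for the real common.L1Tx, common.PoolL2Tx and
// txprocessor.Config types used by the selector in the diff above.
type L1Tx struct{ FromIdx, ToIdx int }
type PoolL2Tx struct{ Info string }
type Config struct{ NLevels, MaxFeeTx, MaxTx, MaxL1Tx uint32 }

type txSelector struct{}

// Old shape (before this change):  GetL1L2TxSelection(cfg, l1UserTxs)
// New shape (after this change):   GetL1L2TxSelection(cfg, l1UserTxs, l1UserFutureTxs)
// A real selector would defer L2 txs whose receiver account is only created
// by one of the l1UserFutureTxs, as the new TestL1UserFutureTxs asserts;
// this stub only echoes its inputs.
func (s *txSelector) GetL1L2TxSelection(cfg Config, l1UserTxs, l1UserFutureTxs []L1Tx) (
	coordIdxs []int, accAuths [][]byte, oL1UserTxs, oL1CoordTxs []L1Tx,
	oL2Txs, discardedL2Txs []PoolL2Tx, err error) {
	return nil, nil, l1UserTxs, nil, nil, nil, nil
}

func main() {
	sel := &txSelector{}
	cfg := Config{NLevels: 16, MaxFeeTx: 10, MaxTx: 10, MaxL1Tx: 10}
	// No future L1 user txs known: pass nil, as the updated call sites do.
	_, _, oL1UserTxs, _, _, _, _ := sel.GetL1L2TxSelection(cfg, []L1Tx{}, nil)
	fmt.Println(len(oL1UserTxs))
}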