Mirror of https://github.com/arnaucube/hermez-node.git
Merge pull request #413 from hermeznetwork/feature/parametrize-chainid
Parametrize ChainID
api/api.go (19 changed lines)
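The PR threads the Ethereum chain ID, as a uint16, from the node configuration down to every component that hashes, signs, or proves L2 transactions, so an L2 signature is bound to a specific network. A quick orientation sketch of the signatures this commit changes (collected from the hunks below, not additional API):

    // Constructors and methods that now take the chain ID explicitly.
    func NewStateDB(path string, typ TypeStateDB, nLevels int, chainID uint16) (*StateDB, error)
    func NewZKInputs(chainID uint16, nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs
    func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error)
    func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error)
    func (tx *PoolL2Tx) VerifySignature(chainID uint16, pkComp babyjub.PublicKeyComp) bool
    func (c *EthereumClient) EthChainID() (*big.Int, error)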
@@ -31,11 +31,12 @@ type Status struct {
 // API serves HTTP requests to allow external interaction with the Hermez node
 type API struct {
   h      *historydb.HistoryDB
   cg     *configAPI
   s      *statedb.StateDB
   l2     *l2db.L2DB
   status Status
+  chainID uint16
 }
 
 // NewAPI sets the endpoints and the appropriate handlers, but doesn't start the server
@@ -46,6 +47,7 @@ func NewAPI(
   sdb *statedb.StateDB,
   l2db *l2db.L2DB,
   config *Config,
+  chainID uint16,
 ) (*API, error) {
   // Check input
   // TODO: is stateDB only needed for explorer endpoints or for both?
@@ -63,9 +65,10 @@ func NewAPI(
       AuctionConstants:  config.AuctionConstants,
       WDelayerConstants: config.WDelayerConstants,
     },
     s:      sdb,
     l2:     l2db,
     status: Status{},
+    chainID: chainID,
   }
 
   // Add coordinator endpoints
@@ -211,7 +211,8 @@ func TestMain(m *testing.M) {
       panic(err)
     }
   }()
-  sdb, err := statedb.NewStateDB(dir, statedb.TypeTxSelector, 0)
+  chainID := uint16(0)
+  sdb, err := statedb.NewStateDB(dir, statedb.TypeTxSelector, 0, chainID)
   if err != nil {
     panic(err)
   }
@@ -237,6 +238,7 @@ func TestMain(m *testing.M) {
     sdb,
     l2DB,
     &_config,
+    chainID,
   )
   if err != nil {
     panic(err)
@@ -253,7 +255,7 @@ func TestMain(m *testing.M) {
   test.WipeDB(api.h.DB())
 
   // Genratre blockchain data with til
-  tcc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tcc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "Coord",
@@ -180,7 +180,7 @@ func (a *API) verifyPoolL2TxWrite(txw *l2db.PoolL2TxWrite) error {
     return tracerr.Wrap(err)
   }
   // Check signature
-  if !poolTx.VerifySignature(account.PublicKey) {
+  if !poolTx.VerifySignature(a.chainID, account.PublicKey) {
     return tracerr.Wrap(errors.New("wrong signature"))
   }
   return nil
@@ -15,7 +15,8 @@ func TestBatchBuilder(t *testing.T) {
   require.Nil(t, err)
   defer assert.Nil(t, os.RemoveAll(dir))
 
-  synchDB, err := statedb.NewStateDB(dir, statedb.TypeBatchBuilder, 0)
+  chainID := uint16(0)
+  synchDB, err := statedb.NewStateDB(dir, statedb.TypeBatchBuilder, 0, chainID)
   assert.Nil(t, err)
 
   bbDir, err := ioutil.TempDir("", "tmpBatchBuilderDB")
@@ -1,6 +1,7 @@
 package common
 
 import (
+  "encoding/binary"
   "errors"
   "fmt"
   "math/big"
@@ -128,7 +129,7 @@ func (tx *PoolL2Tx) SetID() error {
 // [ 16 bits ] chainId // 2 bytes
 // [ 32 bits ] signatureConstant // 4 bytes
 // Total bits compressed data: 241 bits // 31 bytes in *big.Int representation
-func (tx *PoolL2Tx) TxCompressedData() (*big.Int, error) {
+func (tx *PoolL2Tx) TxCompressedData(chainID uint16) (*big.Int, error) {
   amountFloat16, err := NewFloat16(tx.Amount)
   if err != nil {
     return nil, tracerr.Wrap(err)
@@ -160,7 +161,7 @@ func (tx *PoolL2Tx) TxCompressedData() (*big.Int, error) {
     return nil, tracerr.Wrap(err)
   }
   copy(b[19:25], fromIdxBytes[:])
-  copy(b[25:27], []byte{0, 0}) // TODO this will be generated by the ChainID config parameter
+  binary.BigEndian.PutUint16(b[25:27], chainID)
   copy(b[27:31], SignatureConstantBytes[:])
 
   bi := new(big.Int).SetBytes(b[:])
@@ -271,8 +272,8 @@ func (tx *PoolL2Tx) RqTxCompressedDataV2() (*big.Int, error) {
 }
 
 // HashToSign returns the computed Poseidon hash from the *PoolL2Tx that will be signed by the sender.
-func (tx *PoolL2Tx) HashToSign() (*big.Int, error) {
-  toCompressedData, err := tx.TxCompressedData()
+func (tx *PoolL2Tx) HashToSign(chainID uint16) (*big.Int, error) {
+  toCompressedData, err := tx.TxCompressedData(chainID)
   if err != nil {
     return nil, tracerr.Wrap(err)
   }
@@ -292,8 +293,8 @@ func (tx *PoolL2Tx) HashToSign() (*big.Int, error) {
 }
 
 // VerifySignature returns true if the signature verification is correct for the given PublicKeyComp
-func (tx *PoolL2Tx) VerifySignature(pkComp babyjub.PublicKeyComp) bool {
-  h, err := tx.HashToSign()
+func (tx *PoolL2Tx) VerifySignature(chainID uint16, pkComp babyjub.PublicKeyComp) bool {
+  h, err := tx.HashToSign(chainID)
   if err != nil {
     return false
   }
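With the TODO gone, the chain ID occupies bytes 25:27 of the 31-byte compressed transaction data, between the fromIdx field and the signature constant, so the digest a wallet signs is network-specific. A minimal sketch of the resulting sign/verify round trip, assuming `tx` is a populated PoolL2Tx (key and flow mirror TestVerifyTxSignature below):

    var sk babyjub.PrivateKey
    _, _ = hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
    chainID := uint16(0) // arbitrary value for the sketch
    toSign, err := tx.HashToSign(chainID) // Poseidon hash built from TxCompressedData(chainID), among other fields
    if err != nil {
        panic(err)
    }
    tx.Signature = sk.SignPoseidon(toSign).Compress()
    ok := tx.VerifySignature(chainID, sk.Public().Compress())      // true
    other := tx.VerifySignature(chainID+1, sk.Public().Compress()) // false: a different chain ID yields a different digest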
@@ -25,6 +25,7 @@ func TestNewPoolL2Tx(t *testing.T) {
 }
 
 func TestTxCompressedData(t *testing.T) {
+  chainID := uint16(0)
   var sk babyjub.PrivateKey
   _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
   assert.NoError(t, err)
@@ -36,12 +37,24 @@ func TestTxCompressedData(t *testing.T) {
     Nonce: 6,
     ToBJJ: sk.Public().Compress(),
   }
-  txCompressedData, err := tx.TxCompressedData()
+  txCompressedData, err := tx.TxCompressedData(chainID)
   assert.NoError(t, err)
   // test vector value generated from javascript implementation
   expectedStr := "1766847064778421992193717128424891165872736891548909569553540445094274575"
   assert.Equal(t, expectedStr, txCompressedData.String())
   assert.Equal(t, "010000000000060000000500040000000000030000000000020000c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+  // using a different chainID
+  txCompressedData, err = tx.TxCompressedData(uint16(100))
+  assert.NoError(t, err)
+  expectedStr = "1766847064778421992193717128424891165872736891548909569553540874591004175"
+  assert.Equal(t, expectedStr, txCompressedData.String())
+  assert.Equal(t, "010000000000060000000500040000000000030000000000020064c60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+  txCompressedData, err = tx.TxCompressedData(uint16(65535))
+  assert.NoError(t, err)
+  expectedStr = "1766847064778421992193717128424891165872736891548909569553821915776017935"
+  assert.Equal(t, expectedStr, txCompressedData.String())
+  assert.Equal(t, "01000000000006000000050004000000000003000000000002ffffc60be60f", hex.EncodeToString(txCompressedData.Bytes()))
+
   tx = PoolL2Tx{
     RqFromIdx: 7,
     RqToIdx:   8,
@@ -51,12 +64,12 @@ func TestTxCompressedData(t *testing.T) {
     RqFee:   12,
     RqToBJJ: sk.Public().Compress(),
   }
-  txCompressedData, err = tx.RqTxCompressedDataV2()
+  rqTxCompressedData, err := tx.RqTxCompressedDataV2()
   assert.NoError(t, err)
   // test vector value generated from javascript implementation
   expectedStr = "6571340879233176732837827812956721483162819083004853354503"
-  assert.Equal(t, expectedStr, txCompressedData.String())
-  assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(txCompressedData.Bytes()))
+  assert.Equal(t, expectedStr, rqTxCompressedData.String())
+  assert.Equal(t, "010c000000000b0000000a0009000000000008000000000007", hex.EncodeToString(rqTxCompressedData.Bytes()))
 }
 
 func TestTxCompressedDataV2(t *testing.T) {
@@ -109,6 +122,7 @@ func TestRqTxCompressedDataV2(t *testing.T) {
 }
 
 func TestHashToSign(t *testing.T) {
+  chainID := uint16(0)
   var sk babyjub.PrivateKey
   _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
   assert.NoError(t, err)
@@ -120,12 +134,13 @@ func TestHashToSign(t *testing.T) {
     Nonce:     6,
     ToEthAddr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
   }
-  toSign, err := tx.HashToSign()
+  toSign, err := tx.HashToSign(chainID)
   assert.NoError(t, err)
   assert.Equal(t, "1469900657138253851938022936440971384682713995864967090251961124784132925291", toSign.String())
 }
 
 func TestVerifyTxSignature(t *testing.T) {
+  chainID := uint16(0)
   var sk babyjub.PrivateKey
   _, err := hex.Decode(sk[:], []byte("0001020304050607080900010203040506070809000102030405060708090001"))
   assert.NoError(t, err)
@@ -139,13 +154,13 @@ func TestVerifyTxSignature(t *testing.T) {
     RqToEthAddr: ethCommon.HexToAddress("0xc58d29fA6e86E4FAe04DDcEd660d45BCf3Cb2370"),
     RqToBJJ:     sk.Public().Compress(),
   }
-  toSign, err := tx.HashToSign()
+  toSign, err := tx.HashToSign(chainID)
   assert.NoError(t, err)
   assert.Equal(t, "18645218094210271622244722988708640202588315450486586312909439859037906375295", toSign.String())
 
   sig := sk.SignPoseidon(toSign)
   tx.Signature = sig.Compress()
-  assert.True(t, tx.VerifySignature(sk.Public().Compress()))
+  assert.True(t, tx.VerifySignature(chainID, sk.Public().Compress()))
 }
 
 func TestDecompressEmptyBJJComp(t *testing.T) {
@@ -282,7 +282,7 @@ func (z ZKInputs) MarshalJSON() ([]byte, error) {
 }
 
 // NewZKInputs returns a pointer to an initialized struct of ZKInputs
-func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs {
+func NewZKInputs(chainID uint16, nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatch *big.Int) *ZKInputs {
   zki := &ZKInputs{}
   zki.Metadata.NTx = nTx
   zki.Metadata.MaxFeeIdxs = maxFeeIdxs
@@ -290,12 +290,13 @@ func NewZKInputs(nTx, maxL1Tx, maxTx, maxFeeIdxs, nLevels uint32, currentNumBatc
   zki.Metadata.NLevels = nLevels
   zki.Metadata.MaxL1Tx = maxL1Tx
   zki.Metadata.MaxTx = maxTx
+  zki.Metadata.ChainID = chainID
 
   // General
   zki.CurrentNumBatch = currentNumBatch
   zki.OldLastIdx = big.NewInt(0)
   zki.OldStateRoot = big.NewInt(0)
-  zki.GlobalChainID = big.NewInt(0) // TODO pass by parameter
+  zki.GlobalChainID = big.NewInt(int64(chainID))
   zki.FeeIdxs = newSlice(maxFeeIdxs)
   zki.FeePlanTokens = newSlice(maxFeeIdxs)
 
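ZKInputs now carries the chain ID twice: in Metadata.ChainID for the Go side and in GlobalChainID as the *big.Int the circuit consumes. A small sketch with the same illustrative sizes used in the tests of this commit:

    chainID := uint16(4) // hypothetical network ID
    zki := common.NewZKInputs(chainID, 100, 16, 512, 24, 32, big.NewInt(1))
    // zki.Metadata.ChainID == chainID
    // zki.GlobalChainID.Int64() == int64(chainID)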
@@ -9,7 +9,8 @@ import (
 )
 
 func TestZKInputs(t *testing.T) {
-  zki := NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
+  chainID := uint16(0)
+  zki := NewZKInputs(chainID, 100, 16, 512, 24, 32, big.NewInt(1))
   _, err := json.Marshal(zki)
   require.NoError(t, err)
   // fmt.Println(string(s))
@@ -91,13 +91,14 @@ var maxL1CoordinatorTxs uint64 = maxL1Txs - maxL1UserTxs
 var maxTxs uint64 = 376
 var nLevels uint32 = 32   //nolint:deadcode,unused
 var maxFeeTxs uint32 = 64 //nolint:deadcode,varcheck
+var chainID uint16 = 0
 
 func newTestModules(t *testing.T) modules {
   var err error
   syncDBPath, err = ioutil.TempDir("", "tmpSyncDB")
   require.NoError(t, err)
   deleteme = append(deleteme, syncDBPath)
-  syncStateDB, err := statedb.NewStateDB(syncDBPath, statedb.TypeSynchronizer, 48)
+  syncStateDB, err := statedb.NewStateDB(syncDBPath, statedb.TypeSynchronizer, 48, chainID)
   assert.NoError(t, err)
 
   pass := os.Getenv("POSTGRES_PASS")
@@ -526,7 +527,7 @@ func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchr
   set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
   set = append(set, til.Instruction{Typ: til.TypeNewBlock})
 
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocksFromInstructions(set)
   require.NoError(t, err)
   require.NotNil(t, blocks)
@@ -28,7 +28,7 @@ func newStateDB(t *testing.T) *statedb.LocalStateDB {
   syncDBPath, err := ioutil.TempDir("", "tmpSyncDB")
   require.NoError(t, err)
   deleteme = append(deleteme, syncDBPath)
-  syncStateDB, err := statedb.NewStateDB(syncDBPath, statedb.TypeSynchronizer, 48)
+  syncStateDB, err := statedb.NewStateDB(syncDBPath, statedb.TypeSynchronizer, 48, chainID)
   assert.NoError(t, err)
   stateDBPath, err := ioutil.TempDir("", "tmpStateDB")
   require.NoError(t, err)
@@ -194,7 +194,7 @@ func TestPoolMarkInvalidOldNonces(t *testing.T) {
   > batchL1
   > block
   `
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(set0)
   require.NoError(t, err)
   tilCfgExtra := til.ConfigExtra{
@@ -66,7 +66,7 @@ func TestBlocks(t *testing.T) {
   > block // blockNum=5
   > block // blockNum=6
   `
-  tc := til.NewContext(1)
+  tc := til.NewContext(uint16(0), 1)
   blocks, err := tc.GenerateBlocks(set1)
   require.NoError(t, err)
   // Save timestamp of a block with UTC and change it without UTC
@@ -138,7 +138,7 @@ func TestBatches(t *testing.T) {
   > batch // batchNum=4, L2 only batch, forges transfer (without USD value)
   > block
   `
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "A",
@@ -359,7 +359,7 @@ func TestTxs(t *testing.T) {
   > block
 
   `
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "A",
@@ -615,7 +615,7 @@ func TestGetUnforgedL1UserTxs(t *testing.T) {
 
   > block
   `
-  tc := til.NewContext(128)
+  tc := til.NewContext(uint16(0), 128)
   blocks, err := tc.GenerateBlocks(set)
   require.NoError(t, err)
   // Sanity check
@@ -712,7 +712,7 @@ func TestSetL1UserTxEffectiveAmounts(t *testing.T) {
   > block // blockNum=3
   `
 
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "A",
@@ -799,7 +799,7 @@ func TestUpdateExitTree(t *testing.T) {
   > block // blockNum=5 (empty block)
   `
 
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "A",
@@ -887,7 +887,7 @@ func TestGetBestBidCoordinator(t *testing.T) {
   err := historyDB.SetInitialSCVars(rollup, auction, wDelayer)
   require.NoError(t, err)
 
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(`
   Type: Blockchain
   > block // blockNum=2
@@ -64,7 +64,7 @@ func prepareHistoryDB(historyDB *historydb.HistoryDB) error {
   > block
   `
 
-  tc = til.NewContext(common.RollupConstMaxL1UserTx)
+  tc = til.NewContext(uint16(0), common.RollupConstMaxL1UserTx)
   tilCfgExtra := til.ConfigExtra{
     BootCoordAddr: ethCommon.HexToAddress("0xE39fEc6224708f0772D2A74fd3f9055A90E0A9f2"),
     CoordUser:     "A",
@@ -76,6 +76,7 @@ type StateDB struct {
   db  *pebble.PebbleStorage
   mt  *merkletree.MerkleTree
   typ TypeStateDB
+  chainID uint16
   // idx holds the current Idx that the BatchBuilder is using
   idx common.Idx
   zki *common.ZKInputs
@@ -88,7 +89,7 @@ type StateDB struct {
 
 // NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk
 // storage
-func NewStateDB(path string, typ TypeStateDB, nLevels int) (*StateDB, error) {
+func NewStateDB(path string, typ TypeStateDB, nLevels int, chainID uint16) (*StateDB, error) {
   var sto *pebble.PebbleStorage
   var err error
   sto, err = pebble.NewPebbleStorage(path+PathCurrent, false)
@@ -108,10 +109,11 @@ func NewStateDB(path string, typ TypeStateDB, nLevels int) (*StateDB, error) {
   }
 
   sdb := &StateDB{
     path: path,
     db:   sto,
     mt:   mt,
     typ:  typ,
+    chainID: chainID,
   }
 
   // load currentBatch
@@ -521,7 +523,7 @@ type LocalStateDB struct {
 // NewLocalStateDB returns a new LocalStateDB connected to the given
 // synchronizerDB
 func NewLocalStateDB(path string, synchronizerDB *StateDB, typ TypeStateDB, nLevels int) (*LocalStateDB, error) {
-  s, err := NewStateDB(path, typ, nLevels)
+  s, err := NewStateDB(path, typ, nLevels, synchronizerDB.chainID)
   if err != nil {
     return nil, tracerr.Wrap(err)
   }
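Note the design choice in NewLocalStateDB: a LocalStateDB never receives a chain ID of its own; it inherits synchronizerDB.chainID, which keeps the tx selector and batch builder hashing for exactly the network the synchronizer follows. A sketch, assuming temporary directories:

    chainID := uint16(1) // hypothetical
    syncSDB, err := statedb.NewStateDB("/tmp/sync", statedb.TypeSynchronizer, 32, chainID)
    if err != nil {
        panic(err)
    }
    // Same signature as before; the chain ID is copied internally from syncSDB.
    bbSDB, err := statedb.NewLocalStateDB("/tmp/bb", syncSDB, statedb.TypeBatchBuilder, 32)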
@@ -45,7 +45,8 @@ func TestNewStateDBIntermediateState(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeTxSelector, 0)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
 
   // test values
@@ -67,7 +68,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
 
   // call NewStateDB which should get the db at the last checkpoint state
   // executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
-  sdb, err = NewStateDB(dir, TypeTxSelector, 0)
+  sdb, err = NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
   v, err = sdb.db.Get(k0)
   assert.NotNil(t, err)
@@ -109,7 +110,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
 
   // call NewStateDB which should get the db at the last checkpoint state
   // executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
-  sdb, err = NewStateDB(dir, TypeTxSelector, 0)
+  sdb, err = NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
 
   v, err = sdb.db.Get(k0)
@@ -127,7 +128,8 @@ func TestStateDBWithoutMT(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeTxSelector, 0)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
 
   // create test accounts
@@ -181,7 +183,8 @@ func TestStateDBWithMT(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   assert.NoError(t, err)
 
   // create test accounts
@@ -233,7 +236,8 @@ func TestCheckpoints(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   assert.NoError(t, err)
 
   // create test accounts
@@ -350,7 +354,8 @@ func TestStateDBGetAccounts(t *testing.T) {
   dir, err := ioutil.TempDir("", "tmpdb")
   require.NoError(t, err)
 
-  sdb, err := NewStateDB(dir, TypeTxSelector, 0)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
 
   // create test accounts
@@ -397,7 +402,8 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   require.NoError(t, err)
 
   ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
@@ -93,7 +93,7 @@ func (s *StateDB) ProcessTxs(ptc ProcessTxsConfig, coordIdxs []common.Idx, l1use
   exits := make([]processedExit, nTx)
 
   if s.typ == TypeBatchBuilder {
-    s.zki = common.NewZKInputs(ptc.MaxTx, ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels, s.currentBatch.BigInt())
+    s.zki = common.NewZKInputs(s.chainID, ptc.MaxTx, ptc.MaxL1Tx, ptc.MaxTx, ptc.MaxFeeTx, ptc.NLevels, s.currentBatch.BigInt())
     s.zki.OldLastIdx = s.idx.BigInt()
     s.zki.OldStateRoot = s.mt.Root().BigInt()
     s.zki.Metadata.NewLastIdxRaw = s.idx
@@ -544,7 +544,7 @@ func (s *StateDB) ProcessL2Tx(coordIdxsMap map[common.TokenID]common.Idx, collec
   // ZKInputs
   if s.zki != nil {
     // Txs
-    s.zki.TxCompressedData[s.i], err = tx.TxCompressedData()
+    s.zki.TxCompressedData[s.i], err = tx.TxCompressedData(s.chainID)
     if err != nil {
       return nil, nil, false, tracerr.Wrap(err)
     }
@@ -28,7 +28,8 @@ func TestComputeEffectiveAmounts(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   assert.NoError(t, err)
 
   set := `
@@ -42,7 +43,7 @@ func TestComputeEffectiveAmounts(t *testing.T) {
   > batchL1
   > block
   `
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(set)
   require.NoError(t, err)
 
@@ -201,11 +202,12 @@ func TestProcessTxsBalances(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   assert.NoError(t, err)
 
   // generate test transactions from test.SetBlockchain0 code
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(til.SetBlockchainMinimumFlow0)
   require.NoError(t, err)
 
@@ -333,11 +335,12 @@ func TestProcessTxsSynchronizer(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeSynchronizer, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeSynchronizer, 32, chainID)
   assert.NoError(t, err)
 
   // generate test transactions from test.SetBlockchain0 code
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(0, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
   require.NoError(t, err)
 
@@ -461,11 +464,12 @@ func TestProcessTxsBatchBuilder(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32, chainID)
   assert.NoError(t, err)
 
   // generate test transactions from test.SetBlockchain0 code
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(0, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
   require.NoError(t, err)
 
@@ -549,7 +553,8 @@ func TestProcessTxsRootTestVectors(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32, chainID)
   assert.NoError(t, err)
 
   // same values than in the js test
@@ -597,8 +602,8 @@ func TestCreateAccountDepositMaxValue(t *testing.T) {
   defer assert.NoError(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.NoError(t, err)
 
   users := generateJsUsers(t)
@@ -20,7 +20,8 @@ func TestGetIdx(t *testing.T) {
   require.NoError(t, err)
   defer assert.NoError(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeTxSelector, 0)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeTxSelector, 0, chainID)
   assert.NoError(t, err)
 
   var sk babyjub.PrivateKey
@@ -65,8 +65,8 @@ func generateJsUsers(t *testing.T) []til.User {
   return users
 }
 
-func signL2Tx(t *testing.T, user til.User, l2Tx common.PoolL2Tx) common.PoolL2Tx {
-  toSign, err := l2Tx.HashToSign()
+func signL2Tx(t *testing.T, chainID uint16, user til.User, l2Tx common.PoolL2Tx) common.PoolL2Tx {
+  toSign, err := l2Tx.HashToSign(chainID)
   require.NoError(t, err)
   sig := user.BJJ.SignPoseidon(toSign)
   l2Tx.Signature = sig.Compress()
@@ -78,7 +78,8 @@ func TestZKInputsHashTestVector0(t *testing.T) {
   require.NoError(t, err)
   defer assert.Nil(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -152,7 +153,8 @@ func TestZKInputsHashTestVector1(t *testing.T) {
   require.NoError(t, err)
   defer assert.Nil(t, os.RemoveAll(dir))
 
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, 32, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -251,7 +253,8 @@ func TestZKInputsEmpty(t *testing.T) {
 
   nLevels := 16
 
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   ptc := ProcessTxsConfig{
@@ -330,7 +333,7 @@ func TestZKInputsEmpty(t *testing.T) {
     },
   }
 
-  toSign, err := l2Txs[0].HashToSign()
+  toSign, err := l2Txs[0].HashToSign(sdb.chainID)
   require.Nil(t, err)
   sig := users[0].BJJ.SignPoseidon(toSign)
   l2Txs[0].Signature = sig.Compress()
@@ -399,8 +402,8 @@ func TestZKInputs0(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -431,7 +434,7 @@ func TestZKInputs0(t *testing.T) {
     },
   }
 
-  toSign, err := l2Txs[0].HashToSign()
+  toSign, err := l2Txs[0].HashToSign(sdb.chainID)
   require.NoError(t, err)
   sig := users[0].BJJ.SignPoseidon(toSign)
   l2Txs[0].Signature = sig.Compress()
@@ -487,8 +490,8 @@ func TestZKInputs1(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -530,7 +533,7 @@ func TestZKInputs1(t *testing.T) {
     },
   }
 
-  toSign, err := l2Txs[0].HashToSign()
+  toSign, err := l2Txs[0].HashToSign(sdb.chainID)
   require.NoError(t, err)
   sig := users[0].BJJ.SignPoseidon(toSign)
   l2Txs[0].Signature = sig.Compress()
@@ -594,8 +597,8 @@ func TestZKInputs2(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -668,8 +671,8 @@ func TestZKInputs2(t *testing.T) {
     },
   }
 
-  l2Txs[0] = signL2Tx(t, users[0], l2Txs[0])
-  l2Txs[1] = signL2Tx(t, users[0], l2Txs[1])
+  l2Txs[0] = signL2Tx(t, sdb.chainID, users[0], l2Txs[0])
+  l2Txs[1] = signL2Tx(t, sdb.chainID, users[0], l2Txs[1])
 
   ptc := ProcessTxsConfig{
     NLevels: uint32(nLevels),
@@ -738,8 +741,8 @@ func TestZKInputs3(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -812,8 +815,8 @@ func TestZKInputs3(t *testing.T) {
     },
   }
 
-  l2Txs[0] = signL2Tx(t, users[0], l2Txs[0])
-  l2Txs[1] = signL2Tx(t, users[0], l2Txs[1])
+  l2Txs[0] = signL2Tx(t, sdb.chainID, users[0], l2Txs[0])
+  l2Txs[1] = signL2Tx(t, sdb.chainID, users[0], l2Txs[1])
 
   ptc := ProcessTxsConfig{
     NLevels: uint32(nLevels),
@@ -882,8 +885,8 @@ func TestZKInputs4(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -966,8 +969,8 @@ func TestZKInputs4(t *testing.T) {
     },
   }
 
-  l2Txs[0] = signL2Tx(t, users[0], l2Txs[0])
-  l2Txs[1] = signL2Tx(t, users[0], l2Txs[1])
+  l2Txs[0] = signL2Tx(t, sdb.chainID, users[0], l2Txs[0])
+  l2Txs[1] = signL2Tx(t, sdb.chainID, users[0], l2Txs[1])
 
   ptc := ProcessTxsConfig{
     NLevels: uint32(nLevels),
@@ -1036,8 +1039,8 @@ func TestZKInputs5(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // same values than in the js test
@@ -1098,8 +1101,8 @@ func TestZKInputs5(t *testing.T) {
       Type: common.TxTypeExit,
     },
   }
-  l2Txs[0] = signL2Tx(t, users[0], l2Txs[0])
-  l2Txs[1] = signL2Tx(t, users[0], l2Txs[1])
+  l2Txs[0] = signL2Tx(t, sdb.chainID, users[0], l2Txs[0])
+  l2Txs[1] = signL2Tx(t, sdb.chainID, users[0], l2Txs[1])
 
   ptc := ProcessTxsConfig{
     NLevels: uint32(nLevels),
@@ -1160,8 +1163,8 @@ func TestZKInputs6(t *testing.T) {
   defer assert.Nil(t, os.RemoveAll(dir))
 
   nLevels := 16
-  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels)
+  chainID := uint16(0)
+  sdb, err := NewStateDB(dir, TypeBatchBuilder, nLevels, chainID)
   assert.Nil(t, err)
 
   // Coordinator Idx where to send the fees
@@ -1174,7 +1177,7 @@ func TestZKInputs6(t *testing.T) {
     MaxFeeTx: 4,
   }
 
-  tc := til.NewContext(common.RollupConstMaxL1UserTx)
+  tc := til.NewContext(0, common.RollupConstMaxL1UserTx)
   blocks, err := tc.GenerateBlocks(til.SetBlockchainMinimumFlow0)
   require.NoError(t, err)
 
@@ -265,6 +265,7 @@ type AuctionInterface interface {
 // AuctionClient is the implementation of the interface to the Auction Smart Contract in ethereum.
 type AuctionClient struct {
   client      *EthereumClient
+  chainID     *big.Int
   address     ethCommon.Address
   tokenHEZCfg TokenConfig
   auction     *HermezAuctionProtocol.HermezAuctionProtocol
@@ -287,8 +288,13 @@ func NewAuctionClient(client *EthereumClient, address ethCommon.Address, tokenHE
   if err != nil {
     return nil, tracerr.Wrap(err)
   }
+  chainID, err := client.EthChainID()
+  if err != nil {
+    return nil, tracerr.Wrap(err)
+  }
   return &AuctionClient{
     client:      client,
+    chainID:     chainID,
     address:     address,
     tokenHEZCfg: tokenHEZCfg,
     auction:     auction,
@@ -580,8 +586,7 @@ func (c *AuctionClient) AuctionBid(amount *big.Int, slot int64, bidAmount *big.I
   }
   tokenName := c.tokenHEZCfg.Name
   tokenAddr := c.tokenHEZCfg.Address
-  chainid, _ := c.client.Client().ChainID(context.Background())
-  digest, _ := createPermitDigest(tokenAddr, owner, spender, chainid, amount, nonce, deadline, tokenName)
+  digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
   signature, _ := c.client.ks.SignHash(*c.client.account, digest)
   permit := createPermit(owner, spender, amount, deadline, digest, signature)
   _slot := big.NewInt(slot)
@@ -607,9 +612,8 @@ func (c *AuctionClient) AuctionMultiBid(amount *big.Int, startingSlot, endingSlo
   }
   tokenName := c.tokenHEZCfg.Name
   tokenAddr := c.tokenHEZCfg.Address
-  chainid, _ := c.client.Client().ChainID(context.Background())
 
-  digest, _ := createPermitDigest(tokenAddr, owner, spender, chainid, amount, nonce, deadline, tokenName)
+  digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)
   signature, _ := c.client.ks.SignHash(*c.client.account, digest)
   permit := createPermit(owner, spender, amount, deadline, digest, signature)
   _startingSlot := big.NewInt(startingSlot)
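The pattern here (and in RollupClient below) is to query the chain ID once through the new EthChainID helper when the client is built and cache it on the struct, rather than calling the underlying ethclient's ChainID on every bid. Roughly:

    // at construction
    chainID, err := client.EthChainID()
    if err != nil {
        return nil, tracerr.Wrap(err)
    }
    c := &AuctionClient{client: client, chainID: chainID /* ... */}
    // per bid: reuse the cached value in the permit digest
    digest, _ := createPermitDigest(tokenAddr, owner, spender, c.chainID, amount, nonce, deadline, tokenName)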
@@ -34,6 +34,7 @@ type EthereumInterface interface {
   EthTransactionReceipt(context.Context, ethCommon.Hash) (*types.Receipt, error)
 
   EthERC20Consts(ethCommon.Address) (*ERC20Consts, error)
+  EthChainID() (*big.Int, error)
 }
 
 var (
@@ -101,6 +102,15 @@ func NewEthereumClient(client *ethclient.Client, account *accounts.Account, ks *
   }
 }
 
+// EthChainID returns the ChainID of the ethereum network
+func (c *EthereumClient) EthChainID() (*big.Int, error) {
+  chainID, err := c.client.ChainID(context.Background())
+  if err != nil {
+    return nil, tracerr.Wrap(err)
+  }
+  return chainID, nil
+}
+
 // BalanceAt retieves information about the default account
 func (c *EthereumClient) BalanceAt(addr ethCommon.Address) (*big.Int, error) {
   return c.client.BalanceAt(context.TODO(), addr, nil)
@@ -283,6 +283,7 @@ type RollupClient struct {
   tokenHEZ    *HEZ.HEZ
   contractAbi abi.ABI
   opts        *bind.CallOpts
+  consts      *common.RollupConstants
 }
 
 // NewRollupClient creates a new RollupClient
@@ -299,11 +300,11 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
   if err != nil {
     return nil, tracerr.Wrap(err)
   }
-  chainID, err := client.client.ChainID(context.Background())
+  chainID, err := client.EthChainID()
   if err != nil {
     return nil, tracerr.Wrap(err)
   }
-  return &RollupClient{
+  c := &RollupClient{
     client:      client,
     chainID:     chainID,
     address:     address,
@@ -312,7 +313,13 @@ func NewRollupClient(client *EthereumClient, address ethCommon.Address, tokenHEZ
     tokenHEZ:    tokenHEZ,
     contractAbi: contractAbi,
     opts:        newCallOpts(),
-  }, nil
+  }
+  consts, err := c.RollupConstants()
+  if err != nil {
+    return nil, tracerr.Wrap(err)
+  }
+  c.consts = consts
+  return c, nil
 }
 
 // RollupForgeBatch is the interface to call the smart contract function
@@ -320,11 +327,7 @@ func (c *RollupClient) RollupForgeBatch(args *RollupForgeBatchArgs) (tx *types.T
   if tx, err = c.client.CallAuth(
     1000000, //nolint:gomnd
     func(ec *ethclient.Client, auth *bind.TransactOpts) (*types.Transaction, error) {
-      rollupConst, err := c.RollupConstants()
-      if err != nil {
-        return nil, tracerr.Wrap(err)
-      }
-      nLevels := rollupConst.Verifiers[args.VerifierIdx].NLevels
+      nLevels := c.consts.Verifiers[args.VerifierIdx].NLevels
       lenBytes := nLevels / 8 //nolint:gomnd
       newLastIdx := big.NewInt(int64(args.NewLastIdx))
       // L1CoordinatorBytes
@@ -915,11 +918,7 @@ func (c *RollupClient) RollupForgeBatchArgs(ethTxHash ethCommon.Hash, l1UserTxsL
     L2TxsData:         []common.L2Tx{},
     FeeIdxCoordinator: []common.Idx{},
   }
-  rollupConsts, err := c.RollupConstants()
-  if err != nil {
-    return nil, nil, tracerr.Wrap(err)
-  }
-  nLevels := rollupConsts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
+  nLevels := c.consts.Verifiers[rollupForgeBatchArgs.VerifierIdx].NLevels
   lenL1L2TxsBytes := int((nLevels/8)*2 + 2 + 1)
   numBytesL1TxUser := int(l1UserTxsLen) * lenL1L2TxsBytes
   numTxsL1Coord := len(aux.EncodedL1CoordinatorTx) / common.L1CoordinatorTxBytesLen
node/node.go (27 changed lines)
@@ -83,11 +83,6 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
|
|
||||||
historyDB := historydb.NewHistoryDB(db)
|
historyDB := historydb.NewHistoryDB(db)
|
||||||
|
|
||||||
stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, statedb.TypeSynchronizer, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, tracerr.Wrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ethClient, err := ethclient.Dial(cfg.Web3.URL)
|
ethClient, err := ethclient.Dial(cfg.Web3.URL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
@@ -122,6 +117,25 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
|
|||||||
return nil, tracerr.Wrap(err)
|
return nil, tracerr.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
chainID, err := client.EthChainID()
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
if !chainID.IsUint64() {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("chainID cannot be represented as uint64"))
|
||||||
|
}
|
||||||
|
chainIDU64 := chainID.Uint64()
|
||||||
|
const maxUint16 uint64 = 0xffff
|
||||||
|
if chainIDU64 > maxUint16 {
|
||||||
|
return nil, tracerr.Wrap(fmt.Errorf("chainID overflows uint16"))
|
||||||
|
}
|
||||||
|
chainIDU16 := uint16(chainIDU64)
|
||||||
|
|
||||||
|
stateDB, err := statedb.NewStateDB(cfg.StateDB.Path, statedb.TypeSynchronizer, 32, chainIDU16)
|
||||||
|
if err != nil {
|
||||||
|
return nil, tracerr.Wrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{
|
sync, err := synchronizer.NewSynchronizer(client, historyDB, stateDB, synchronizer.Config{
|
||||||
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
|
StatsRefreshPeriod: cfg.Synchronizer.StatsRefreshPeriod.Duration,
|
||||||
})
|
})
|
||||||
@@ -232,6 +246,7 @@ func NewNode(mode Mode, cfg *config.Node) (*Node, error) {
             AuctionConstants:  scConsts.Auction,
             WDelayerConstants: scConsts.WDelayer,
         },
+        chainIDU16,
     )
     if err != nil {
         return nil, tracerr.Wrap(err)
@@ -286,6 +301,7 @@ func NewNodeAPI(
     sdb *statedb.StateDB,
     l2db *l2db.L2DB,
     config *api.Config,
+    chainID uint16,
 ) (*NodeAPI, error) {
     engine := gin.Default()
     engine.NoRoute(handleNoRoute)
@@ -297,6 +313,7 @@ func NewNodeAPI(
         sdb,
         l2db,
         config,
+        chainID,
     )
     if err != nil {
         return nil, tracerr.Wrap(err)
@@ -52,7 +52,7 @@ func testAPIStatus(t *testing.T) {
 }
 
 func testCalculateProof(t *testing.T) {
-    zkInputs := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
+    zkInputs := common.NewZKInputs(0, 100, 16, 512, 24, 32, big.NewInt(1))
     err := proofServerClient.CalculateProof(context.Background(), zkInputs)
     require.NoError(t, err)
 }
@@ -71,7 +71,7 @@ func testGetProof(t *testing.T) {
 }
 
 func testCancel(t *testing.T) {
-    zkInputs := common.NewZKInputs(100, 16, 512, 24, 32, big.NewInt(1))
+    zkInputs := common.NewZKInputs(0, 100, 16, 512, 24, 32, big.NewInt(1))
     err := proofServerClient.CalculateProof(context.Background(), zkInputs)
     require.NoError(t, err)
     // TODO: remove sleep when the server has been reviewed
@@ -282,7 +282,8 @@ func TestSync(t *testing.T) {
     require.NoError(t, err)
     defer assert.Nil(t, os.RemoveAll(dir))
 
-    stateDB, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32)
+    chainID := uint16(0)
+    stateDB, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32, chainID)
     require.NoError(t, err)
 
     // Init History DB
@@ -379,7 +380,7 @@ func TestSync(t *testing.T) {
         > batchL1 // forge L1UserTxs{3}, freeze defined L1UserTxs{nil}
         > block // blockNum=3
     `
-    tc := til.NewContext(common.RollupConstMaxL1UserTx)
+    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
     tilCfgExtra := til.ConfigExtra{
         BootCoordAddr: bootCoordAddr,
         CoordUser:     "A",
@@ -567,7 +568,7 @@ func TestSync(t *testing.T) {
         > block // blockNum=5
         > block // blockNum=6
     `
-    tc = til.NewContext(common.RollupConstMaxL1UserTx)
+    tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx)
     tilCfgExtra = til.ConfigExtra{
         BootCoordAddr: bootCoordAddr,
         CoordUser:     "A",
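Condensed from the TestSync hunks above, the test now threads a single chain ID through both the state DB and the til context. A short sketch of that wiring, with the temp-dir handling and the til set definitions elided; every call shown appears verbatim in the diff above:

    chainID := uint16(0)

    // Both constructors receive the same value, so the whole test operates
    // on one simulated network.
    stateDB, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32, chainID)
    require.NoError(t, err)

    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
    blocks, err := tc.GenerateBlocks(set)
    require.NoError(t, err)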
@@ -44,7 +44,8 @@ func TestDebugAPI(t *testing.T) {
     dir, err := ioutil.TempDir("", "tmpdb")
     require.Nil(t, err)
 
-    sdb, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32)
+    chainID := uint16(0)
+    sdb, err := statedb.NewStateDB(dir, statedb.TypeSynchronizer, 32, chainID)
     require.Nil(t, err)
     err = sdb.MakeCheckpoint() // Make a checkpoint to increment the batchNum
     require.Nil(t, err)
@@ -272,6 +272,7 @@ type ClientSetup struct {
     WDelayerConstants *common.WDelayerConstants
     WDelayerVariables *common.WDelayerVariables
     VerifyProof       bool
+    ChainID           *big.Int
 }
 
 // NewClientSetupExample returns a ClientSetup example with hardcoded realistic
@@ -357,6 +358,8 @@ func NewClientSetupExample() *ClientSetup {
         AuctionVariables:  auctionVariables,
         WDelayerConstants: wDelayerConstants,
         WDelayerVariables: wDelayerVariables,
+        VerifyProof:       false,
+        ChainID:           big.NewInt(0),
     }
 }
 
@@ -383,6 +386,7 @@ type Client struct {
     rw                *sync.RWMutex
     log               bool
     addr              *ethCommon.Address
+    chainID           *big.Int
     rollupConstants   *common.RollupConstants
     auctionConstants  *common.AuctionConstants
     wDelayerConstants *common.WDelayerConstants
@@ -602,6 +606,11 @@ func (c *Client) CtlLastForgedBatch() int64 {
     return int64(len(e.State.ExitRoots)) - 1
 }
 
+// EthChainID returns the ChainID of the ethereum network
+func (c *Client) EthChainID() (*big.Int, error) {
+    return c.chainID, nil
+}
+
 // EthLastBlock returns the last blockNum
 func (c *Client) EthLastBlock() (int64, error) {
     c.rw.RLock()
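Tests that build the mock client can now pin the simulated network through ClientSetup. A minimal usage fragment; the value 1337 is illustrative and the surrounding scaffolding of this test package (including its math/big import) is assumed:

    setup := NewClientSetupExample()
    setup.ChainID = big.NewInt(1337) // illustrative local chain ID
    // A Client configured from this setup is expected to report the value
    // via EthChainID(), which NewNode then narrows to uint16 as shown above.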
@@ -16,7 +16,7 @@ func TestCompileSetsBase(t *testing.T) {
     _, err = parser.parse()
     assert.NoError(t, err)
 
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(SetBlockchain0)
     assert.NoError(t, err)
     _, err = tc.GeneratePoolL2Txs(SetPool0)
@@ -25,7 +25,7 @@ func TestCompileSetsBase(t *testing.T) {
 
 func TestCompileSetsMinimumFlow(t *testing.T) {
     // minimum flow
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     _, err := tc.GenerateBlocks(SetBlockchainMinimumFlow0)
     assert.NoError(t, err)
     _, err = tc.GeneratePoolL2Txs(SetPoolL2MinimumFlow0)
@@ -61,6 +61,7 @@ type Context struct {
     // queued in a batch
     rollupConstMaxL1UserTx int
 
+    chainID   uint16
     idx       int
     currBlock common.BlockData
     currBatch common.BatchData
@@ -78,7 +79,7 @@ type Context struct {
 }
 
 // NewContext returns a new Context
-func NewContext(rollupConstMaxL1UserTx int) *Context {
+func NewContext(chainID uint16, rollupConstMaxL1UserTx int) *Context {
     currBatchNum := 1 // The protocol defines the first batchNum to be 1
     return &Context{
         Users: make(map[string]*User),
@@ -88,6 +89,7 @@ func NewContext(rollupConstMaxL1UserTx int) *Context {
         LastRegisteredTokenID: 0,
 
         rollupConstMaxL1UserTx: rollupConstMaxL1UserTx,
+        chainID:                chainID,
         idx:                    common.UserThreshold,
         // We use some placeholder values for StateRoot and ExitTree
         // because these values will never be nil
@@ -630,7 +632,7 @@ func (tc *Context) generatePoolL2Txs() ([]common.PoolL2Tx, error) {
         }
         tx = *nTx
         // perform signature and set it to tx.Signature
-        toSign, err := tx.HashToSign()
+        toSign, err := tx.HashToSign(tc.chainID)
         if err != nil {
             return nil, tracerr.Wrap(fmt.Errorf("Line %d: %s", inst.LineNum, err.Error()))
         }
@@ -656,7 +658,7 @@ func (tc *Context) generatePoolL2Txs() ([]common.PoolL2Tx, error) {
         }
         tx = *nTx
         // perform signature and set it to tx.Signature
-        toSign, err := tx.HashToSign()
+        toSign, err := tx.HashToSign(tc.chainID)
         if err != nil {
             return nil, tracerr.Wrap(fmt.Errorf("Line %d: %s", inst.LineNum, err.Error()))
         }
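The point of passing tc.chainID into HashToSign is that an L2 signature only verifies on the network it was produced for. The real implementation hashes the transaction fields with Poseidon; the following standalone sketch uses SHA-256 purely to illustrate the domain separation introduced here and is not the Hermez hashing scheme:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// hashToSign is a conceptual stand-in for PoolL2Tx.HashToSign(chainID):
// the chain ID is mixed into the digest, so the same transaction signed
// for one network yields a different message on another network and the
// signature cannot be replayed there.
func hashToSign(chainID uint16, txFields []byte) [32]byte {
    var prefix [2]byte
    binary.BigEndian.PutUint16(prefix[:], chainID)
    return sha256.Sum256(append(prefix[:], txFields...))
}

func main() {
    tx := []byte("from:256 to:257 amount:4 fee:126 nonce:0")
    fmt.Printf("chainID 1: %x\n", hashToSign(1, tx))
    fmt.Printf("chainID 5: %x\n", hashToSign(5, tx))
}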
@@ -20,7 +20,7 @@ func TestGenerateBlocksNoBatches(t *testing.T) {
 
         > block
     `
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     blocks, err := tc.GenerateBlocks(set)
     require.NoError(t, err)
     assert.Equal(t, 1, len(blocks))
@@ -87,7 +87,7 @@ func TestGenerateBlocks(t *testing.T) {
         // batch and last block
         Transfer(1) User1-User0: 1 (1)
     `
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     blocks, err := tc.GenerateBlocks(set)
     require.NoError(t, err)
     assert.Equal(t, 2, len(blocks))
@@ -191,7 +191,7 @@ func TestGeneratePoolL2Txs(t *testing.T) {
         > batchL1
         > batchL1
     `
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     _, err := tc.GenerateBlocks(set)
     require.NoError(t, err)
     set = `
@@ -251,7 +251,7 @@ func TestGeneratePoolL2Txs(t *testing.T) {
         > batchL1
         > block
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.NoError(t, err)
     set = `
@@ -282,7 +282,7 @@ func TestGeneratePoolL2TxsFromInstructions(t *testing.T) {
         > batchL1
         > batchL1
     `
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     _, err := tc.GenerateBlocks(set)
     require.NoError(t, err)
 
@@ -314,7 +314,7 @@ func TestGeneratePoolL2TxsFromInstructions(t *testing.T) {
     txsFromInstructions, err := tc.GeneratePoolL2TxsFromInstructions(instructionSet)
     require.NoError(t, err)
     // Generate Pool txs using string
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.NoError(t, err)
     stringSet := `
@@ -338,7 +338,7 @@ func TestGenerateErrors(t *testing.T) {
         CreateAccountDeposit(1) A: 5
         > batchL1
     `
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     _, err := tc.GenerateBlocks(set)
     assert.Equal(t, "Line 2: Can not process CreateAccountDeposit: TokenID 1 not registered, last registered TokenID: 0", err.Error())
 
@@ -347,7 +347,7 @@ func TestGenerateErrors(t *testing.T) {
         Type: Blockchain
         AddToken(0)
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.Equal(t, "Line 2: AddToken can not register TokenID 0", err.Error())
 
@@ -355,7 +355,7 @@ func TestGenerateErrors(t *testing.T) {
         Type: Blockchain
         AddToken(2)
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.Equal(t, "Line 2: AddToken TokenID should be sequential, expected TokenID: 1, defined TokenID: 2", err.Error())
 
@@ -366,7 +366,7 @@ func TestGenerateErrors(t *testing.T) {
         AddToken(3)
         AddToken(5)
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.Equal(t, "Line 5: AddToken TokenID should be sequential, expected TokenID: 4, defined TokenID: 5", err.Error())
 
@@ -380,7 +380,7 @@ func TestGenerateErrors(t *testing.T) {
         Transfer(1) A-B: 6 (1)
         > batch
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.Equal(t, "Line 5: CreateAccountDeposit(1)BTransfer(1) A-B: 6 (1)\n, err: Expected ':', found 'Transfer'", err.Error())
     set = `
@@ -394,7 +394,7 @@ func TestGenerateErrors(t *testing.T) {
         Transfer(1) A-B: 6 (1)
         > batch
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     _, err = tc.GenerateBlocks(set)
     require.NoError(t, err)
 
@@ -412,7 +412,7 @@ func TestGenerateErrors(t *testing.T) {
         Exit(1) A: 3 (1)
         > batch
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
    _, err = tc.GenerateBlocks(set)
     require.NoError(t, err)
     assert.Equal(t, common.Nonce(3), tc.Users["A"].Accounts[common.TokenID(1)].Nonce)
@@ -518,7 +518,7 @@ func TestGenerateFromInstructions(t *testing.T) {
         Typ: TypeNewBlock,
     })
 
-    tc := NewContext(common.RollupConstMaxL1UserTx)
+    tc := NewContext(0, common.RollupConstMaxL1UserTx)
     blockFromInstructions, err := tc.GenerateBlocksFromInstructions(setInst)
     require.NoError(t, err)
 
@@ -537,7 +537,7 @@ func TestGenerateFromInstructions(t *testing.T) {
         > batch
         > block
     `
-    tc = NewContext(common.RollupConstMaxL1UserTx)
+    tc = NewContext(0, common.RollupConstMaxL1UserTx)
     blockFromString, err := tc.GenerateBlocks(setString)
     require.NoError(t, err)
 
@@ -20,7 +20,7 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-func initTest(t *testing.T, testSet string) *TxSelector {
+func initTest(t *testing.T, chainID uint16, testSet string) *TxSelector {
     pass := os.Getenv("POSTGRES_PASS")
     db, err := dbUtils.InitSQLDB(5432, "localhost", "hermez", pass, "hermez")
     require.NoError(t, err)
@@ -29,7 +29,7 @@ func initTest(t *testing.T, testSet string) *TxSelector {
     dir, err := ioutil.TempDir("", "tmpdb")
     require.NoError(t, err)
     defer assert.NoError(t, os.RemoveAll(dir))
-    sdb, err := statedb.NewStateDB(dir, statedb.TypeTxSelector, 0)
+    sdb, err := statedb.NewStateDB(dir, statedb.TypeTxSelector, 0, chainID)
     require.NoError(t, err)
 
     txselDir, err := ioutil.TempDir("", "tmpTxSelDB")
@@ -64,7 +64,8 @@ func addTokens(t *testing.T, tokens []common.Token, db *sqlx.DB) {
 }
 
 func TestCoordIdxsDB(t *testing.T) {
-    txsel := initTest(t, til.SetPool0)
+    chainID := uint16(0)
+    txsel := initTest(t, chainID, til.SetPool0)
     test.WipeDB(txsel.l2db.DB())
 
     coordIdxs := make(map[common.TokenID]common.Idx)
@@ -81,10 +82,11 @@ func TestCoordIdxsDB(t *testing.T) {
 }
 
 func TestGetL2TxSelection(t *testing.T) {
-    txsel := initTest(t, til.SetPool0)
+    chainID := uint16(0)
+    txsel := initTest(t, chainID, til.SetPool0)
     test.WipeDB(txsel.l2db.DB())
 
-    tc := til.NewContext(common.RollupConstMaxL1UserTx)
+    tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
     // generate test transactions
     blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
     assert.NoError(t, err)