Mirror of https://github.com/arnaucube/hermez-node.git (synced 2026-02-07 03:16:45 +01:00)
Abstract TxProcessor from StateDB
- Abstract TxProcessor from StateDB
- Upgrade to the latest version of go-merkletree for the key-value DB usage
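To make the refactor concrete before the diff: a minimal sketch (not part of the commit; the import path and values are assumptions) of how an external package such as the abstracted TxProcessor can now drive the exported StateDB API, with chainID no longer passed to NewStateDB:

package main

import (
	"log"

	"github.com/hermeznetwork/hermez-node/db/statedb" // assumed package path
)

func main() {
	// chainID is no longer a StateDB concern, so it is not passed here.
	sdb, err := statedb.NewStateDB("/tmp/hermez-statedb", 128, statedb.TypeSynchronizer, 32)
	if err != nil {
		log.Fatal(err)
	}
	// Exported fields and methods are now reachable from outside the package.
	log.Println("batch:", sdb.CurrentBatch, "idx:", sdb.CurrentIdx, "root:", sdb.MTGetRoot())
	_ = sdb.MT // *merkletree.MerkleTree handle for the tx processor
}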
@@ -49,6 +49,8 @@ var (
	PrefixKeyAddr = []byte("a:")
	// PrefixKeyAddrBJJ is the key prefix for address-babyjubjub in the db
	PrefixKeyAddrBJJ = []byte("ab:")
	// keyidx is used as key in the db to store the current Idx
	keyidx = []byte("k:idx")
)

const (
@@ -74,28 +76,21 @@ type TypeStateDB string

// StateDB represents the StateDB object
type StateDB struct {
	path string
	currentBatch common.BatchNum
	db *pebble.PebbleStorage
	mt *merkletree.MerkleTree
	typ TypeStateDB
	chainID uint16
	// idx holds the current Idx that the BatchBuilder is using
	idx common.Idx
	zki *common.ZKInputs
	// i is the current transaction index in the ZKInputs generation (zki)
	i int
	// AccumulatedFees contains the accumulated fees for each token (Coord
	// Idx) in the processed batch
	AccumulatedFees map[common.Idx]*big.Int
	keep int
	path string
	Typ TypeStateDB
	// CurrentIdx holds the current Idx that the BatchBuilder is using
	CurrentIdx common.Idx
	CurrentBatch common.BatchNum
	db *pebble.Storage
	MT *merkletree.MerkleTree
	keep int
}

// NewStateDB creates a new StateDB, allowing to use an in-memory or in-disk
// storage. Checkpoints older than the value defined by `keep` will be
// deleted.
func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int, chainID uint16) (*StateDB, error) {
	var sto *pebble.PebbleStorage
func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int) (*StateDB, error) {
	var sto *pebble.Storage
	var err error
	sto, err = pebble.NewPebbleStorage(path.Join(pathDB, PathCurrent), false)
	if err != nil {
@@ -114,22 +109,21 @@ func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int, chainID u
	}

	sdb := &StateDB{
		path: pathDB,
		db: sto,
		mt: mt,
		typ: typ,
		chainID: chainID,
		keep: keep,
		path: pathDB,
		db: sto,
		MT: mt,
		Typ: typ,
		keep: keep,
	}

	// load currentBatch
	sdb.currentBatch, err = sdb.GetCurrentBatch()
	sdb.CurrentBatch, err = sdb.GetCurrentBatch()
	if err != nil {
		return nil, tracerr.Wrap(err)
	}

	// make reset (get checkpoint) at currentBatch
	err = sdb.reset(sdb.currentBatch, false)
	err = sdb.reset(sdb.CurrentBatch, false)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -137,8 +131,8 @@ func NewStateDB(pathDB string, keep int, typ TypeStateDB, nLevels int, chainID u
	return sdb, nil
}

// DB returns the *pebble.PebbleStorage from the StateDB
func (s *StateDB) DB() *pebble.PebbleStorage {
// DB returns the *pebble.Storage from the StateDB
func (s *StateDB) DB() *pebble.Storage {
	return s.db
}

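With the go-merkletree upgrade the storage handle returned by DB() changes from *pebble.PebbleStorage to *pebble.Storage. A hedged sketch of a write through that handle, mirroring the NewTx/Put/Commit calls used by SetIdx later in this diff (the key and value are made up):

tx, err := sdb.DB().NewTx()
if err != nil {
	return tracerr.Wrap(err)
}
// Put stores an arbitrary key-value pair in the StateDB's key-value storage.
if err := tx.Put([]byte("example-key"), []byte("example-value")); err != nil {
	return tracerr.Wrap(err)
}
if err := tx.Commit(); err != nil {
	return tracerr.Wrap(err)
}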
@@ -160,7 +154,7 @@ func (s *StateDB) setCurrentBatch() error {
	if err != nil {
		return tracerr.Wrap(err)
	}
	err = tx.Put(KeyCurrentBatch, s.currentBatch.Bytes())
	err = tx.Put(KeyCurrentBatch, s.CurrentBatch.Bytes())
	if err != nil {
		return tracerr.Wrap(err)
	}
@@ -173,10 +167,10 @@ func (s *StateDB) setCurrentBatch() error {
// MakeCheckpoint does a checkpoint at the given batchNum in the defined path. Internally this advances & stores the current BatchNum, and then stores a Checkpoint of the current state of the StateDB.
func (s *StateDB) MakeCheckpoint() error {
	// advance currentBatch
	s.currentBatch++
	log.Debugw("Making StateDB checkpoint", "batch", s.currentBatch, "type", s.typ)
	s.CurrentBatch++
	log.Debugw("Making StateDB checkpoint", "batch", s.CurrentBatch, "type", s.Typ)

	checkpointPath := path.Join(s.path, fmt.Sprintf("%s%d", PathBatchNum, s.currentBatch))
	checkpointPath := path.Join(s.path, fmt.Sprintf("%s%d", PathBatchNum, s.CurrentBatch))

	if err := s.setCurrentBatch(); err != nil {
		return tracerr.Wrap(err)
@@ -322,7 +316,7 @@ func (s *StateDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
		return tracerr.Wrap(err)
	}
	// remove all checkpoints > batchNum
	for i := batchNum + 1; i <= s.currentBatch; i++ {
	for i := batchNum + 1; i <= s.CurrentBatch; i++ {
		if err := s.DeleteCheckpoint(i); err != nil {
			return tracerr.Wrap(err)
		}
@@ -334,16 +328,16 @@ func (s *StateDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
		return tracerr.Wrap(err)
	}
	s.db = sto
	s.idx = 255
	s.currentBatch = batchNum
	s.CurrentIdx = 255
	s.CurrentBatch = batchNum

	if s.mt != nil {
	if s.MT != nil {
		// open the MT for the current s.db
		mt, err := merkletree.NewMerkleTree(s.db.WithPrefix(PrefixKeyMT), s.mt.MaxLevels())
		mt, err := merkletree.NewMerkleTree(s.db.WithPrefix(PrefixKeyMT), s.MT.MaxLevels())
		if err != nil {
			return tracerr.Wrap(err)
		}
		s.mt = mt
		s.MT = mt
	}
	return nil
}
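A short, hedged sketch of the checkpoint cycle these methods implement (batch handling and error propagation are illustrative, not taken from the commit):

// Advance the StateDB one batch and persist a checkpoint of its current state.
if err := sdb.MakeCheckpoint(); err != nil {
	return tracerr.Wrap(err)
}
// reset() above rewinds to an earlier checkpoint and prunes newer ones via
// DeleteCheckpoint; an explicit deletion looks like this:
if err := sdb.DeleteCheckpoint(sdb.CurrentBatch - 1); err != nil {
	return tracerr.Wrap(err)
}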
@@ -363,31 +357,66 @@ func (s *StateDB) reset(batchNum common.BatchNum, closeCurrent bool) error {
	s.db = sto

	// get currentBatch num
	s.currentBatch, err = s.GetCurrentBatch()
	s.CurrentBatch, err = s.GetCurrentBatch()
	if err != nil {
		return tracerr.Wrap(err)
	}
	// idx is obtained from the statedb reset
	s.idx, err = s.GetIdx()
	s.CurrentIdx, err = s.GetIdx()
	if err != nil {
		return tracerr.Wrap(err)
	}

	if s.mt != nil {
	if s.MT != nil {
		// open the MT for the current s.db
		mt, err := merkletree.NewMerkleTree(s.db.WithPrefix(PrefixKeyMT), s.mt.MaxLevels())
		mt, err := merkletree.NewMerkleTree(s.db.WithPrefix(PrefixKeyMT), s.MT.MaxLevels())
		if err != nil {
			return tracerr.Wrap(err)
		}
		s.mt = mt
		s.MT = mt
	}

	return nil
}

// GetIdx returns the stored Idx from the localStateDB, which is the last Idx
// used for an Account in the localStateDB.
func (s *StateDB) GetIdx() (common.Idx, error) {
	idxBytes, err := s.DB().Get(keyidx)
	if tracerr.Unwrap(err) == db.ErrNotFound {
		return 0, nil
	}
	if err != nil {
		return 0, tracerr.Wrap(err)
	}
	return common.IdxFromBytes(idxBytes[:])
}

// SetIdx stores Idx in the localStateDB
func (s *StateDB) SetIdx(idx common.Idx) error {
	s.CurrentIdx = idx

	tx, err := s.DB().NewTx()
	if err != nil {
		return tracerr.Wrap(err)
	}
	idxBytes, err := idx.Bytes()
	if err != nil {
		return tracerr.Wrap(err)
	}
	err = tx.Put(keyidx, idxBytes[:])
	if err != nil {
		return tracerr.Wrap(err)
	}
	if err := tx.Commit(); err != nil {
		return tracerr.Wrap(err)
	}
	return nil
}

// GetAccount returns the account for the given Idx
func (s *StateDB) GetAccount(idx common.Idx) (*common.Account, error) {
	return getAccountInTreeDB(s.db, idx)
	return GetAccountInTreeDB(s.db, idx)
}

// GetAccounts returns all the accounts in the db. Use for debugging purposes
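Usage sketch for the new GetIdx/SetIdx pair (the Idx value is arbitrary):

if err := sdb.SetIdx(common.Idx(261)); err != nil {
	return tracerr.Wrap(err)
}
idx, err := sdb.GetIdx()
if err != nil {
	return tracerr.Wrap(err)
}
// idx == 261 here; on a fresh DB without the key, GetIdx returns 0 because
// db.ErrNotFound is swallowed.
_ = idx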
@@ -419,9 +448,9 @@ func (s *StateDB) GetAccounts() ([]common.Account, error) {
	return accs, nil
}

// getAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// GetAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// from ExitTree. GetAccount returns the account for the given Idx
func getAccountInTreeDB(sto db.Storage, idx common.Idx) (*common.Account, error) {
func GetAccountInTreeDB(sto db.Storage, idx common.Idx) (*common.Account, error) {
	idxBytes, err := idx.Bytes()
	if err != nil {
		return nil, tracerr.Wrap(err)
@@ -445,10 +474,10 @@ func getAccountInTreeDB(sto db.Storage, idx common.Idx) (*common.Account, error)
}

// CreateAccount creates a new Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
	cpp, err := createAccountInTreeDB(s.db, s.mt, idx, account)
	cpp, err := CreateAccountInTreeDB(s.db, s.MT, idx, account)
	if err != nil {
		return cpp, tracerr.Wrap(err)
	}
@@ -457,11 +486,11 @@ func (s *StateDB) CreateAccount(idx common.Idx, account *common.Account) (*merkl
	return cpp, tracerr.Wrap(err)
}

// createAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// CreateAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// from ExitTree. Creates a new Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// StateDB.MT==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func createAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
func CreateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
	// store at the DB the key: v, and value: leaf.Bytes()
	v, err := account.HashValue()
	if err != nil {
@@ -511,14 +540,14 @@ func createAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func (s *StateDB) UpdateAccount(idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
	return updateAccountInTreeDB(s.db, s.mt, idx, account)
	return UpdateAccountInTreeDB(s.db, s.MT, idx, account)
}

// updateAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// UpdateAccountInTreeDB is abstracted from StateDB to be used from StateDB and
// from ExitTree. Updates the Account in the StateDB for the given Idx. If
// StateDB.mt==nil, MerkleTree is not affected, otherwise updates the
// MerkleTree, returning a CircomProcessorProof.
func updateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
func UpdateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common.Idx, account *common.Account) (*merkletree.CircomProcessorProof, error) {
	// store at the DB the key: v, and value: account.Bytes()
	v, err := account.HashValue()
	if err != nil {
@@ -559,20 +588,15 @@ func updateAccountInTreeDB(sto db.Storage, mt *merkletree.MerkleTree, idx common

// MTGetProof returns the CircomVerifierProof for a given Idx
func (s *StateDB) MTGetProof(idx common.Idx) (*merkletree.CircomVerifierProof, error) {
	if s.mt == nil {
	if s.MT == nil {
		return nil, tracerr.Wrap(ErrStateDBWithoutMT)
	}
	return s.mt.GenerateCircomVerifierProof(idx.BigInt(), s.mt.Root())
	return s.MT.GenerateCircomVerifierProof(idx.BigInt(), s.MT.Root())
}

// MTGetRoot returns the current root of the underlying Merkle Tree
func (s *StateDB) MTGetRoot() *big.Int {
	return s.mt.Root().BigInt()
}

// MerkleTree returns the underlying StateDB merkle tree. It can be nil.
func (s *StateDB) MerkleTree() *merkletree.MerkleTree {
	return s.mt
	return s.MT.Root().BigInt()
}

// LocalStateDB represents the local StateDB which allows to make copies from
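The unexported tree helpers become exported so that code outside the package (for example an exit tree managed by the abstracted TxProcessor) can reuse the same account encoding. A hedged sketch, where exitStorage, exitTree, idx and account are assumed to exist in the caller:

cpp, err := statedb.CreateAccountInTreeDB(exitStorage, exitTree, idx, account)
if err != nil {
	return tracerr.Wrap(err)
}
_ = cpp // CircomProcessorProof, usable when building ZKInputs
acc, err := statedb.GetAccountInTreeDB(exitStorage, idx)
if err != nil {
	return tracerr.Wrap(err)
}
_ = acc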
@@ -588,7 +612,7 @@ type LocalStateDB struct {
// deleted.
func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeStateDB,
	nLevels int) (*LocalStateDB, error) {
	s, err := NewStateDB(path, keep, typ, nLevels, synchronizerDB.chainID)
	s, err := NewStateDB(path, keep, typ, nLevels)
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
@@ -602,7 +626,7 @@ func NewLocalStateDB(path string, keep int, synchronizerDB *StateDB, typ TypeSta
// gets the state from LocalStateDB.synchronizerStateDB for the given batchNum. If fromSynchronizer is false, get the state from LocalStateDB checkpoints.
func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) error {
	if batchNum == 0 {
		l.idx = 0
		l.CurrentIdx = 0
		return nil
	}

@@ -646,17 +670,17 @@ func (l *LocalStateDB) Reset(batchNum common.BatchNum, fromSynchronizer bool) er
	l.db = sto

	// get currentBatch num
	l.currentBatch, err = l.GetCurrentBatch()
	l.CurrentBatch, err = l.GetCurrentBatch()
	if err != nil {
		return tracerr.Wrap(err)
	}
	// open the MT for the current s.db
	if l.mt != nil {
		mt, err := merkletree.NewMerkleTree(l.db.WithPrefix(PrefixKeyMT), l.mt.MaxLevels())
	if l.MT != nil {
		mt, err := merkletree.NewMerkleTree(l.db.WithPrefix(PrefixKeyMT), l.MT.MaxLevels())
		if err != nil {
			return tracerr.Wrap(err)
		}
		l.mt = mt
		l.MT = mt
	}

	return nil
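Sketch of the LocalStateDB flow shown above (syncStateDB and batchNum are assumed to exist in the caller; the path is illustrative):

ldb, err := statedb.NewLocalStateDB("/tmp/batchbuilder-db", 128, syncStateDB, statedb.TypeBatchBuilder, 32)
if err != nil {
	return tracerr.Wrap(err)
}
// Copy the state from the synchronizer StateDB at the given batch and reopen
// the MerkleTree over the new storage, as Reset does above.
if err := ldb.Reset(batchNum, true); err != nil {
	return tracerr.Wrap(err)
}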
@@ -45,8 +45,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)

	// test values
@@ -68,7 +67,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

	// call NewStateDB which should get the db at the last checkpoint state
	// executing a Reset (discarding the last 'testkey0'&'testvalue0' data)
	sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)
	v, err = sdb.db.Get(k0)
	assert.NotNil(t, err)
@@ -110,7 +109,7 @@ func TestNewStateDBIntermediateState(t *testing.T) {

	// call NewStateDB which should get the db at the last checkpoint state
	// executing a Reset (discarding the last 'testkey1'&'testvalue1' data)
	sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err = NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)

	v, err = sdb.db.Get(k0)
@@ -128,8 +127,7 @@ func TestStateDBWithoutMT(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)

	// create test accounts
@@ -183,8 +181,7 @@ func TestStateDBWithMT(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
	assert.NoError(t, err)

	// create test accounts
@@ -236,8 +233,7 @@ func TestCheckpoints(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
	assert.NoError(t, err)

	// create test accounts
@@ -354,8 +350,7 @@ func TestStateDBGetAccounts(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)

	// create test accounts
@@ -402,8 +397,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
	require.NoError(t, err)

	ay0 := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(253), nil), big.NewInt(1))
@@ -467,7 +461,7 @@ func TestCheckAccountsTreeTestVectors(t *testing.T) {
		require.NoError(t, err)
	}
	// root value generated by js version:
	assert.Equal(t, "17298264051379321456969039521810887093935433569451713402227686942080129181291", sdb.mt.Root().BigInt().String())
	assert.Equal(t, "17298264051379321456969039521810887093935433569451713402227686942080129181291", sdb.MT.Root().BigInt().String())
}

func TestListCheckpoints(t *testing.T) {
@@ -475,8 +469,7 @@ func TestListCheckpoints(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32)
	require.NoError(t, err)

	numCheckpoints := 16
@@ -506,9 +499,8 @@ func TestDeleteOldCheckpoints(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	keep := 16
	sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32, chainID)
	sdb, err := NewStateDB(dir, keep, TypeSynchronizer, 32)
	require.NoError(t, err)

	numCheckpoints := 32

File diff suppressed because it is too large
@@ -1,665 +0,0 @@
package statedb

import (
	"encoding/binary"
	"encoding/hex"
	"io/ioutil"
	"math/big"
	"os"
	"testing"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/hermez-node/test/til"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func checkBalance(t *testing.T, tc *til.Context, sdb *StateDB, username string, tokenid int, expected string) {
	idx := tc.Users[username].Accounts[common.TokenID(tokenid)].Idx
	acc, err := sdb.GetAccount(idx)
	require.NoError(t, err)
	assert.Equal(t, expected, acc.Balance.String())
}

func TestComputeEffectiveAmounts(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	assert.NoError(t, err)

	set := `
		Type: Blockchain
		AddToken(1)

		CreateAccountDeposit(0) A: 10
		CreateAccountDeposit(0) B: 10
		CreateAccountDeposit(1) C: 10
		> batchL1
		> batchL1
		> block
	`
	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(set)
	require.NoError(t, err)

	ptc := ProcessTxsConfig{
		NLevels: 32,
		MaxFeeTx: 64,
		MaxTx: 512,
		MaxL1Tx: 16,
	}
	_, err = sdb.ProcessTxs(ptc, nil, blocks[0].Rollup.L1UserTxs, nil, nil)
	require.NoError(t, err)

	tx := common.L1Tx{
		FromIdx: 256,
		ToIdx: 257,
		Amount: big.NewInt(10),
		DepositAmount: big.NewInt(0),
		FromEthAddr: tc.Users["A"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(0), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(10), tx.EffectiveAmount)

	// expect error due not enough funds
	tx = common.L1Tx{
		FromIdx: 256,
		ToIdx: 257,
		Amount: big.NewInt(11),
		DepositAmount: big.NewInt(0),
		FromEthAddr: tc.Users["A"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(0), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// expect no-error as there are enough funds in a
	// CreateAccountDepositTransfer transaction
	tx = common.L1Tx{
		FromIdx: 0,
		ToIdx: 257,
		Amount: big.NewInt(10),
		DepositAmount: big.NewInt(10),
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(10), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(10), tx.EffectiveAmount)

	// expect error due not enough funds in a CreateAccountDepositTransfer
	// transaction
	tx = common.L1Tx{
		FromIdx: 0,
		ToIdx: 257,
		Amount: big.NewInt(11),
		DepositAmount: big.NewInt(10),
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(10), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// expect error due not same TokenID
	tx = common.L1Tx{
		FromIdx: 256,
		ToIdx: 258,
		Amount: big.NewInt(5),
		DepositAmount: big.NewInt(0),
		FromEthAddr: tc.Users["A"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(0), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// expect error due not same EthAddr
	tx = common.L1Tx{
		FromIdx: 256,
		ToIdx: 257,
		Amount: big.NewInt(8),
		DepositAmount: big.NewInt(0),
		FromEthAddr: tc.Users["B"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(0), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// expect on TxTypeDepositTransfer EffectiveAmount=0, but
	// EffectiveDepositAmount!=0, due not enough funds to make the transfer
	tx = common.L1Tx{
		FromIdx: 256,
		ToIdx: 257,
		Amount: big.NewInt(20),
		DepositAmount: big.NewInt(8),
		FromEthAddr: tc.Users["A"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(8), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// expect on TxTypeDepositTransfer EffectiveAmount=0, but
	// EffectiveDepositAmount!=0, due different EthAddr from FromIdx
	// address
	tx = common.L1Tx{
		FromIdx: 256,
		ToIdx: 257,
		Amount: big.NewInt(8),
		DepositAmount: big.NewInt(8),
		FromEthAddr: tc.Users["B"].Addr,
		UserOrigin: true,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(8), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// CreateAccountDepositTransfer for TokenID=1 when receiver does not
	// have an account for that TokenID, expect that the
	// EffectiveDepositAmount=DepositAmount, but EffectiveAmount==0
	tx = common.L1Tx{
		FromIdx: 0,
		ToIdx: 257,
		Amount: big.NewInt(8),
		DepositAmount: big.NewInt(8),
		FromEthAddr: tc.Users["A"].Addr,
		TokenID: 2,
		UserOrigin: true,
		Type: common.TxTypeCreateAccountDepositTransfer,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(8), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)

	// DepositTransfer for TokenID=1 when receiver does not have an account
	// for that TokenID, expect that the
	// EffectiveDepositAmount=DepositAmount, but EffectiveAmount=0
	tx = common.L1Tx{
		FromIdx: 258,
		ToIdx: 256,
		Amount: big.NewInt(8),
		DepositAmount: big.NewInt(8),
		FromEthAddr: tc.Users["C"].Addr,
		TokenID: 1,
		UserOrigin: true,
		Type: common.TxTypeDepositTransfer,
	}
	sdb.computeEffectiveAmounts(&tx)
	assert.Equal(t, big.NewInt(8), tx.EffectiveDepositAmount)
	assert.Equal(t, big.NewInt(0), tx.EffectiveAmount)
}

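A rough sketch (not the actual implementation) of the rule these cases exercise: the deposit half of an L1 tx is always applied, while the transfer half is nullified whenever the sender lacks funds, the TokenIDs do not match, or FromEthAddr does not own FromIdx:

func effectiveAmountsSketch(tx *common.L1Tx, senderBalance *big.Int, senderOwner ethCommon.Address, sameToken bool) {
	// The deposit is always effective.
	tx.EffectiveDepositAmount = tx.DepositAmount
	tx.EffectiveAmount = tx.Amount
	// The transfer is nullified when it cannot be applied safely.
	if senderBalance.Cmp(tx.Amount) < 0 || !sameToken || senderOwner != tx.FromEthAddr {
		tx.EffectiveAmount = big.NewInt(0)
	}
}
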
func TestProcessTxsBalances(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	assert.NoError(t, err)

	// generate test transactions from test.SetBlockchain0 code
	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(til.SetBlockchainMinimumFlow0)
	require.NoError(t, err)

	// Coordinator Idx where to send the fees
	coordIdxs := []common.Idx{256, 257}
	ptc := ProcessTxsConfig{
		NLevels: 32,
		MaxFeeTx: 64,
		MaxTx: 512,
		MaxL1Tx: 16,
	}

	log.Debug("block:0 batch:0, only L1CoordinatorTxs")
	_, err = sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
	require.NoError(t, err)

	log.Debug("block:0 batch:1")
	l1UserTxs := []common.L1Tx{}
	l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)

	log.Debug("block:0 batch:2")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[2].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "A", 0, "500")

	log.Debug("block:0 batch:3")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[3].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[3].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[3].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "A", 0, "500")
	checkBalance(t, tc, sdb, "A", 1, "500")

	log.Debug("block:0 batch:4")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[4].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[4].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[4].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "A", 0, "500")
	checkBalance(t, tc, sdb, "A", 1, "500")

	log.Debug("block:0 batch:5")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[5].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[5].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[5].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "A", 0, "600")
	checkBalance(t, tc, sdb, "A", 1, "500")
	checkBalance(t, tc, sdb, "B", 0, "400")

	log.Debug("block:0 batch:6")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[6].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[6].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[6].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "Coord", 0, "10")
	checkBalance(t, tc, sdb, "Coord", 1, "20")
	checkBalance(t, tc, sdb, "A", 0, "600")
	checkBalance(t, tc, sdb, "A", 1, "280")
	checkBalance(t, tc, sdb, "B", 0, "290")
	checkBalance(t, tc, sdb, "B", 1, "200")
	checkBalance(t, tc, sdb, "C", 0, "100")
	checkBalance(t, tc, sdb, "D", 0, "800")

	log.Debug("block:0 batch:7")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[0].Rollup.Batches[7].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[7].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[0].Rollup.Batches[7].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "Coord", 0, "35")
	checkBalance(t, tc, sdb, "Coord", 1, "30")
	checkBalance(t, tc, sdb, "A", 0, "430")
	checkBalance(t, tc, sdb, "A", 1, "280")
	checkBalance(t, tc, sdb, "B", 0, "390")
	checkBalance(t, tc, sdb, "B", 1, "90")
	checkBalance(t, tc, sdb, "C", 0, "45")
	checkBalance(t, tc, sdb, "C", 1, "100")
	checkBalance(t, tc, sdb, "D", 0, "800")

	log.Debug("block:1 batch:0")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[0].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "Coord", 0, "75")
	checkBalance(t, tc, sdb, "Coord", 1, "30")
	checkBalance(t, tc, sdb, "A", 0, "730")
	checkBalance(t, tc, sdb, "A", 1, "280")
	checkBalance(t, tc, sdb, "B", 0, "380")
	checkBalance(t, tc, sdb, "B", 1, "90")
	checkBalance(t, tc, sdb, "C", 0, "845")
	checkBalance(t, tc, sdb, "C", 1, "100")
	checkBalance(t, tc, sdb, "D", 0, "470")

	log.Debug("block:1 batch:1")
	l1UserTxs = til.L1TxsToCommonL1Txs(tc.Queues[*blocks[1].Rollup.Batches[1].Batch.ForgeL1TxsNum])
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, l1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)

	// use Set of PoolL2 txs
	poolL2Txs, err := tc.GeneratePoolL2Txs(til.SetPoolL2MinimumFlow1)
	assert.NoError(t, err)

	_, err = sdb.ProcessTxs(ptc, coordIdxs, []common.L1Tx{}, []common.L1Tx{}, poolL2Txs)
	require.NoError(t, err)
	checkBalance(t, tc, sdb, "Coord", 0, "105")
	checkBalance(t, tc, sdb, "Coord", 1, "40")
	checkBalance(t, tc, sdb, "A", 0, "510")
	checkBalance(t, tc, sdb, "A", 1, "170")
	checkBalance(t, tc, sdb, "B", 0, "480")
	checkBalance(t, tc, sdb, "B", 1, "190")
	checkBalance(t, tc, sdb, "C", 0, "845")
	checkBalance(t, tc, sdb, "C", 1, "100")
	checkBalance(t, tc, sdb, "D", 0, "360")
	checkBalance(t, tc, sdb, "F", 0, "100")
}

func TestProcessTxsSynchronizer(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeSynchronizer, 32, chainID)
	assert.NoError(t, err)

	// generate test transactions from test.SetBlockchain0 code
	tc := til.NewContext(0, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
	require.NoError(t, err)

	assert.Equal(t, 31, len(blocks[0].Rollup.L1UserTxs))
	assert.Equal(t, 4, len(blocks[0].Rollup.Batches[0].L1CoordinatorTxs))
	assert.Equal(t, 0, len(blocks[0].Rollup.Batches[1].L1CoordinatorTxs))
	assert.Equal(t, 22, len(blocks[0].Rollup.Batches[2].L2Txs))
	assert.Equal(t, 1, len(blocks[1].Rollup.Batches[0].L1CoordinatorTxs))
	assert.Equal(t, 62, len(blocks[1].Rollup.Batches[0].L2Txs))
	assert.Equal(t, 1, len(blocks[1].Rollup.Batches[1].L1CoordinatorTxs))
	assert.Equal(t, 8, len(blocks[1].Rollup.Batches[1].L2Txs))

	// Coordinator Idx where to send the fees
	coordIdxs := []common.Idx{256, 257, 258, 259}

	// Idx of user 'A'
	idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx

	ptc := ProcessTxsConfig{
		NLevels: 32,
		MaxFeeTx: 64,
		MaxTx: 512,
		MaxL1Tx: 32,
	}

	// Process the 1st batch, which contains the L1CoordinatorTxs necessary
	// to create the Coordinator accounts to receive the fees
	log.Debug("block:0 batch:0, only L1CoordinatorTxs")
	ptOut, err := sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
	require.NoError(t, err)
	assert.Equal(t, 4, len(ptOut.CreatedAccounts))
	assert.Equal(t, 0, len(ptOut.CollectedFees))

	log.Debug("block:0 batch:1")
	l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[0].Rollup.L1UserTxs,
		blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	assert.Equal(t, 0, len(ptOut.ExitInfos))
	assert.Equal(t, 31, len(ptOut.CreatedAccounts))
	assert.Equal(t, 4, len(ptOut.CollectedFees))
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(1)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
	acc, err := sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "50", acc.Balance.String())

	log.Debug("block:0 batch:2")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	assert.Equal(t, 0, len(ptOut.ExitInfos))
	assert.Equal(t, 0, len(ptOut.CreatedAccounts))
	assert.Equal(t, 4, len(ptOut.CollectedFees))
	assert.Equal(t, "2", ptOut.CollectedFees[common.TokenID(0)].String())
	assert.Equal(t, "1", ptOut.CollectedFees[common.TokenID(1)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
	acc, err = sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "35", acc.Balance.String())

	log.Debug("block:1 batch:0")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
	// before processing expect l2Txs[0:2].Nonce==0
	assert.Equal(t, common.Nonce(0), l2Txs[0].Nonce)
	assert.Equal(t, common.Nonce(0), l2Txs[1].Nonce)
	assert.Equal(t, common.Nonce(0), l2Txs[2].Nonce)

	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)

	// after processing expect l2Txs[0:2].Nonce!=0 and has expected value
	assert.Equal(t, common.Nonce(5), l2Txs[0].Nonce)
	assert.Equal(t, common.Nonce(6), l2Txs[1].Nonce)
	assert.Equal(t, common.Nonce(7), l2Txs[2].Nonce)

	assert.Equal(t, 4, len(ptOut.ExitInfos)) // the 'ForceExit(1)' is not computed yet, as the batch is without L1UserTxs
	assert.Equal(t, 1, len(ptOut.CreatedAccounts))
	assert.Equal(t, 4, len(ptOut.CollectedFees))
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
	assert.Equal(t, "1", ptOut.CollectedFees[common.TokenID(1)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
	acc, err = sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "57", acc.Balance.String())

	log.Debug("block:1 batch:1")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[1].Rollup.L1UserTxs,
		blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)

	assert.Equal(t, 2, len(ptOut.ExitInfos)) // 2, as previous batch was without L1UserTxs, and has pending the 'ForceExit(1) A: 5'
	assert.Equal(t, 1, len(ptOut.CreatedAccounts))
	assert.Equal(t, 4, len(ptOut.CollectedFees))
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(0)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(1)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(2)].String())
	assert.Equal(t, "0", ptOut.CollectedFees[common.TokenID(3)].String())
	acc, err = sdb.GetAccount(idxA1)
	assert.NoError(t, err)
	assert.Equal(t, "77", acc.Balance.String())

	idxB0 := tc.Users["C"].Accounts[common.TokenID(0)].Idx
	acc, err = sdb.GetAccount(idxB0)
	require.NoError(t, err)
	assert.Equal(t, "51", acc.Balance.String())

	// get balance of Coordinator account for TokenID==0
	acc, err = sdb.GetAccount(common.Idx(256))
	require.NoError(t, err)
	assert.Equal(t, "2", acc.Balance.String())
}

func TestProcessTxsBatchBuilder(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeBatchBuilder, 32, chainID)
	assert.NoError(t, err)

	// generate test transactions from test.SetBlockchain0 code
	tc := til.NewContext(0, common.RollupConstMaxL1UserTx)
	blocks, err := tc.GenerateBlocks(til.SetBlockchain0)
	require.NoError(t, err)

	// Coordinator Idx where to send the fees
	coordIdxs := []common.Idx{256, 257, 258, 259}

	// Idx of user 'A'
	idxA1 := tc.Users["A"].Accounts[common.TokenID(1)].Idx

	ptc := ProcessTxsConfig{
		NLevels: 32,
		MaxFeeTx: 64,
		MaxTx: 512,
		MaxL1Tx: 32,
	}

	// Process the 1st batch, which contains the L1CoordinatorTxs necessary
	// to create the Coordinator accounts to receive the fees
	log.Debug("block:0 batch:0, only L1CoordinatorTxs")
	ptOut, err := sdb.ProcessTxs(ptc, nil, nil, blocks[0].Rollup.Batches[0].L1CoordinatorTxs, nil)
	require.NoError(t, err)
	// expect 0 at CreatedAccount, as is only computed when StateDB.Type==TypeSynchronizer
	assert.Equal(t, 0, len(ptOut.CreatedAccounts))

	log.Debug("block:0 batch:1")
	l2Txs := common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[1].L2Txs)
	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[0].Rollup.L1UserTxs, blocks[0].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	assert.Equal(t, 0, len(ptOut.ExitInfos))
	assert.Equal(t, 0, len(ptOut.CreatedAccounts))
	acc, err := sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "50", acc.Balance.String())

	log.Debug("block:0 batch:2")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[0].Rollup.Batches[2].L2Txs)
	ptOut, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[0].Rollup.Batches[2].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	assert.Equal(t, 0, len(ptOut.ExitInfos))
	assert.Equal(t, 0, len(ptOut.CreatedAccounts))
	acc, err = sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "35", acc.Balance.String())

	log.Debug("block:1 batch:0")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[0].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, nil, blocks[1].Rollup.Batches[0].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	acc, err = sdb.GetAccount(idxA1)
	require.NoError(t, err)
	assert.Equal(t, "57", acc.Balance.String())

	log.Debug("block:1 batch:1")
	l2Txs = common.L2TxsToPoolL2Txs(blocks[1].Rollup.Batches[1].L2Txs)
	_, err = sdb.ProcessTxs(ptc, coordIdxs, blocks[1].Rollup.L1UserTxs, blocks[1].Rollup.Batches[1].L1CoordinatorTxs, l2Txs)
	require.NoError(t, err)
	acc, err = sdb.GetAccount(idxA1)
	assert.NoError(t, err)
	assert.Equal(t, "77", acc.Balance.String())

	idxB0 := tc.Users["C"].Accounts[common.TokenID(0)].Idx
	acc, err = sdb.GetAccount(idxB0)
	require.NoError(t, err)
	assert.Equal(t, "51", acc.Balance.String())

	// get balance of Coordinator account for TokenID==0
	acc, err = sdb.GetAccount(common.Idx(256))
	require.NoError(t, err)
	assert.Equal(t, common.TokenID(0), acc.TokenID)
	assert.Equal(t, "2", acc.Balance.String())
	acc, err = sdb.GetAccount(common.Idx(257))
	require.NoError(t, err)
	assert.Equal(t, common.TokenID(1), acc.TokenID)
	assert.Equal(t, "2", acc.Balance.String())

	assert.Equal(t, "2720257526434001367979405991743527513807903085728407823609738212616896104498", sdb.mt.Root().BigInt().String())
}

func TestProcessTxsRootTestVectors(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeBatchBuilder, 32, chainID)
	assert.NoError(t, err)

	// same values than in the js test
	bjj0, err := common.BJJFromStringWithChecksum("21b0a1688b37f77b1d1d5539ec3b826db5ac78b2513f574a04c50a7d4f8246d7")
	assert.NoError(t, err)
	l1Txs := []common.L1Tx{
		{
			FromIdx: 0,
			DepositAmount: big.NewInt(16000000),
			Amount: big.NewInt(0),
			TokenID: 1,
			FromBJJ: bjj0,
			FromEthAddr: ethCommon.HexToAddress("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf"),
			ToIdx: 0,
			Type: common.TxTypeCreateAccountDeposit,
			UserOrigin: true,
		},
	}
	l2Txs := []common.PoolL2Tx{
		{
			FromIdx: 256,
			ToIdx: 256,
			TokenID: 1,
			Amount: big.NewInt(1000),
			Nonce: 0,
			Fee: 126,
			Type: common.TxTypeTransfer,
		},
	}

	ptc := ProcessTxsConfig{
		NLevels: 32,
		MaxFeeTx: 8,
		MaxTx: 32,
		MaxL1Tx: 16,
	}
	_, err = sdb.ProcessTxs(ptc, nil, l1Txs, nil, l2Txs)
	require.NoError(t, err)
	assert.Equal(t, "9827704113668630072730115158977131501210702363656902211840117643154933433410", sdb.mt.Root().BigInt().String())
}

func TestCreateAccountDepositMaxValue(t *testing.T) {
	dir, err := ioutil.TempDir("", "tmpdb")
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	nLevels := 16
	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeBatchBuilder, nLevels, chainID)
	assert.NoError(t, err)

	users := generateJsUsers(t)

	daMaxHex, err := hex.DecodeString("FFFF")
	require.NoError(t, err)
	daMaxF16 := common.Float16(binary.BigEndian.Uint16(daMaxHex))
	daMaxBI := daMaxF16.BigInt()
	assert.Equal(t, "10235000000000000000000000000000000", daMaxBI.String())

	daMax1Hex, err := hex.DecodeString("FFFE")
	require.NoError(t, err)
	daMax1F16 := common.Float16(binary.BigEndian.Uint16(daMax1Hex))
	daMax1BI := daMax1F16.BigInt()
	assert.Equal(t, "10225000000000000000000000000000000", daMax1BI.String())

	l1Txs := []common.L1Tx{
		{
			FromIdx: 0,
			DepositAmount: daMaxBI,
			Amount: big.NewInt(0),
			TokenID: 1,
			FromBJJ: users[0].BJJ.Public().Compress(),
			FromEthAddr: users[0].Addr,
			ToIdx: 0,
			Type: common.TxTypeCreateAccountDeposit,
			UserOrigin: true,
		},
		{
			FromIdx: 0,
			DepositAmount: daMax1BI,
			Amount: big.NewInt(0),
			TokenID: 1,
			FromBJJ: users[1].BJJ.Public().Compress(),
			FromEthAddr: users[1].Addr,
			ToIdx: 0,
			Type: common.TxTypeCreateAccountDeposit,
			UserOrigin: true,
		},
	}

	ptc := ProcessTxsConfig{
		NLevels: uint32(nLevels),
		MaxTx: 3,
		MaxL1Tx: 2,
		MaxFeeTx: 2,
	}

	_, err = sdb.ProcessTxs(ptc, nil, l1Txs, nil, nil)
	require.NoError(t, err)

	// check balances
	acc, err := sdb.GetAccount(common.Idx(256))
	require.NoError(t, err)
	assert.Equal(t, daMaxBI, acc.Balance)
	acc, err = sdb.GetAccount(common.Idx(257))
	require.NoError(t, err)
	assert.Equal(t, daMax1BI, acc.Balance)
}
@@ -3,14 +3,12 @@ package statedb
import (
	"bytes"
	"fmt"
	"math/big"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/log"
	"github.com/hermeznetwork/tracerr"
	"github.com/iden3/go-iden3-crypto/babyjub"
	"github.com/iden3/go-merkletree"
)

func concatEthAddrTokenID(addr ethCommon.Address, tokenID common.TokenID) []byte {
@@ -136,44 +134,3 @@ func (s *StateDB) GetTokenIDsFromIdxs(idxs []common.Idx) (map[common.TokenID]com
	}
	return m, nil
}

func siblingsToZKInputFormat(s []*merkletree.Hash) []*big.Int {
	b := make([]*big.Int, len(s))
	for i := 0; i < len(s); i++ {
		b[i] = s[i].BigInt()
	}
	return b
}

// BJJCompressedTo256BigInts returns a [256]*big.Int array with the bit
// representation of the babyjub.PublicKeyComp
func BJJCompressedTo256BigInts(pkComp babyjub.PublicKeyComp) [256]*big.Int {
	var r [256]*big.Int
	b := pkComp[:]

	for i := 0; i < 256; i++ {
		if b[i/8]&(1<<(i%8)) == 0 {
			r[i] = big.NewInt(0)
		} else {
			r[i] = big.NewInt(1)
		}
	}

	return r
}

// formatAccumulatedFees returns an array of [nFeeAccounts]*big.Int containing
// the balance of each FeeAccount, taken from the 'collectedFees' map, in the
// order of the 'orderTokenIDs'
func formatAccumulatedFees(collectedFees map[common.TokenID]*big.Int, orderTokenIDs []*big.Int) []*big.Int {
	accFeeOut := make([]*big.Int, len(orderTokenIDs))
	for i := 0; i < len(orderTokenIDs); i++ {
		tokenID := common.TokenIDFromBigInt(orderTokenIDs[i])
		if _, ok := collectedFees[tokenID]; ok {
			accFeeOut[i] = new(big.Int).Set(collectedFees[tokenID])
		} else {
			accFeeOut[i] = big.NewInt(0)
		}
	}
	return accFeeOut
}

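These ZKInputs helpers are removed here, presumably migrating along with the abstracted TxProcessor. For reference, a small sketch of the bit decomposition BJJCompressedTo256BigInts performs, matching the deleted test further below:

var pkComp babyjub.PublicKeyComp
pkComp[0] = 3 // binary 00000011: the two least-significant bits of the first byte
bits := BJJCompressedTo256BigInts(pkComp)
// bits[0] == 1 and bits[1] == 1; every other entry is 0, since the
// decomposition walks each byte from its least-significant bit upwards.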
@@ -3,7 +3,6 @@ package statedb
import (
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	"testing"

@@ -20,8 +19,7 @@ func TestGetIdx(t *testing.T) {
	require.NoError(t, err)
	defer assert.NoError(t, os.RemoveAll(dir))

	chainID := uint16(0)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0, chainID)
	sdb, err := NewStateDB(dir, 128, TypeTxSelector, 0)
	assert.NoError(t, err)

	var sk babyjub.PrivateKey
@@ -92,33 +90,3 @@ func TestGetIdx(t *testing.T) {
	_, err = sdb.GetIdxByEthAddr(addr, tokenID1)
	assert.NotNil(t, err)
}

func TestBJJCompressedTo256BigInt(t *testing.T) {
	var pkComp babyjub.PublicKeyComp
	r := BJJCompressedTo256BigInts(pkComp)
	zero := big.NewInt(0)
	for i := 0; i < 256; i++ {
		assert.Equal(t, zero, r[i])
	}

	pkComp[0] = 3
	r = BJJCompressedTo256BigInts(pkComp)
	one := big.NewInt(1)
	for i := 0; i < 256; i++ {
		if i != 0 && i != 1 {
			assert.Equal(t, zero, r[i])
		} else {
			assert.Equal(t, one, r[i])
		}
	}

	pkComp[31] = 4
	r = BJJCompressedTo256BigInts(pkComp)
	for i := 0; i < 256; i++ {
		if i != 0 && i != 1 && i != 250 {
			assert.Equal(t, zero, r[i])
		} else {
			assert.Equal(t, one, r[i])
		}
	}
}

File diff suppressed because one or more lines are too long