mirror of
https://github.com/arnaucube/hermez-node.git
synced 2026-02-07 03:16:45 +01:00
94
db/historydb/historydb.go
Normal file
94
db/historydb/historydb.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package historydb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gobuffalo/packr/v2"
|
||||
"github.com/hermeznetwork/hermez-node/common"
|
||||
"github.com/hermeznetwork/hermez-node/db"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/lib/pq" // driver for postgres DB
|
||||
migrate "github.com/rubenv/sql-migrate"
|
||||
"github.com/russross/meddler"
|
||||
)
|
||||
|
||||
// HistoryDB persists the historic data of the rollup
// (blocks, bids, batches, ...) into a PostgreSQL DB.
type HistoryDB struct {
	db *sqlx.DB // underlying SQL connection pool
}
|
||||
|
||||
// NewHistoryDB initialize the DB
|
||||
func NewHistoryDB(port int, host, user, password, dbname string) (*HistoryDB, error) {
|
||||
// Connect to DB
|
||||
psqlconn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", host, port, user, password, dbname)
|
||||
hdb, err := sqlx.Connect("postgres", psqlconn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Init meddler
|
||||
db.InitMeddler()
|
||||
meddler.Default = meddler.PostgreSQL
|
||||
|
||||
// Run DB migrations
|
||||
migrations := &migrate.PackrMigrationSource{
|
||||
Box: packr.New("history-migrations", "./migrations"),
|
||||
}
|
||||
if _, err := migrate.Exec(hdb.DB, "postgres", migrations, migrate.Up); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HistoryDB{hdb}, nil
|
||||
}
|
||||
|
||||
// addBlocks insert blocks into the DB
|
||||
func (hdb *HistoryDB) addBlocks(blocks []common.Block) error {
|
||||
return db.BulkInsert(
|
||||
hdb.db,
|
||||
"INSERT INTO block (eth_block_num, timestamp, hash) VALUES %s",
|
||||
blocks[:],
|
||||
)
|
||||
}
|
||||
|
||||
// GetBlocks retrrieve blocks from the DB
|
||||
func (hdb *HistoryDB) GetBlocks(from, to uint64) ([]*common.Block, error) {
|
||||
var blocks []*common.Block
|
||||
err := meddler.QueryAll(
|
||||
hdb.db, &blocks,
|
||||
"SELECT * FROM block WHERE $1 <= eth_block_num AND eth_block_num < $2",
|
||||
from, to,
|
||||
)
|
||||
return blocks, err
|
||||
}
|
||||
|
||||
// reorg deletes all the information that was added into the DB after the lastValidBlock.
// The schema declares the foreign keys that reference block with ON DELETE CASCADE
// (see migrations/001_init.sql), so deleting rows from `block` also removes the
// dependent rows (bids, batches, ...).
// WARNING: this is a draft of the function, useful at the moment for tests
func (hdb *HistoryDB) reorg(lastValidBlock uint64) error {
	_, err := hdb.db.Exec("DELETE FROM block WHERE eth_block_num > $1;", lastValidBlock)
	return err
}
|
||||
|
||||
// addBids insert Bids into the DB
|
||||
func (hdb *HistoryDB) addBids(bids []common.Bid) error {
|
||||
// TODO: check the coordinator info
|
||||
return db.BulkInsert(
|
||||
hdb.db,
|
||||
"INSERT INTO bid (slot_num, forger_addr, bid_value, eth_block_num) VALUES %s",
|
||||
bids[:],
|
||||
)
|
||||
}
|
||||
|
||||
// GetBidsByBlock return the bids done between the block from and to
|
||||
func (hdb *HistoryDB) GetBidsByBlock(from, to uint64) ([]*common.Bid, error) {
|
||||
var bids []*common.Bid
|
||||
err := meddler.QueryAll(
|
||||
hdb.db, &bids,
|
||||
"SELECT * FROM bid WHERE $1 <= eth_block_num AND eth_block_num < $2",
|
||||
from, to,
|
||||
)
|
||||
return bids, err
|
||||
}
|
||||
|
||||
// Close frees the resources used by HistoryDB.
// It must be called once the HistoryDB is no longer needed.
func (hdb *HistoryDB) Close() error {
	return hdb.db.Close()
}
|
||||
122
db/historydb/historydb_test.go
Normal file
122
db/historydb/historydb_test.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package historydb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
eth "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/hermeznetwork/hermez-node/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// historyDB is the shared DB handle for all tests in this package;
// it is initialized in TestMain and closed after the tests run.
var historyDB *HistoryDB

// In order to run the test you need to run a PostgreSQL DB with
// a database named "history" that is accessible by
// user: "hermez"
// pass: set it using the env var POSTGRES_PASS
// This can be achieved by running: POSTGRES_PASS=your_strong_pass && sudo docker run --rm --name hermez-db-test -p 5432:5432 -e POSTGRES_DB=history -e POSTGRES_USER=hermez -e POSTGRES_PASSWORD=$POSTGRES_PASS -d postgres && sleep 2s && sudo docker exec -it hermez-db-test psql -a history -U hermez -c "CREATE DATABASE l2;"
// After running the test you can stop the container by running: sudo docker kill hermez-db-test
// If you already did that for the L2DB you don't have to do it again
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// init DB
|
||||
var err error
|
||||
pass := os.Getenv("POSTGRES_PASS")
|
||||
historyDB, err = NewHistoryDB(5432, "localhost", "hermez", pass, "history")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Run tests
|
||||
result := m.Run()
|
||||
// Close DB
|
||||
if err := historyDB.Close(); err != nil {
|
||||
fmt.Println("Error closing the history DB:", err)
|
||||
}
|
||||
os.Exit(result)
|
||||
}
|
||||
|
||||
func TestAddBlock(t *testing.T) {
|
||||
var fromBlock, toBlock uint64
|
||||
fromBlock = 1
|
||||
toBlock = 5
|
||||
// Delete peviously created rows (clean previous test execs)
|
||||
assert.NoError(t, historyDB.reorg(fromBlock-1))
|
||||
// Generate fake blocks
|
||||
blocks := genBlocks(fromBlock, toBlock)
|
||||
// Insert blocks into DB
|
||||
err := historyDB.addBlocks(blocks)
|
||||
assert.NoError(t, err)
|
||||
// Get blocks from DB
|
||||
fetchedBlocks, err := historyDB.GetBlocks(fromBlock, toBlock)
|
||||
// Compare generated vs getted blocks
|
||||
assert.NoError(t, err)
|
||||
for i, fetchedBlock := range fetchedBlocks {
|
||||
assert.Equal(t, blocks[i].EthBlockNum, fetchedBlock.EthBlockNum)
|
||||
assert.Equal(t, blocks[i].Hash, fetchedBlock.Hash)
|
||||
assert.Equal(t, blocks[i].Timestamp.Unix(), fetchedBlock.Timestamp.Unix())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBids(t *testing.T) {
|
||||
const fromBlock uint64 = 1
|
||||
const toBlock uint64 = 5
|
||||
const bidsPerSlot = 5
|
||||
// Prepare blocks in the DB
|
||||
setTestBlocks(fromBlock, toBlock)
|
||||
// Generate fake bids
|
||||
bids := make([]common.Bid, 0, (toBlock-fromBlock)*bidsPerSlot)
|
||||
for i := fromBlock; i < toBlock; i++ {
|
||||
for j := 0; j < bidsPerSlot; j++ {
|
||||
bids = append(bids, common.Bid{
|
||||
SlotNum: common.SlotNum(i),
|
||||
BidValue: big.NewInt(int64(j)),
|
||||
EthBlockNum: i,
|
||||
ForgerAddr: eth.BigToAddress(big.NewInt(int64(j))),
|
||||
})
|
||||
}
|
||||
}
|
||||
err := historyDB.addBids(bids)
|
||||
assert.NoError(t, err)
|
||||
// Fetch bids
|
||||
fetchedBidsPtr, err := historyDB.GetBidsByBlock(fromBlock, toBlock)
|
||||
assert.NoError(t, err)
|
||||
// Compare fetched bids vs generated bids
|
||||
fetchedBids := make([]common.Bid, 0, (toBlock-fromBlock)*bidsPerSlot)
|
||||
for _, bid := range fetchedBidsPtr {
|
||||
fetchedBids = append(fetchedBids, *bid)
|
||||
}
|
||||
assert.Equal(t, bids, fetchedBids)
|
||||
}
|
||||
|
||||
// setTestBlocks WARNING: this will delete the blocks and recreate them
|
||||
func setTestBlocks(from, to uint64) {
|
||||
if from == 0 {
|
||||
if err := historyDB.reorg(from); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if err := historyDB.reorg(from - 1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
blocks := genBlocks(from, to)
|
||||
if err := historyDB.addBlocks(blocks); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func genBlocks(from, to uint64) []common.Block {
|
||||
var blocks []common.Block
|
||||
for i := from; i < to; i++ {
|
||||
blocks = append(blocks, common.Block{
|
||||
EthBlockNum: i,
|
||||
Timestamp: time.Now().Add(time.Second * 13).UTC(),
|
||||
Hash: eth.BigToHash(big.NewInt(int64(i))),
|
||||
})
|
||||
}
|
||||
return blocks
|
||||
}
|
||||
106
db/historydb/migrations/001_init.sql
Normal file
106
db/historydb/migrations/001_init.sql
Normal file
@@ -0,0 +1,106 @@
|
||||
-- +migrate Up

-- Ethereum blocks observed by the node. All other tables reference this one
-- with ON DELETE CASCADE so a reorg can be handled by deleting blocks.
CREATE TABLE block (
    eth_block_num BIGINT PRIMARY KEY,
    timestamp TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    hash BYTEA NOT NULL
);

-- Minimum bid prices per slot, as recorded at a given block.
CREATE TABLE slot_min_prices (
    eth_block_num BIGINT PRIMARY KEY REFERENCES block (eth_block_num) ON DELETE CASCADE,
    min_prices VARCHAR(200) NOT NULL
);

-- Coordinator registrations.
-- NOTE(review): table name "coordiantor" is misspelled ("coordinator"); kept
-- as-is because renaming is a schema change — fix in a follow-up migration.
CREATE TABLE coordiantor (
    forger_addr BYTEA NOT NULL,
    eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
    beneficiary_addr BYTEA NOT NULL,
    withdraw_addr BYTEA NOT NULL,
    url VARCHAR(200) NOT NULL,
    PRIMARY KEY (forger_addr, eth_block_num)
);

-- Forged rollup batches.
CREATE TABLE batch (
    batch_num BIGINT PRIMARY KEY,
    eth_block_num BIGINT REFERENCES block (eth_block_num) ON DELETE CASCADE,
    forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
    fees_collected BYTEA NOT NULL,
    state_root BYTEA NOT NULL,
    num_accounts BIGINT NOT NULL,
    exit_root BYTEA NOT NULL,
    forge_l1_txs_num BIGINT,
    slot_num BIGINT NOT NULL
);

-- Exit tree leaves per batch.
CREATE TABLE exit_tree (
    batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
    account_idx BIGINT NOT NULL,
    merkle_proof BYTEA NOT NULL,
    amount NUMERIC NOT NULL,
    nullifier BYTEA NOT NULL,
    PRIMARY KEY (batch_num, account_idx)
);

-- Auction bids for forging slots.
CREATE TABLE bid (
    slot_num BIGINT NOT NULL,
    bid_value BYTEA NOT NULL, -- (check if we can do a max(), if not add float for order purposes)
    eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
    forger_addr BYTEA NOT NULL, -- fake foreign key for coordinator
    PRIMARY KEY (slot_num, bid_value)
);

-- Tokens registered in the rollup.
CREATE TABLE token (
    token_id INT PRIMARY KEY,
    eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
    eth_addr BYTEA UNIQUE NOT NULL,
    name VARCHAR(20) NOT NULL,
    symbol VARCHAR(10) NOT NULL,
    decimals INT NOT NULL
);

-- L1 transactions (originated on Ethereum).
CREATE TABLE l1tx (
    tx_id BYTEA PRIMARY KEY,
    from_idx BIGINT NOT NULL,
    to_idx BIGINT NOT NULL,
    token_id INT NOT NULL REFERENCES token (token_id),
    amount NUMERIC NOT NULL,
    nonce BIGINT NOT NULL,
    fee INT NOT NULL,
    eth_block_num BIGINT NOT NULL REFERENCES block (eth_block_num) ON DELETE CASCADE,
    to_forge_l1_txs_num BIGINT NOT NULL,
    position INT NOT NULL,
    origin_user BOOLEAN NOT NULL,
    from_eth_addr BYTEA NOT NULL,
    from_bjj BYTEA NOT NULL,
    load_amount BYTEA NOT NULL
);

-- L2 transactions (included directly in a batch).
CREATE TABLE l2tx (
    tx_id BYTEA PRIMARY KEY,
    batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
    position INT NOT NULL,
    from_idx BIGINT NOT NULL,
    to_idx BIGINT NOT NULL,
    amount NUMERIC NOT NULL,
    fee INT NOT NULL,
    nonce BIGINT NOT NULL
);

-- Rollup accounts, created at a given batch.
CREATE TABLE account (
    idx BIGINT PRIMARY KEY,
    token_id INT NOT NULL REFERENCES token (token_id),
    batch_num BIGINT NOT NULL REFERENCES batch (batch_num) ON DELETE CASCADE,
    bjj BYTEA NOT NULL,
    eth_addr BYTEA NOT NULL
);

-- +migrate Down
-- Drop in reverse dependency order so foreign keys don't block the drops.
DROP TABLE account;
DROP TABLE l2tx;
DROP TABLE l1tx;
DROP TABLE token;
DROP TABLE bid;
DROP TABLE exit_tree;
DROP TABLE batch;
DROP TABLE coordiantor;
DROP TABLE slot_min_prices;
DROP TABLE block;
||||
118
db/l2db/l2db.go
Normal file
118
db/l2db/l2db.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package l2db
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
eth "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/hermeznetwork/hermez-node/common"
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// L2DB stores L2 txs and authorization registers received by the coordinator
// and keeps them until they are no longer relevant due to them being forged
// or invalid after a safety period.
type L2DB struct {
	db           *gorm.DB      // ORM connection to the SQL DB
	safetyPeriod uint16        // blockchain blocks to wait before deleting anything (reorg protection)
	ttl          time.Duration // max time a tx may stay in the DB (applied when maxTxs is exceeded)
	maxTxs       uint32        // desired maximum amount of txs stored in the L2DB
}
|
||||
|
||||
// NewL2DB creates a L2DB.
|
||||
// More info on how to set dbDialect and dbArgs here: http://gorm.io/docs/connecting_to_the_database.html
|
||||
// safetyPeriod is the ammount of blockchain blocks that must be waited before deleting anything (to avoid reorg problems).
|
||||
// maxTxs indicates the desired maximum amount of txs stored on the L2DB.
|
||||
// TTL indicates the maximum amount of time that a tx can be in the L2DB
|
||||
// (to prevent tx that won't ever be forged to stay there, will be used if maxTxs is exceeded).
|
||||
// autoPurgePeriod will be used as delay between calls to Purge. If the value is 0, it will be disabled.
|
||||
func NewL2DB(
|
||||
dbDialect, dbArgs string,
|
||||
safetyPeriod uint16,
|
||||
maxTxs uint32,
|
||||
TTL time.Duration,
|
||||
) (*L2DB, error) {
|
||||
// Stablish DB connection
|
||||
db, err := gorm.Open(dbDialect, dbArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create or update SQL schemas
|
||||
// WARNING: AutoMigrate will ONLY create tables, missing columns and missing indexes,
|
||||
// and WON’T change existing column’s type or delete unused columns to protect your data.
|
||||
// more info: http://gorm.io/docs/migration.html
|
||||
db.AutoMigrate(&common.PoolL2Tx{})
|
||||
// TODO: db.AutoMigrate(&common.RegisterAuthorization{})
|
||||
|
||||
return &L2DB{
|
||||
db: db,
|
||||
safetyPeriod: safetyPeriod,
|
||||
ttl: TTL,
|
||||
maxTxs: maxTxs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AddTx inserts a tx into the L2DB.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) AddTx(tx *common.PoolL2Tx) error {
	return nil
}
|
||||
|
||||
// AddAccountCreationAuth inserts an account creation authorization into the DB.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) AddAccountCreationAuth(auth *common.AccountCreationAuth) error { // TODO: AddRegisterAuthorization(auth &common.RegisterAuthorization)
	return nil
}
|
||||
|
||||
// GetTx returns the specified Tx.
// TODO: not implemented yet — currently always returns (nil, nil).
func (l2db *L2DB) GetTx(txID common.TxID) (*common.PoolL2Tx, error) {
	return nil, nil
}
|
||||
|
||||
// GetPendingTxs returns all the pending txs of the L2DB.
// TODO: not implemented yet — currently always returns (nil, nil).
func (l2db *L2DB) GetPendingTxs() ([]common.PoolL2Tx, error) {
	return nil, nil
}
|
||||
|
||||
// GetAccountCreationAuth returns the authorization to make registers of an Ethereum address.
// TODO: not implemented yet — currently always returns (nil, nil).
func (l2db *L2DB) GetAccountCreationAuth(ethAddr eth.Address) (*common.AccountCreationAuth, error) {
	return nil, nil
}
|
||||
|
||||
// StartForging updates the state of the transactions that will begin the forging process.
// The state of the txs referenced by txIDs will be changed from Pending -> Forging.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) StartForging(txIDs []common.TxID) error {
	return nil
}
|
||||
|
||||
// DoneForging updates the state of the transactions that have been forged
// so the state of the txs referenced by txIDs will be changed from Forging -> Forged.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) DoneForging(txIDs []common.TxID) error {
	return nil
}
|
||||
|
||||
// InvalidateTxs updates the state of the transactions that are invalid.
// The state of the txs referenced by txIDs will be changed from * -> Invalid.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) InvalidateTxs(txIDs []common.TxID) error {
	return nil
}
|
||||
|
||||
// CheckNonces invalidates txs with nonces that are smaller than their respective accounts nonces.
// The state of the affected txs will be changed from Pending -> Invalid.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) CheckNonces(updatedAccounts []common.Account) error {
	return nil
}
|
||||
|
||||
// Reorg updates the state of txs that were updated in a batch that has been discarded due to a blockchain reorg.
// The state of the affected txs can change from Forged -> Pending or from Invalid -> Pending.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) Reorg(lastValidBatch common.BatchNum) error {
	return nil
}
|
||||
|
||||
// Purge deletes transactions that have been forged or marked as invalid for longer than the safety period.
// It also deletes txs that have been in the L2DB for longer than the ttl if maxTxs has been exceeded.
// TODO: not implemented yet — currently a no-op that always returns nil.
func (l2db *L2DB) Purge() error {
	return nil
}
|
||||
|
||||
// Close frees the resources used by the L2DB.
// It must be called once the L2DB is no longer needed.
func (l2db *L2DB) Close() error {
	return l2db.db.Close()
}
|
||||
78
db/utils.go
Normal file
78
db/utils.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/russross/meddler"
|
||||
)
|
||||
|
||||
// InitMeddler registers tags to be used to read/write from SQL DBs using meddler.
// It must be called once before any meddler-based DB access that uses the custom tags.
func InitMeddler() {
	// "bigint": serializes *big.Int fields as base64 strings (see BigIntMeddler)
	meddler.Register("bigint", BigIntMeddler{})
}
|
||||
|
||||
// BulkInsert performs a bulk insert with a single statement into the specified table. Example:
|
||||
// `db.BulkInsert(myDB, "INSERT INTO block (eth_block_num, timestamp, hash) VALUES %s", blocks[:])`
|
||||
// Note that all the columns must be specified in the query, and they must be in the same order as in the table.
|
||||
func BulkInsert(db meddler.DB, q string, args interface{}) error {
|
||||
arrayValue := reflect.ValueOf(args)
|
||||
arrayLen := arrayValue.Len()
|
||||
valueStrings := make([]string, 0, arrayLen)
|
||||
var arglist = make([]interface{}, 0)
|
||||
for i := 0; i < arrayLen; i++ {
|
||||
arg := arrayValue.Index(i).Addr().Interface()
|
||||
elemArglist, err := meddler.Default.Values(arg, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
arglist = append(arglist, elemArglist...)
|
||||
value := "("
|
||||
for j := 0; j < len(elemArglist); j++ {
|
||||
value += fmt.Sprintf("$%d, ", i*len(elemArglist)+j+1)
|
||||
}
|
||||
value = value[:len(value)-2] + ")"
|
||||
valueStrings = append(valueStrings, value)
|
||||
}
|
||||
stmt := fmt.Sprintf(q, strings.Join(valueStrings, ","))
|
||||
_, err := db.Exec(stmt, arglist...)
|
||||
return err
|
||||
}
|
||||
|
||||
// BigIntMeddler encodes or decodes the field value to or from JSON
|
||||
type BigIntMeddler struct{}
|
||||
|
||||
// PreRead is called before a Scan operation for fields that have the BigIntMeddler
|
||||
func (b BigIntMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
|
||||
// give a pointer to a byte buffer to grab the raw data
|
||||
return new(string), nil
|
||||
}
|
||||
|
||||
// PostRead is called after a Scan operation for fields that have the BigIntMeddler
|
||||
func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error {
|
||||
ptr := scanTarget.(*string)
|
||||
if ptr == nil {
|
||||
return fmt.Errorf("BigIntMeddler.PostRead: nil pointer")
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(*ptr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("big.Int decode error: %v", err)
|
||||
}
|
||||
field := fieldPtr.(**big.Int)
|
||||
*field = new(big.Int).SetBytes(data)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PreWrite is called before an Insert or Update operation for fields that have the BigIntMeddler
|
||||
func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) {
|
||||
field := fieldPtr.(*big.Int)
|
||||
|
||||
str := base64.StdEncoding.EncodeToString(field.Bytes())
|
||||
|
||||
return str, nil
|
||||
}
|
||||
Reference in New Issue
Block a user