Mirror of https://github.com/arnaucube/go-circom-prover-verifier.git (synced 2026-02-06 19:06:43 +01:00)
Merge pull request #11 from iden3/feature/proof-parsers
Add proof parsers to string (decimal & hex)
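The PR adds conversions from `*types.Proof` to string form: `ProofToString`/`ProofToJson` (decimal), `ProofToHex`/`ProofToJsonHex` ("0x"-prefixed hexadecimal), and `ProofToSmartContractFormat`/`ProofStringToSmartContractFormat`, which swap the two field components of each G2 coordinate of B into the ordering expected by on-chain verifiers. A minimal usage sketch follows; the import path and package name are assumptions (the diff only shows that these functions live in the same package as `ParseProof` and `ParseWitnessBin`), and the proof file path is reused from the PR's test data.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	// Assumed import path; adjust to wherever the parser functions live in the repository.
	"github.com/iden3/go-circom-prover-verifier/parsers"
)

func main() {
	// Proof file taken from the repository's test data (see the new test in this diff).
	proofJSON, err := ioutil.ReadFile("testdata/circuit1k/proof.json")
	if err != nil {
		log.Fatal(err)
	}

	proof, err := parsers.ParseProof(proofJSON)
	if err != nil {
		log.Fatal(err)
	}

	decJSON, err := parsers.ProofToJson(proof) // decimal strings
	if err != nil {
		log.Fatal(err)
	}
	hexJSON, err := parsers.ProofToJsonHex(proof) // "0x"-prefixed hex strings
	if err != nil {
		log.Fatal(err)
	}

	// Same proof, with B's G2 components reordered for a smart-contract verifier.
	sc := parsers.ProofToSmartContractFormat(proof)

	fmt.Println(string(decJSON))
	fmt.Println(string(hexJSON))
	fmt.Println(sc.Protocol, sc.A, sc.B, sc.C)
}
```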
@@ -467,8 +467,35 @@ func stringToG2(h [][]string) (*bn256.G2, error) {
 	return p, err
 }
 
-// ProofToJson outputs the Proof i Json format
-func ProofToJson(p *types.Proof) ([]byte, error) {
+// ProofStringToSmartContractFormat converts the ProofString to a ProofString in the SmartContract format in a ProofString structure
+func ProofStringToSmartContractFormat(s ProofString) ProofString {
+	var rs ProofString
+	rs.A = make([]string, 2)
+	rs.B = make([][]string, 2)
+	rs.B[0] = make([]string, 2)
+	rs.B[1] = make([]string, 2)
+	rs.C = make([]string, 2)
+
+	rs.A[0] = s.A[0]
+	rs.A[1] = s.A[1]
+	rs.B[0][0] = s.B[0][1]
+	rs.B[0][1] = s.B[0][0]
+	rs.B[1][0] = s.B[1][1]
+	rs.B[1][1] = s.B[1][0]
+	rs.C[0] = s.C[0]
+	rs.C[1] = s.C[1]
+	rs.Protocol = s.Protocol
+	return rs
+}
+
+// ProofToSmartContractFormat converts the *types.Proof to a ProofString in the SmartContract format in a ProofString structure
+func ProofToSmartContractFormat(p *types.Proof) ProofString {
+	s := ProofToString(p)
+	return ProofStringToSmartContractFormat(s)
+}
+
+// ProofToString converts the Proof to ProofString
+func ProofToString(p *types.Proof) ProofString {
 	var ps ProofString
 	ps.A = make([]string, 3)
 	ps.B = make([][]string, 3)
@@ -497,10 +524,55 @@ func ProofToJson(p *types.Proof) ([]byte, error) {
 
 	ps.Protocol = "groth"
 
+	return ps
+}
+
+// ProofToJson outputs the Proof i Json format
+func ProofToJson(p *types.Proof) ([]byte, error) {
+	ps := ProofToString(p)
 	return json.Marshal(ps)
 }
 
-// ParseWitness parses binary file representation of the Witness into the Witness struct
+// ProofToHex converts the Proof to ProofString with hexadecimal strings
+func ProofToHex(p *types.Proof) ProofString {
+	var ps ProofString
+	ps.A = make([]string, 3)
+	ps.B = make([][]string, 3)
+	ps.B[0] = make([]string, 2)
+	ps.B[1] = make([]string, 2)
+	ps.B[2] = make([]string, 2)
+	ps.C = make([]string, 3)
+
+	a := p.A.Marshal()
+	ps.A[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[:32]).Bytes())
+	ps.A[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[32:64]).Bytes())
+	ps.A[2] = "1"
+
+	b := p.B.Marshal()
+	ps.B[0][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[:32]).Bytes())
+	ps.B[0][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[32:64]).Bytes())
+	ps.B[1][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[64:96]).Bytes())
+	ps.B[1][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[96:128]).Bytes())
+	ps.B[2][0] = "1"
+	ps.B[2][1] = "0"
+
+	c := p.C.Marshal()
+	ps.C[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[:32]).Bytes())
+	ps.C[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[32:64]).Bytes())
+	ps.C[2] = "1"
+
+	ps.Protocol = "groth"
+
+	return ps
+}
+
+// ProofToJsonHex outputs the Proof i Json format with hexadecimal strings
+func ProofToJsonHex(p *types.Proof) ([]byte, error) {
+	ps := ProofToHex(p)
+	return json.Marshal(ps)
+}
+
+// ParseWitnessBin parses binary file representation of the Witness into the Witness struct
 func ParseWitnessBin(f *os.File) (types.Witness, error) {
 	var w types.Witness
 	r := bufio.NewReader(f)
@@ -172,3 +172,27 @@ func TestParseWitnessBin(t *testing.T) {
 	testCircuitParseWitnessBin(t, "circuit1k")
 	testCircuitParseWitnessBin(t, "circuit5k")
 }
+
+func TestProofSmartContractFormat(t *testing.T) {
+	proofJson, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
+	require.Nil(t, err)
+	proof, err := ParseProof(proofJson)
+	require.Nil(t, err)
+	pS := ProofToString(proof)
+
+	pSC := ProofToSmartContractFormat(proof)
+	assert.Nil(t, err)
+	assert.Equal(t, pS.A[0], pSC.A[0])
+	assert.Equal(t, pS.A[1], pSC.A[1])
+	assert.Equal(t, pS.B[0][0], pSC.B[0][1])
+	assert.Equal(t, pS.B[0][1], pSC.B[0][0])
+	assert.Equal(t, pS.B[1][0], pSC.B[1][1])
+	assert.Equal(t, pS.B[1][1], pSC.B[1][0])
+	assert.Equal(t, pS.C[0], pSC.C[0])
+	assert.Equal(t, pS.C[1], pSC.C[1])
+	assert.Equal(t, pS.Protocol, pSC.Protocol)
+
+	pSC2 := ProofStringToSmartContractFormat(pS)
+	assert.Equal(t, pSC, pSC2)
+
+}
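A note on the hex form added above: on the cloudflare bn256 curve, `p.A.Marshal()` yields 64 bytes (two 32-byte big-endian G1 coordinates) and `p.B.Marshal()` yields 128 bytes (four coordinates), which is why `ProofToHex` slices at 32-byte boundaries. Because each slice goes through `new(big.Int).SetBytes(...).Bytes()`, leading zero bytes are dropped, so the strings are not fixed-width and an all-zero coordinate encodes as just "0x". Below is a small sketch of reading such a coordinate back into a `big.Int`; the helper name is illustrative and not part of the PR.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"
)

// hexCoordToBigInt reverses the per-coordinate encoding used by ProofToHex:
// "0x" + hex.EncodeToString(new(big.Int).SetBytes(coord).Bytes()).
// Illustrative helper only; it does not exist in the repository.
func hexCoordToBigInt(s string) (*big.Int, error) {
	body := strings.TrimPrefix(s, "0x")
	if body == "" {
		// An all-zero coordinate is encoded as just "0x".
		return big.NewInt(0), nil
	}
	v, ok := new(big.Int).SetString(body, 16)
	if !ok {
		return nil, fmt.Errorf("invalid hex coordinate %q", s)
	}
	return v, nil
}

func main() {
	v, err := hexCoordToBigInt("0x1a2b3c")
	fmt.Println(v, err) // 1715004 <nil>
}
```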
prover/gextra.go
@@ -1,16 +1,17 @@
 package prover
 
 import (
+	"math/big"
+
 	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
-	"math/big"
 )
 
-type TableG1 struct {
+type tableG1 struct {
 	data []*bn256.G1
 }
 
-func (t TableG1) GetData() []*bn256.G1 {
+func (t tableG1) getData() []*bn256.G1 {
 	return t.data
 }
 
@@ -21,31 +22,31 @@ func (t TableG1) GetData() []*bn256.G1 {
 // Table[3] = a[0]+a[1]
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
-func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
+func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	// EC table
 	table := make([]*bn256.G1, 0)
 
 	// We need at least gsize elements. If not enough, fill with 0
-	a_ext := make([]*bn256.G1, 0)
-	a_ext = append(a_ext, a...)
+	aExt := make([]*bn256.G1, 0)
+	aExt = append(aExt, a...)
 
 	for i := len(a); i < gsize; i++ {
-		a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
+		aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
 	}
 
 	elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
 	table = append(table, elG1)
-	last_pow2 := 1
+	lastPow2 := 1
 	nelems := 0
 	for i := 1; i < 1<<gsize; i++ {
 		elG1 := new(bn256.G1)
 		// if power of 2
 		if i&(i-1) == 0 {
-			last_pow2 = i
-			elG1.Set(a_ext[nelems])
+			lastPow2 = i
+			elG1.Set(aExt[nelems])
 			nelems++
 		} else {
-			elG1.Add(table[last_pow2], table[i-last_pow2])
+			elG1.Add(table[lastPow2], table[i-lastPow2])
 			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
 			//table[i].MakeAffine()
 		}
@@ -60,7 +61,7 @@ func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	t.data = table
 }
 
-func (t TableG1) Marshal() []byte {
+func (t tableG1) Marshal() []byte {
 	info := make([]byte, 0)
 	for _, el := range t.data {
 		info = append(info, el.Marshal()...)
@@ -70,43 +71,42 @@ func (t TableG1) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G1 elements
-func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
 
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G1).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -118,10 +118,10 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 
 	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -137,45 +137,43 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 		R.Add(R, Q[i-1])
 	}
 
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 	Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
 
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
-		Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
+		table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
+		Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
 
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -187,22 +185,22 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -216,11 +214,10 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 		R = new(bn256.G1).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 /////
@@ -228,11 +225,11 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 // TODO - How can avoid replicating code in G2?
 //G2
 
-type TableG2 struct {
+type tableG2 struct {
 	data []*bn256.G2
 }
 
-func (t TableG2) GetData() []*bn256.G2 {
+func (t tableG2) getData() []*bn256.G2 {
 	return t.data
 }
 
@@ -244,31 +241,31 @@ func (t TableG2) GetData() []*bn256.G2 {
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
 // TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
-func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
+func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
 	// EC table
 	table := make([]*bn256.G2, 0)
 
 	// We need at least gsize elements. If not enough, fill with 0
-	a_ext := make([]*bn256.G2, 0)
-	a_ext = append(a_ext, a...)
+	aExt := make([]*bn256.G2, 0)
+	aExt = append(aExt, a...)
 
 	for i := len(a); i < gsize; i++ {
-		a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
+		aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
 	}
 
 	elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
 	table = append(table, elG2)
-	last_pow2 := 1
+	lastPow2 := 1
 	nelems := 0
 	for i := 1; i < 1<<gsize; i++ {
 		elG2 := new(bn256.G2)
 		// if power of 2
 		if i&(i-1) == 0 {
-			last_pow2 = i
-			elG2.Set(a_ext[nelems])
+			lastPow2 = i
+			elG2.Set(aExt[nelems])
 			nelems++
 		} else {
-			elG2.Add(table[last_pow2], table[i-last_pow2])
+			elG2.Add(table[lastPow2], table[i-lastPow2])
 			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
 			//table[i].MakeAffine()
 		}
@@ -283,7 +280,7 @@ func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
 	t.data = table
 }
 
-func (t TableG2) Marshal() []byte {
+func (t tableG2) Marshal() []byte {
 	info := make([]byte, 0)
 	for _, el := range t.data {
 		info = append(info, el.Marshal()...)
@@ -293,43 +290,42 @@ func (t TableG2) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G2 elements
-func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
 
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G2).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -341,10 +337,10 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 
 	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -359,45 +355,43 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 	Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
 
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
-		Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
+		table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
+		Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
 
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -409,22 +403,22 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -438,11 +432,10 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Return most significant bit position in a group of Big Integers
@@ -450,9 +443,9 @@ func getMsb(k []*big.Int) int {
 	msb := 0
 
 	for _, el := range k {
-		tmp_msb := el.BitLen()
-		if tmp_msb > msb {
-			msb = tmp_msb
+		tmpMsb := el.BitLen()
+		if tmpMsb > msb {
+			msb = tmpMsb
 		}
 	}
 	return msb
@@ -460,11 +453,11 @@ func getMsb(k []*big.Int) int {
 
 // Return ith bit in group of Big Integers
 func getBit(k []*big.Int, i int) uint {
-	table_idx := uint(0)
+	tableIdx := uint(0)
 
 	for idx, el := range k {
 		b := el.Bit(i)
-		table_idx += (b << idx)
+		tableIdx += (b << idx)
 	}
-	return table_idx
+	return tableIdx
 }
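For context on the renames above: the table helpers implement a windowed multi-scalar multiplication. `newTableG1`/`newTableG2` precompute, for a group of `gsize` points, every subset sum (entry `i` of the table is the sum of the points whose bit is set in `i`), and `getBit` assembles the table index for bit position `i` by taking bit `i` of every scalar in the group. A self-contained sketch of just that index computation, with no curve code (the helper and variable names here are illustrative, not from the repository):

```go
package main

import (
	"fmt"
	"math/big"
)

// getBitIdx mirrors the idea of getBit in prover/gextra.go: bit i of each
// scalar in the group contributes one bit of the precomputed-table index.
func getBitIdx(k []*big.Int, i int) uint {
	idx := uint(0)
	for j, el := range k {
		idx += el.Bit(i) << uint(j)
	}
	return idx
}

func main() {
	// Group of gsize = 3 scalars: 5 = 101b, 2 = 010b, 7 = 111b.
	k := []*big.Int{big.NewInt(5), big.NewInt(2), big.NewInt(7)}

	// At bit position 0 the bits are 1, 0, 1 -> index 0b101 = 5,
	// i.e. table[5] = a[0] + a[2] is the entry added at this step.
	fmt.Println(getBitIdx(k, 0)) // 5

	// At bit position 1 the bits are 0, 1, 1 -> index 0b110 = 6 (a[1] + a[2]).
	fmt.Println(getBitIdx(k, 1)) // 6
}
```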
@@ -4,10 +4,11 @@ import (
 	"bytes"
 	"crypto/rand"
 	"fmt"
-	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 	"math/big"
 	"testing"
 	"time"
+
+	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 )
 
 const (
@@ -60,31 +61,31 @@ func TestTableG1(t *testing.T) {
 
 	for gsize := 2; gsize < 10; gsize++ {
 		ntables := int((n + gsize - 1) / gsize)
-		table := make([]TableG1, ntables)
+		table := make([]tableG1, ntables)
 
 		for i := 0; i < ntables-1; i++ {
-			table[i].NewTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
+			table[i].newTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
 		}
-		table[ntables-1].NewTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
+		table[ntables-1].newTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
 
 		beforeT = time.Now()
 		Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
 		for i := 0; i < ntables-1; i++ {
-			Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
+			Q2 = table[i].mulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
 		}
-		Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
+		Q2 = table[ntables-1].mulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
 		fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize)
+		Q3 := scalarMultG1(arrayG1, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize)
+		Q4 := mulTableNoDoubleG1(table, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
+		Q5 := scalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
 		if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
@@ -119,31 +120,31 @@ func TestTableG2(t *testing.T) {
 
 	for gsize := 2; gsize < 10; gsize++ {
 		ntables := int((n + gsize - 1) / gsize)
-		table := make([]TableG2, ntables)
+		table := make([]tableG2, ntables)
 
 		for i := 0; i < ntables-1; i++ {
-			table[i].NewTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
+			table[i].newTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
 		}
-		table[ntables-1].NewTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
+		table[ntables-1].newTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
 
 		beforeT = time.Now()
 		Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
 		for i := 0; i < ntables-1; i++ {
-			Q2 = table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
+			Q2 = table[i].mulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
 		}
-		Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
+		Q2 = table[ntables-1].mulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
 		fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize)
+		Q3 := scalarMultG2(arrayG2, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize)
+		Q4 := mulTableNoDoubleG2(table, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
 
 		beforeT = time.Now()
-		Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
+		Q5 := scalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
 		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
 		if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
@@ -10,7 +10,7 @@ import (
 	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 	"github.com/iden3/go-circom-prover-verifier/types"
 	"github.com/iden3/go-iden3-crypto/utils"
 	//"fmt"
 )
 
 // Proof is the data structure of the Groth16 zkSNARK proof
@@ -45,7 +45,7 @@ type Witness []*big.Int
 
 // Group Size
 const (
 	GSIZE = 6
 )
 
 func randBigInt() (*big.Int, error) {
@@ -81,34 +81,34 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
 	proofB := arrayOfZeroesG2(numcpu)
 	proofC := arrayOfZeroesG1(numcpu)
 	proofBG1 := arrayOfZeroesG1(numcpu)
 	gsize := GSIZE
 	var wg1 sync.WaitGroup
 	wg1.Add(numcpu)
 	for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
 		// split 1
 		go func(cpu int, ranges [2]int) {
-			proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
+			proofA[cpu] = scalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
 				w[ranges[0]:ranges[1]],
 				proofA[cpu],
 				gsize)
-			proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
+			proofB[cpu] = scalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
 				w[ranges[0]:ranges[1]],
 				proofB[cpu],
 				gsize)
-			proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
+			proofBG1[cpu] = scalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
 				w[ranges[0]:ranges[1]],
 				proofBG1[cpu],
 				gsize)
-			min_lim := pk.NPublic+1
+			minLim := pk.NPublic + 1
 			if ranges[0] > pk.NPublic+1 {
-				min_lim = ranges[0]
+				minLim = ranges[0]
 			}
-			if ranges[1] > pk.NPublic + 1 {
-				proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]],
-					w[min_lim:ranges[1]],
+			if ranges[1] > pk.NPublic+1 {
+				proofC[cpu] = scalarMultNoDoubleG1(pk.C[minLim:ranges[1]],
+					w[minLim:ranges[1]],
 					proofC[cpu],
 					gsize)
 			}
 			wg1.Done()
 		}(_cpu, _ranges)
 	}
@@ -142,10 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
 	for _cpu, _ranges := range ranges(len(h), numcpu) {
 		// split 2
 		go func(cpu int, ranges [2]int) {
-			proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
+			proofC[cpu] = scalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
 				h[ranges[0]:ranges[1]],
 				proofC[cpu],
 				gsize)
 			wg2.Done()
 		}(_cpu, _ranges)
 	}
@@ -16,8 +16,8 @@ import (
 func TestCircuitsGenerateProof(t *testing.T) {
 	testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
 	testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
 	//testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
 	//testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
 }
 
 func testCircuitGenerateProof(t *testing.T, circuit string) {