5 Commits

Author SHA1 Message Date
arnaucube
5c2aaec1ca Add ProvingKey binary parser
The time on the parsing doesn't improve, as the data from the binary
file needs to be converted to `affine` representation, and then parsed
into the `bn256.G1` & `bn256.G2` formats (Montgomery). But the size of
the ProvingKey files in binary is much smaller, so will be better
handled by smartphones.

- Parse time benchmarks:
```
BenchmarkParsePk/Parse_Pk_bin_circuit1k-4         	       2	 563729994 ns/op
BenchmarkParsePk/Parse_Pk_json_circuit1k-4        	  635641	      1941 ns/op
BenchmarkParsePk/Parse_Pk_bin_circuit5k-4         	       1	2637941658 ns/op
BenchmarkParsePk/Parse_Pk_json_circuit5k-4        	       1	2986560185 ns/op
BenchmarkParsePk/Parse_Pk_bin_circuit10k-4        	       1	5337639150 ns/op
BenchmarkParsePk/Parse_Pk_json_circuit10k-4       	       1	6149568824 ns/op
BenchmarkParsePk/Parse_Pk_bin_circuit20k-4        	       1	10533654623 ns/op
BenchmarkParsePk/Parse_Pk_json_circuit20k-4       	       1	11657326851 ns/op
```

- Size of ProvingKey files for circuits of 20k and 50k constraints:
```
circuit 20k constraints:
10097812 bytes of proving_key.bin
29760049 bytes of proving_key.json

circuit 50k constraints:
24194964 bytes of proving_key.bin
71484081 bytes of proving_key.json
```
2020-05-20 12:25:27 +02:00
Eduard S
1aa316cbd2 Merge pull request #11 from iden3/feature/proof-parsers
Add proof parsers to string (decimal & hex)
2020-05-07 12:15:08 +02:00
arnaucube
0f48cfa2a5 Add proof parsers to string (decimal & hex)
Also adds ProofToSmartContractFormat, which returns a ProofString as the
proof.B elements swap is not a valid point for the bn256.G2 format.

Also unexports internal structs and methods of the prover package.
Also apply golint.
2020-05-06 14:45:44 +02:00
Eduard S
6ec118d4e2 Merge pull request #10 from iden3/feature/tables2
Add G1/G2 table calculation functionality
2020-05-06 11:43:06 +02:00
druiz0992
423d5f0ce7 Add G1/G2 table calculation functionality 2020-05-06 09:27:15 +02:00
9 changed files with 1057 additions and 527 deletions

View File

@@ -3,6 +3,7 @@ package parsers
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
@@ -467,8 +468,35 @@ func stringToG2(h [][]string) (*bn256.G2, error) {
return p, err return p, err
} }
// ProofToJson outputs the Proof i Json format // ProofStringToSmartContractFormat converts the ProofString to a ProofString in the SmartContract format in a ProofString structure
func ProofToJson(p *types.Proof) ([]byte, error) { func ProofStringToSmartContractFormat(s ProofString) ProofString {
var rs ProofString
rs.A = make([]string, 2)
rs.B = make([][]string, 2)
rs.B[0] = make([]string, 2)
rs.B[1] = make([]string, 2)
rs.C = make([]string, 2)
rs.A[0] = s.A[0]
rs.A[1] = s.A[1]
rs.B[0][0] = s.B[0][1]
rs.B[0][1] = s.B[0][0]
rs.B[1][0] = s.B[1][1]
rs.B[1][1] = s.B[1][0]
rs.C[0] = s.C[0]
rs.C[1] = s.C[1]
rs.Protocol = s.Protocol
return rs
}
// ProofToSmartContractFormat converts the *types.Proof to a ProofString in the SmartContract format in a ProofString structure
func ProofToSmartContractFormat(p *types.Proof) ProofString {
s := ProofToString(p)
return ProofStringToSmartContractFormat(s)
}
// ProofToString converts the Proof to ProofString
func ProofToString(p *types.Proof) ProofString {
var ps ProofString var ps ProofString
ps.A = make([]string, 3) ps.A = make([]string, 3)
ps.B = make([][]string, 3) ps.B = make([][]string, 3)
@@ -497,10 +525,55 @@ func ProofToJson(p *types.Proof) ([]byte, error) {
ps.Protocol = "groth" ps.Protocol = "groth"
return ps
}
// ProofToJson outputs the Proof i Json format
func ProofToJson(p *types.Proof) ([]byte, error) {
ps := ProofToString(p)
return json.Marshal(ps) return json.Marshal(ps)
} }
// ParseWitness parses binary file representation of the Witness into the Witness struct // ProofToHex converts the Proof to ProofString with hexadecimal strings
func ProofToHex(p *types.Proof) ProofString {
var ps ProofString
ps.A = make([]string, 3)
ps.B = make([][]string, 3)
ps.B[0] = make([]string, 2)
ps.B[1] = make([]string, 2)
ps.B[2] = make([]string, 2)
ps.C = make([]string, 3)
a := p.A.Marshal()
ps.A[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[:32]).Bytes())
ps.A[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[32:64]).Bytes())
ps.A[2] = "1"
b := p.B.Marshal()
ps.B[0][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[:32]).Bytes())
ps.B[0][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[32:64]).Bytes())
ps.B[1][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[64:96]).Bytes())
ps.B[1][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[96:128]).Bytes())
ps.B[2][0] = "1"
ps.B[2][1] = "0"
c := p.C.Marshal()
ps.C[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[:32]).Bytes())
ps.C[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[32:64]).Bytes())
ps.C[2] = "1"
ps.Protocol = "groth"
return ps
}
// ProofToJsonHex outputs the Proof i Json format with hexadecimal strings
func ProofToJsonHex(p *types.Proof) ([]byte, error) {
ps := ProofToHex(p)
return json.Marshal(ps)
}
// ParseWitnessBin parses binary file representation of the Witness into the Witness struct
func ParseWitnessBin(f *os.File) (types.Witness, error) { func ParseWitnessBin(f *os.File) (types.Witness, error) {
var w types.Witness var w types.Witness
r := bufio.NewReader(f) r := bufio.NewReader(f)
@@ -527,3 +600,368 @@ func swapEndianness(b []byte) []byte {
} }
return o return o
} }
func readNBytes(r io.Reader, n int) ([]byte, error) {
b := make([]byte, n)
_, err := io.ReadFull(r, b)
if err != nil {
return b, err
}
return b, nil
}
// ParsePkBin parses binary file representation of the ProvingKey into the ProvingKey struct
func ParsePkBin(f *os.File) (*types.Pk, error) {
o := 0
var pk types.Pk
r := bufio.NewReader(f)
b, err := readNBytes(r, 12)
if err != nil {
return nil, err
}
pk.NVars = int(binary.LittleEndian.Uint32(b[:4]))
pk.NPublic = int(binary.LittleEndian.Uint32(b[4:8]))
pk.DomainSize = int(binary.LittleEndian.Uint32(b[8:12]))
o += 12
b, err = readNBytes(r, 8)
if err != nil {
return nil, err
}
pPolsA := int(binary.LittleEndian.Uint32(b[:4]))
pPolsB := int(binary.LittleEndian.Uint32(b[4:8]))
o += 8
b, err = readNBytes(r, 20)
if err != nil {
return nil, err
}
pPointsA := int(binary.LittleEndian.Uint32(b[:4]))
pPointsB1 := int(binary.LittleEndian.Uint32(b[4:8]))
pPointsB2 := int(binary.LittleEndian.Uint32(b[8:12]))
pPointsC := int(binary.LittleEndian.Uint32(b[12:16]))
pPointsHExps := int(binary.LittleEndian.Uint32(b[16:20]))
o += 20
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkAlpha1 = new(bn256.G1)
_, err = pk.VkAlpha1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkBeta1 = new(bn256.G1)
_, err = pk.VkBeta1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkDelta1 = new(bn256.G1)
_, err = pk.VkDelta1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkBeta2 = new(bn256.G2)
_, err = pk.VkBeta2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkDelta2 = new(bn256.G2)
_, err = pk.VkDelta2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
o += 448
if o != pPolsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsA, o)
}
// PolsA
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(fromMont1R(b[:32]))
o += 32
}
pk.PolsA = append(pk.PolsA, polsMap)
}
if o != pPolsB {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsB, o)
}
// PolsB
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(fromMont1R(b[:32]))
o += 32
}
pk.PolsB = append(pk.PolsB, polsMap)
}
if o != pPointsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsA, o)
}
// A
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.A = append(pk.A, p1)
o += 64
}
if o != pPointsB1 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB1, o)
}
// B1
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.B1 = append(pk.B1, p1)
o += 64
}
if o != pPointsB2 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB2, o)
}
// B2
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
p2 := new(bn256.G2)
_, err = p2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
pk.B2 = append(pk.B2, p2)
o += 128
}
if o != pPointsC {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsC, o)
}
// C
zb := make([]byte, 64)
z := new(bn256.G1)
_, err = z.Unmarshal(zb)
if err != nil {
return nil, err
}
pk.C = append(pk.C, z) // circom behaviour (3x null==["0", "0", "0"])
pk.C = append(pk.C, z)
pk.C = append(pk.C, z)
for i := pk.NPublic + 1; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.C = append(pk.C, p1)
o += 64
}
if o != pPointsHExps {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsHExps, o)
}
for i := 0; i < pk.DomainSize; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.HExps = append(pk.HExps, p1)
}
return &pk, nil
}
// fromMont1Q converts a G1 point stored as two little-endian 32-byte
// coordinates in Montgomery form (over the base field Q) into the
// big-endian affine byte layout expected by bn256.G1.Unmarshal.
// NOTE(review): a coordinate equal to 1 is rewritten to 0 — presumably the
// binary format's encoding of absent/infinity coordinates; confirm against
// circom's writer.
func fromMont1Q(m []byte) []byte {
	var coords [2]*big.Int
	for i := range coords {
		raw := new(big.Int).SetBytes(swapEndianness(m[i*32 : (i+1)*32]))
		c := coordFromMont(raw, types.Q)
		if bytes.Equal(c.Bytes(), big.NewInt(1).Bytes()) {
			c = big.NewInt(0)
		}
		coords[i] = c
	}
	out := make([]byte, 0, 64)
	for _, c := range coords {
		cb := c.Bytes()
		if len(cb) != 32 {
			cb = addZPadding(cb)
		}
		out = append(out, cb...)
	}
	return out
}
// fromMont2Q converts a G2 point stored as four little-endian 32-byte
// coordinates in Montgomery form (over the base field Q) into the
// big-endian byte layout expected by bn256.G2.Unmarshal. Within each Fp2
// element the two components are swapped relative to the file layout.
// NOTE(review): as in fromMont1Q, a coordinate equal to 1 is rewritten to
// 0 — confirm this matches circom's encoding of infinity.
func fromMont2Q(m []byte) []byte {
	var coords [4]*big.Int
	for i := range coords {
		raw := new(big.Int).SetBytes(swapEndianness(m[i*32 : (i+1)*32]))
		c := coordFromMont(raw, types.Q)
		if bytes.Equal(c.Bytes(), big.NewInt(1).Bytes()) {
			c = big.NewInt(0)
		}
		coords[i] = c
	}
	// File order is (x0, x1, y0, y1); bn256 expects (x1, x0, y1, y0).
	order := [4]int{1, 0, 3, 2}
	out := make([]byte, 0, 128)
	for _, idx := range order {
		cb := coords[idx].Bytes()
		if len(cb) != 32 {
			cb = addZPadding(cb)
		}
		out = append(out, cb...)
	}
	return out
}
// fromMont1R converts a single little-endian 32-byte field element in
// Montgomery form over R into its plain big-endian byte representation.
func fromMont1R(m []byte) []byte {
	v := new(big.Int).SetBytes(swapEndianness(m[:32]))
	return coordFromMont(v, types.R).Bytes()
}
func fromMont2R(m []byte) []byte {
a := new(big.Int).SetBytes(swapEndianness(m[:32]))
b := new(big.Int).SetBytes(swapEndianness(m[32:64]))
c := new(big.Int).SetBytes(swapEndianness(m[64:96]))
d := new(big.Int).SetBytes(swapEndianness(m[96:128]))
x := coordFromMont(a, types.R)
y := coordFromMont(b, types.R)
z := coordFromMont(c, types.R)
t := coordFromMont(d, types.R)
var p []byte
p = append(p, y.Bytes()...) // swap
p = append(p, x.Bytes()...)
p = append(p, t.Bytes()...)
p = append(p, z.Bytes()...)
return p
}
func coordFromMont(u, q *big.Int) *big.Int {
return new(big.Int).Mod(
new(big.Int).Mul(
u,
new(big.Int).ModInverse(
new(big.Int).Lsh(big.NewInt(1), 256),
q,
),
),
q,
)
}

View File

@@ -172,3 +172,87 @@ func TestParseWitnessBin(t *testing.T) {
testCircuitParseWitnessBin(t, "circuit1k") testCircuitParseWitnessBin(t, "circuit1k")
testCircuitParseWitnessBin(t, "circuit5k") testCircuitParseWitnessBin(t, "circuit5k")
} }
// TestProofSmartContractFormat checks that ProofToSmartContractFormat
// swaps the components of each B pair while leaving A, C and Protocol
// untouched, and that ProofStringToSmartContractFormat agrees with it.
func TestProofSmartContractFormat(t *testing.T) {
	rawProof, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
	require.Nil(t, err)
	proof, err := ParseProof(rawProof)
	require.Nil(t, err)

	str := ProofToString(proof)
	sc := ProofToSmartContractFormat(proof)
	assert.Nil(t, err)
	assert.Equal(t, str.A[0], sc.A[0])
	assert.Equal(t, str.A[1], sc.A[1])
	// B pairs are swapped component-wise in the smart-contract format.
	assert.Equal(t, str.B[0][0], sc.B[0][1])
	assert.Equal(t, str.B[0][1], sc.B[0][0])
	assert.Equal(t, str.B[1][0], sc.B[1][1])
	assert.Equal(t, str.B[1][1], sc.B[1][0])
	assert.Equal(t, str.C[0], sc.C[0])
	assert.Equal(t, str.C[1], sc.C[1])
	assert.Equal(t, str.Protocol, sc.Protocol)

	// Both conversion paths must yield the same result.
	sc2 := ProofStringToSmartContractFormat(str)
	assert.Equal(t, sc, sc2)
}
// testCircuitParsePkBin parses the same proving key from its binary and
// JSON representations and checks field-by-field that they agree.
//
// Fix: the original compared every parsed field except VkBeta2, even
// though ParsePkBin reads it from the file; the missing assertion is
// added so a regression in the G2 Montgomery conversion is caught here.
func testCircuitParsePkBin(t *testing.T, circuit string) {
	pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
	require.Nil(t, err)
	defer pkBinFile.Close()
	pk, err := ParsePkBin(pkBinFile)
	require.Nil(t, err)

	pkJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
	require.Nil(t, err)
	pkJ, err := ParsePk(pkJson)
	require.Nil(t, err)

	assert.Equal(t, pkJ.NVars, pk.NVars)
	assert.Equal(t, pkJ.NPublic, pk.NPublic)
	assert.Equal(t, pkJ.DomainSize, pk.DomainSize)
	assert.Equal(t, pkJ.VkAlpha1, pk.VkAlpha1)
	assert.Equal(t, pkJ.VkBeta1, pk.VkBeta1)
	assert.Equal(t, pkJ.VkDelta1, pk.VkDelta1)
	assert.Equal(t, pkJ.VkBeta2, pk.VkBeta2)
	assert.Equal(t, pkJ.VkDelta2, pk.VkDelta2)
	assert.Equal(t, pkJ.PolsA, pk.PolsA)
	assert.Equal(t, pkJ.PolsB, pk.PolsB)
	assert.Equal(t, pkJ.A, pk.A)
	assert.Equal(t, pkJ.B1, pk.B1)
	assert.Equal(t, pkJ.B2, pk.B2)
	assert.Equal(t, pkJ.C, pk.C)
	assert.Equal(t, pkJ.HExps[:pkJ.DomainSize], pk.HExps[:pk.DomainSize]) // circom behaviour
}
// TestParsePkBin runs the binary-vs-JSON proving key comparison for each
// test circuit.
func TestParsePkBin(t *testing.T) {
	for _, circuit := range []string{"circuit1k", "circuit5k"} {
		testCircuitParsePkBin(t, circuit)
	}
}
// benchmarkParsePk benchmarks parsing the same proving key from its
// binary and JSON representations.
//
// Fixes two defects in the original:
//   - the sub-benchmark bodies were swapped: "Parse Pk bin" ran ParsePk
//     on the JSON bytes and "Parse Pk json" ran ParsePkBin;
//   - pkBinFile was never rewound, so after the first iteration
//     ParsePkBin read from an exhausted reader and returned immediately
//     (visible as the implausible ns/op for circuit1k in earlier runs).
//     The file is now seeked back to the start before every iteration.
func benchmarkParsePk(b *testing.B, circuit string) {
	pkJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
	require.Nil(b, err)
	pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
	require.Nil(b, err)
	defer pkBinFile.Close()

	b.Run("Parse Pk bin "+circuit, func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// Rewind so every iteration parses the full file.
			_, err := pkBinFile.Seek(0, 0)
			require.Nil(b, err)
			_, err = ParsePkBin(pkBinFile)
			require.Nil(b, err)
		}
	})
	b.Run("Parse Pk json "+circuit, func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_, err := ParsePk(pkJson)
			require.Nil(b, err)
		}
	})
}
// BenchmarkParsePk runs the proving key parsing benchmarks for the
// available test circuits.
func BenchmarkParsePk(b *testing.B) {
	circuits := []string{"circuit1k", "circuit5k"}
	// Larger circuits are available but slow:
	// circuits = append(circuits, "circuit10k", "circuit20k")
	for _, circuit := range circuits {
		benchmarkParsePk(b, circuit)
	}
}

View File

@@ -1,21 +1,20 @@
package prover package prover
import ( import (
"math/big" "math/big"
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
cryptoConstants "github.com/iden3/go-iden3-crypto/constants" cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
) )
type TableG1 struct{ type tableG1 struct {
data []*bn256.G1 data []*bn256.G1
} }
func (t tableG1) getData() []*bn256.G1 {
func (t TableG1) GetData() []*bn256.G1 { return t.data
return t.data
} }
// Compute table of gsize elements as :: // Compute table of gsize elements as ::
// Table[0] = Inf // Table[0] = Inf
// Table[1] = a[0] // Table[1] = a[0]
@@ -23,191 +22,202 @@ func (t TableG1) GetData() []*bn256.G1 {
// Table[3] = a[0]+a[1] // Table[3] = a[0]+a[1]
// ..... // .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1] // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int){ func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
// EC table // EC table
table := make([]*bn256.G1, 0) table := make([]*bn256.G1, 0)
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
a_ext := make([]*bn256.G1, 0) aExt := make([]*bn256.G1, 0)
a_ext = append(a_ext, a...) aExt = append(aExt, a...)
for i:=len(a); i<gsize; i++ { for i := len(a); i < gsize; i++ {
a_ext = append(a_ext,new(bn256.G1).ScalarBaseMult(big.NewInt(0))) aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
} }
elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0)) elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
table = append(table,elG1) table = append(table, elG1)
last_pow2 := 1 lastPow2 := 1
nelems := 0 nelems := 0
for i :=1; i< 1<<gsize; i++ { for i := 1; i < 1<<gsize; i++ {
elG1 := new(bn256.G1) elG1 := new(bn256.G1)
// if power of 2 // if power of 2
if i & (i-1) == 0{ if i&(i-1) == 0 {
last_pow2 = i lastPow2 = i
elG1.Set(a_ext[nelems]) elG1.Set(aExt[nelems])
nelems++ nelems++
} else { } else {
elG1.Add(table[last_pow2], table[i-last_pow2]) elG1.Add(table[lastPow2], table[i-lastPow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo // TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine() //table[i].MakeAffine()
} }
table = append(table, elG1) table = append(table, elG1)
} }
t.data = table if toaffine {
for i := 0; i < len(table); i++ {
info := table[i].Marshal()
table[i].Unmarshal(info)
}
}
t.data = table
}
func (t tableG1) Marshal() []byte {
info := make([]byte, 0)
for _, el := range t.data {
info = append(info, el.Marshal()...)
}
return info
} }
// Multiply scalar by precomputed table of G1 elements // Multiply scalar by precomputed table of G1 elements
func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i:=len(k); i < gsize; i++ { for i := len(k); i < gsize; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
msb := getMsb(k_ext)
for i := msb-1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G1).Add(Q,Q)
b := getBit(k_ext,i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
} }
}
if Q_prev != nil { Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
return Q.Add(Q,Q_prev)
} else { msb := getMsb(kExt)
return Q
} for i := msb - 1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G1).Add(Q, Q)
b := getBit(kExt, i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
}
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
} }
// Multiply scalar by precomputed table of G1 elements without intermediate doubling // Multiply scalar by precomputed table of G1 elements without intermediate doubling
func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
min_nelems := len(t) * gsize minNElems := len(t) * gsize
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i := len(k); i < min_nelems; i++ { for i := len(k); i < minNElems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1,nbitsQ)
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j:=0; j < len(t); j++ {
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
} }
} // Init Adders
} nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1, nbitsQ)
// Consolidate Addition for i := 0; i < nbitsQ; i++ {
R := new(bn256.G1).Set(Q[nbitsQ-1]) Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
for i:=nbitsQ-1; i>0; i-- { }
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R,R)
R.Add(R,Q[i-1])
}
if Q_prev != nil { // Perform bitwise addition
return R.Add(R,Q_prev) for j := 0; j < len(t); j++ {
} else { msb := getMsb(kExt[j*gsize : (j+1)*gsize])
return R
} for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
}
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
} }
// Compute tables within function. This solution should still be faster than std multiplication // Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7 // for gsize = 7
func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize) ntables := int((len(a) + gsize - 1) / gsize)
table := TableG1{} table := tableG1{}
Q:= new(bn256.G1).ScalarBaseMult(new(big.Int)) Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
for i:=0; i<ntables-1; i++ { for i := 0; i < ntables-1; i++ {
table.NewTableG1( a[i*gsize:(i+1)*gsize], gsize) table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize) Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
} }
table.NewTableG1( a[(ntables-1)*gsize:], gsize) table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize) Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
if Q_prev != nil { if qPrev != nil {
return Q.Add(Q,Q_prev) return Q.Add(Q, qPrev)
} else { }
return Q return Q
}
} }
// Multiply scalar by precomputed table of G1 elements without intermediate doubling // Multiply scalar by precomputed table of G1 elements without intermediate doubling
func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize) ntables := int((len(a) + gsize - 1) / gsize)
table := TableG1{} table := tableG1{}
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
min_nelems := ntables * gsize minNElems := ntables * gsize
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i := len(k); i < min_nelems; i++ { for i := len(k); i < minNElems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1,nbitsQ)
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j:=0; j < ntables-1; j++ {
table.NewTableG1( a[j*gsize:(j+1)*gsize], gsize)
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
} }
} // Init Adders
} nbitsQ := cryptoConstants.Q.BitLen()
table.NewTableG1( a[(ntables-1)*gsize:], gsize) Q := make([]*bn256.G1, nbitsQ)
msb := getMsb(k_ext[(ntables-1)*gsize:])
for i := msb-1; i >= 0; i-- { for i := 0; i < nbitsQ; i++ {
b := getBit(k_ext[(ntables-1)*gsize:],i) Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
if b != 0 { }
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition // Perform bitwise addition
R := new(bn256.G1).Set(Q[nbitsQ-1]) for j := 0; j < ntables-1; j++ {
for i:=nbitsQ-1; i>0; i-- { table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it msb := getMsb(kExt[j*gsize : (j+1)*gsize])
R = new(bn256.G1).Add(R,R)
R.Add(R,Q[i-1]) for i := msb - 1; i >= 0; i-- {
} b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if Q_prev != nil { if b != 0 {
return R.Add(R,Q_prev) // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
} else { Q[i].Add(Q[i], table.data[b])
return R }
} }
}
table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
msb := getMsb(kExt[(ntables-1)*gsize:])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[(ntables-1)*gsize:], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
} }
///// /////
@@ -215,16 +225,14 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
// TODO - How can avoid replicating code in G2? // TODO - How can avoid replicating code in G2?
//G2 //G2
type TableG2 struct{ type tableG2 struct {
data []*bn256.G2 data []*bn256.G2
} }
func (t tableG2) getData() []*bn256.G2 {
func (t TableG2) GetData() []*bn256.G2 { return t.data
return t.data
} }
// Compute table of gsize elements as :: // Compute table of gsize elements as ::
// Table[0] = Inf // Table[0] = Inf
// Table[1] = a[0] // Table[1] = a[0]
@@ -232,212 +240,224 @@ func (t TableG2) GetData() []*bn256.G2 {
// Table[3] = a[0]+a[1] // Table[3] = a[0]+a[1]
// ..... // .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1] // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int){ // TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
// EC table func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
table := make([]*bn256.G2, 0) // EC table
table := make([]*bn256.G2, 0)
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
a_ext := make([]*bn256.G2, 0) aExt := make([]*bn256.G2, 0)
a_ext = append(a_ext, a...) aExt = append(aExt, a...)
for i:=len(a); i<gsize; i++ { for i := len(a); i < gsize; i++ {
a_ext = append(a_ext,new(bn256.G2).ScalarBaseMult(big.NewInt(0))) aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
} }
elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0)) elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
table = append(table,elG2) table = append(table, elG2)
last_pow2 := 1 lastPow2 := 1
nelems := 0 nelems := 0
for i :=1; i< 1<<gsize; i++ { for i := 1; i < 1<<gsize; i++ {
elG2 := new(bn256.G2) elG2 := new(bn256.G2)
// if power of 2 // if power of 2
if i & (i-1) == 0{ if i&(i-1) == 0 {
last_pow2 = i lastPow2 = i
elG2.Set(a_ext[nelems]) elG2.Set(aExt[nelems])
nelems++ nelems++
} else { } else {
elG2.Add(table[last_pow2], table[i-last_pow2]) elG2.Add(table[lastPow2], table[i-lastPow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo // TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine() //table[i].MakeAffine()
} }
table = append(table, elG2) table = append(table, elG2)
} }
t.data = table if toaffine {
for i := 0; i < len(table); i++ {
info := table[i].Marshal()
table[i].Unmarshal(info)
}
}
t.data = table
}
func (t tableG2) Marshal() []byte {
info := make([]byte, 0)
for _, el := range t.data {
info = append(info, el.Marshal()...)
}
return info
} }
// Multiply scalar by precomputed table of G2 elements // Multiply scalar by precomputed table of G2 elements
func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i:=len(k); i < gsize; i++ { for i := len(k); i < gsize; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
msb := getMsb(k_ext)
for i := msb-1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G2).Add(Q,Q)
b := getBit(k_ext,i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
} }
}
if Q_prev != nil { Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
return Q.Add(Q, Q_prev)
} else { msb := getMsb(kExt)
return Q
} for i := msb - 1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G2).Add(Q, Q)
b := getBit(kExt, i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
}
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
} }
// Multiply scalar by precomputed table of G2 elements without intermediate doubling // Multiply scalar by precomputed table of G2 elements without intermediate doubling
func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
min_nelems := len(t) * gsize minNElems := len(t) * gsize
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i := len(k); i < min_nelems; i++ { for i := len(k); i < minNElems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2,nbitsQ)
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j:=0; j < len(t); j++ {
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
} }
} // Init Adders
} nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2, nbitsQ)
// Consolidate Addition for i := 0; i < nbitsQ; i++ {
R := new(bn256.G2).Set(Q[nbitsQ-1]) Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
for i:=nbitsQ-1; i>0; i-- { }
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R,R) // Perform bitwise addition
R.Add(R,Q[i-1]) for j := 0; j < len(t); j++ {
} msb := getMsb(kExt[j*gsize : (j+1)*gsize])
if Q_prev != nil {
return R.Add(R,Q_prev) for i := msb - 1; i >= 0; i-- {
} else { b := getBit(kExt[j*gsize:(j+1)*gsize], i)
return R if b != 0 {
} // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
}
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
} }
// Compute tables within function. This solution should still be faster than std multiplication // Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7 // for gsize = 7
func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize) ntables := int((len(a) + gsize - 1) / gsize)
table := TableG2{} table := tableG2{}
Q:= new(bn256.G2).ScalarBaseMult(new(big.Int)) Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i:=0; i<ntables-1; i++ { for i := 0; i < ntables-1; i++ {
table.NewTableG2( a[i*gsize:(i+1)*gsize], gsize) table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize) Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
} }
table.NewTableG2( a[(ntables-1)*gsize:], gsize) table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize) Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
if Q_prev != nil { if qPrev != nil {
return Q.Add(Q,Q_prev) return Q.Add(Q, qPrev)
} else { }
return Q return Q
}
} }
// Multiply scalar by precomputed table of G2 elements without intermediate doubling // Multiply scalar by precomputed table of G2 elements without intermediate doubling
func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize) ntables := int((len(a) + gsize - 1) / gsize)
table := TableG2{} table := tableG2{}
// We need at least gsize elements. If not enough, fill with 0 // We need at least gsize elements. If not enough, fill with 0
min_nelems := ntables * gsize minNElems := ntables * gsize
k_ext := make([]*big.Int, 0) kExt := make([]*big.Int, 0)
k_ext = append(k_ext, k...) kExt = append(kExt, k...)
for i := len(k); i < min_nelems; i++ { for i := len(k); i < minNElems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0)) kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2,nbitsQ)
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j:=0; j < ntables-1; j++ {
table.NewTableG2( a[j*gsize:(j+1)*gsize], gsize)
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
} }
} // Init Adders
} nbitsQ := cryptoConstants.Q.BitLen()
table.NewTableG2( a[(ntables-1)*gsize:], gsize) Q := make([]*bn256.G2, nbitsQ)
msb := getMsb(k_ext[(ntables-1)*gsize:])
for i := msb-1; i >= 0; i-- { for i := 0; i < nbitsQ; i++ {
b := getBit(k_ext[(ntables-1)*gsize:],i) Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
if b != 0 { }
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition // Perform bitwise addition
R := new(bn256.G2).Set(Q[nbitsQ-1]) for j := 0; j < ntables-1; j++ {
for i:=nbitsQ-1; i>0; i-- { table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it msb := getMsb(kExt[j*gsize : (j+1)*gsize])
R = new(bn256.G2).Add(R,R)
R.Add(R,Q[i-1]) for i := msb - 1; i >= 0; i-- {
} b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if Q_prev != nil { if b != 0 {
return R.Add(R,Q_prev) // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
} else { Q[i].Add(Q[i], table.data[b])
return R }
} }
}
table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
msb := getMsb(kExt[(ntables-1)*gsize:])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[(ntables-1)*gsize:], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
} }
// Return most significant bit position in a group of Big Integers // Return most significant bit position in a group of Big Integers
func getMsb(k []*big.Int) int{ func getMsb(k []*big.Int) int {
msb := 0 msb := 0
for _, el := range(k){ for _, el := range k {
tmp_msb := el.BitLen() tmpMsb := el.BitLen()
if tmp_msb > msb { if tmpMsb > msb {
msb = tmp_msb msb = tmpMsb
} }
} }
return msb return msb
} }
// Return ith bit in group of Big Integers // Return ith bit in group of Big Integers
func getBit(k []*big.Int, i int) uint { func getBit(k []*big.Int, i int) uint {
table_idx := uint(0) tableIdx := uint(0)
for idx, el := range(k){ for idx, el := range k {
b := el.Bit(i) b := el.Bit(i)
table_idx += (b << idx) tableIdx += (b << idx)
} }
return table_idx return tableIdx
} }

View File

@@ -1,166 +1,163 @@
package prover package prover
import ( import (
"bytes"
"crypto/rand" "crypto/rand"
"fmt"
"math/big" "math/big"
"testing" "testing"
"time"
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
"time"
"bytes"
"fmt"
) )
const ( const (
N1 = 5000 N1 = 5000
N2 = 5000 N2 = 5000
) )
func randomBigIntArray(n int) []*big.Int{ func randomBigIntArray(n int) []*big.Int {
var p []*big.Int var p []*big.Int
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
pi := randBI() pi := randBI()
p = append(p, pi) p = append(p, pi)
} }
return p return p
} }
func randomG1Array(n int) []*bn256.G1 { func randomG1Array(n int) []*bn256.G1 {
arrayG1 := make([]*bn256.G1, n) arrayG1 := make([]*bn256.G1, n)
for i:=0; i<n; i++ { for i := 0; i < n; i++ {
_, arrayG1[i], _ = bn256.RandomG1(rand.Reader) _, arrayG1[i], _ = bn256.RandomG1(rand.Reader)
} }
return arrayG1 return arrayG1
} }
func randomG2Array(n int) []*bn256.G2 { func randomG2Array(n int) []*bn256.G2 {
arrayG2 := make([]*bn256.G2, n) arrayG2 := make([]*bn256.G2, n)
for i:=0; i<n; i++ { for i := 0; i < n; i++ {
_, arrayG2[i], _ = bn256.RandomG2(rand.Reader) _, arrayG2[i], _ = bn256.RandomG2(rand.Reader)
} }
return arrayG2 return arrayG2
} }
func TestTableG1(t *testing.T) {
n := N1
// init scalar
var arrayW = randomBigIntArray(n)
// init G1 array
var arrayG1 = randomG1Array(n)
func TestTableG1(t *testing.T){ beforeT := time.Now()
n := N1 Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int))
for i := 0; i < n; i++ {
Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i]))
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
// init scalar for gsize := 2; gsize < 10; gsize++ {
var arrayW = randomBigIntArray(n) ntables := int((n + gsize - 1) / gsize)
// init G1 array table := make([]tableG1, ntables)
var arrayG1 = randomG1Array(n)
beforeT := time.Now() for i := 0; i < ntables-1; i++ {
Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int)) table[i].newTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
for i:=0; i < n; i++ { }
Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i])) table[ntables-1].newTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
for gsize:=2; gsize < 10; gsize++ { beforeT = time.Now()
ntables := int((n + gsize - 1) / gsize) Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
table := make([]TableG1, ntables) for i := 0; i < ntables-1; i++ {
Q2 = table[i].mulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].mulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
for i:=0; i<ntables-1; i++ { beforeT = time.Now()
table[i].NewTableG1( arrayG1[i*gsize:(i+1)*gsize], gsize) Q3 := scalarMultG1(arrayG1, arrayW, nil, gsize)
} fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
table[ntables-1].NewTableG1( arrayG1[(ntables-1)*gsize:], gsize)
beforeT = time.Now() beforeT = time.Now()
Q2:= new(bn256.G1).ScalarBaseMult(new(big.Int)) Q4 := mulTableNoDoubleG1(table, arrayW, nil, gsize)
for i:=0; i<ntables-1; i++ { fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize,time.Since(beforeT))
beforeT = time.Now() beforeT = time.Now()
Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize) Q5 := scalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT)) fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
beforeT = time.Now() if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize) t.Error("Error in TMult")
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize,time.Since(beforeT)) }
if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
beforeT = time.Now() t.Error("Error in TMult with table comp")
Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize) }
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT)) if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(),Q2.Marshal()) != 0 { if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
t.Error("Error in TMult") t.Error("Error in TMultNoDoublee with table comp")
} }
if bytes.Compare(Q1.Marshal(),Q3.Marshal()) != 0 { }
t.Error("Error in TMult with table comp")
}
if bytes.Compare(Q1.Marshal(),Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(),Q5.Marshal()) != 0 {
t.Error("Error in TMultNoDoublee with table comp")
}
}
} }
func TestTableG2(t *testing.T){ func TestTableG2(t *testing.T) {
n := N2 n := N2
// init scalar // init scalar
var arrayW = randomBigIntArray(n) var arrayW = randomBigIntArray(n)
// init G2 array // init G2 array
var arrayG2 = randomG2Array(n) var arrayG2 = randomG2Array(n)
beforeT := time.Now() beforeT := time.Now()
Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int)) Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i:=0; i < n; i++ { for i := 0; i < n; i++ {
Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i])) Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i]))
} }
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT)) fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
for gsize:=2; gsize < 10; gsize++ { for gsize := 2; gsize < 10; gsize++ {
ntables := int((n + gsize - 1) / gsize) ntables := int((n + gsize - 1) / gsize)
table := make([]TableG2, ntables) table := make([]tableG2, ntables)
for i:=0; i<ntables-1; i++ { for i := 0; i < ntables-1; i++ {
table[i].NewTableG2( arrayG2[i*gsize:(i+1)*gsize], gsize) table[i].newTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
} }
table[ntables-1].NewTableG2( arrayG2[(ntables-1)*gsize:], gsize) table[ntables-1].newTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
beforeT = time.Now() beforeT = time.Now()
Q2:= new(bn256.G2).ScalarBaseMult(new(big.Int)) Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i:=0; i<ntables-1; i++ { for i := 0; i < ntables-1; i++ {
Q2 =table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize) Q2 = table[i].mulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
} }
Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize) Q2 = table[ntables-1].mulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize,time.Since(beforeT)) fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
beforeT = time.Now() beforeT = time.Now()
Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize) Q3 := scalarMultG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT)) fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
beforeT = time.Now() beforeT = time.Now()
Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize) Q4 := mulTableNoDoubleG2(table, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize,time.Since(beforeT)) fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
beforeT = time.Now() beforeT = time.Now()
Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize) Q5 := scalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT)) fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
if bytes.Compare(Q1.Marshal(),Q2.Marshal()) != 0 { t.Error("Error in TMult")
t.Error("Error in TMult") }
} if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
if bytes.Compare(Q1.Marshal(),Q3.Marshal()) != 0 { t.Error("Error in TMult with table comp")
t.Error("Error in TMult with table comp") }
} if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
if bytes.Compare(Q1.Marshal(),Q4.Marshal()) != 0 { t.Error("Error in TMultNoDouble")
t.Error("Error in TMultNoDouble") }
} if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
if bytes.Compare(Q1.Marshal(),Q5.Marshal()) != 0 { t.Error("Error in TMultNoDoublee with table comp")
t.Error("Error in TMultNoDoublee with table comp") }
} }
}
} }

View File

@@ -10,7 +10,7 @@ import (
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
"github.com/iden3/go-circom-prover-verifier/types" "github.com/iden3/go-circom-prover-verifier/types"
"github.com/iden3/go-iden3-crypto/utils" "github.com/iden3/go-iden3-crypto/utils"
//"fmt" //"fmt"
) )
// Proof is the data structure of the Groth16 zkSNARK proof // Proof is the data structure of the Groth16 zkSNARK proof
@@ -45,7 +45,7 @@ type Witness []*big.Int
// Group Size // Group Size
const ( const (
GSIZE = 6 GSIZE = 6
) )
func randBigInt() (*big.Int, error) { func randBigInt() (*big.Int, error) {
@@ -81,34 +81,34 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
proofB := arrayOfZeroesG2(numcpu) proofB := arrayOfZeroesG2(numcpu)
proofC := arrayOfZeroesG1(numcpu) proofC := arrayOfZeroesG1(numcpu)
proofBG1 := arrayOfZeroesG1(numcpu) proofBG1 := arrayOfZeroesG1(numcpu)
gsize := GSIZE gsize := GSIZE
var wg1 sync.WaitGroup var wg1 sync.WaitGroup
wg1.Add(numcpu) wg1.Add(numcpu)
for _cpu, _ranges := range ranges(pk.NVars, numcpu) { for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
// split 1 // split 1
go func(cpu int, ranges [2]int) { go func(cpu int, ranges [2]int) {
proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]], proofA[cpu] = scalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]], w[ranges[0]:ranges[1]],
proofA[cpu], proofA[cpu],
gsize) gsize)
proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]], proofB[cpu] = scalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]], w[ranges[0]:ranges[1]],
proofB[cpu], proofB[cpu],
gsize) gsize)
proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]], proofBG1[cpu] = scalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]], w[ranges[0]:ranges[1]],
proofBG1[cpu], proofBG1[cpu],
gsize) gsize)
min_lim := pk.NPublic+1 minLim := pk.NPublic + 1
if ranges[0] > pk.NPublic+1 { if ranges[0] > pk.NPublic+1 {
min_lim = ranges[0] minLim = ranges[0]
} }
if ranges[1] > pk.NPublic + 1 { if ranges[1] > pk.NPublic+1 {
proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]], proofC[cpu] = scalarMultNoDoubleG1(pk.C[minLim:ranges[1]],
w[min_lim:ranges[1]], w[minLim:ranges[1]],
proofC[cpu], proofC[cpu],
gsize) gsize)
} }
wg1.Done() wg1.Done()
}(_cpu, _ranges) }(_cpu, _ranges)
} }
@@ -142,10 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
for _cpu, _ranges := range ranges(len(h), numcpu) { for _cpu, _ranges := range ranges(len(h), numcpu) {
// split 2 // split 2
go func(cpu int, ranges [2]int) { go func(cpu int, ranges [2]int) {
proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]], proofC[cpu] = scalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
h[ranges[0]:ranges[1]], h[ranges[0]:ranges[1]],
proofC[cpu], proofC[cpu],
gsize) gsize)
wg2.Done() wg2.Done()
}(_cpu, _ranges) }(_cpu, _ranges)
} }

View File

@@ -4,6 +4,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os"
"testing" "testing"
"time" "time"
@@ -16,25 +17,38 @@ import (
func TestCircuitsGenerateProof(t *testing.T) { func TestCircuitsGenerateProof(t *testing.T) {
testCircuitGenerateProof(t, "circuit1k") // 1000 constraints testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
testCircuitGenerateProof(t, "circuit5k") // 5000 constraints testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
//testCircuitGenerateProof(t, "circuit10k") // 10000 constraints // testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
//testCircuitGenerateProof(t, "circuit20k") // 20000 constraints // testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
} }
func testCircuitGenerateProof(t *testing.T, circuit string) { func testCircuitGenerateProof(t *testing.T, circuit string) {
provingKeyJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json") // using json provingKey file
// provingKeyJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
// require.Nil(t, err)
// pk, err := parsers.ParsePk(provingKeyJson)
// require.Nil(t, err)
// witnessJson, err := ioutil.ReadFile("../testdata/" + circuit + "/witness.json")
// require.Nil(t, err)
// w, err := parsers.ParseWitness(witnessJson)
// require.Nil(t, err)
// using bin provingKey file
pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
require.Nil(t, err) require.Nil(t, err)
pk, err := parsers.ParsePk(provingKeyJson) defer pkBinFile.Close()
pk, err := parsers.ParsePkBin(pkBinFile)
require.Nil(t, err) require.Nil(t, err)
witnessJson, err := ioutil.ReadFile("../testdata/" + circuit + "/witness.json") witnessBinFile, err := os.Open("../testdata/" + circuit + "/witness.bin")
require.Nil(t, err) require.Nil(t, err)
w, err := parsers.ParseWitness(witnessJson) defer witnessBinFile.Close()
w, err := parsers.ParseWitnessBin(witnessBinFile)
require.Nil(t, err) require.Nil(t, err)
beforeT := time.Now() beforeT := time.Now()
proof, pubSignals, err := GenerateProof(pk, w) proof, pubSignals, err := GenerateProof(pk, w)
assert.Nil(t, err) assert.Nil(t, err)
fmt.Println("proof generation time elapsed:", time.Since(beforeT)) fmt.Println("proof generation time for "+circuit+" elapsed:", time.Since(beforeT))
proofStr, err := parsers.ProofToJson(proof) proofStr, err := parsers.ProofToJson(proof)
assert.Nil(t, err) assert.Nil(t, err)

View File

@@ -16,7 +16,7 @@ There may be some concern on the additional size of the tables since they need t
| Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 | | Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---| |---|---|---|--|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - | | Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54 s | | Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54 s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s | | Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
@@ -26,7 +26,7 @@ There may be some concern on the additional size of the tables since they need t
There are 5000 G2 Elliptical Curve Points, and the scalars are 254 bits (BN256 curve). There are 5000 G2 Elliptical Curve Points, and the scalars are 254 bits (BN256 curve).
| Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 | | Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---| |---|---|---|--|---|---|---|---|---|
| Naive | 3.55s | | | | | | | | | Naive | 3.55s | | | | | | | |
| Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms | | Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms |
| Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s | | Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s |

View File

@@ -1,25 +0,0 @@
# Tables Pre-calculation
The most time consuming part of a ZKSnark proof calculation is the scalar multiplication of elliptic curve points. Direct mechanism accumulates each multiplication. However, prover only needs the total accumulation.
There are two potential improvements to the naive approach:
1. Apply Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
2. Leave the doubling operation for the last step
Both options can be combined.
In the following table, we show the results of using the naive method, Srauss-Shamir and Strauss-Shamir + No doubling. These last two options are repeated for different table grouping order.
There are 5000 G1 Elliptical Curve Points, and the scalars are 254 bits (BN256 curve).
There may be some concern on the additional size of the tables since they need to be loaded into a smartphone during the proof, and the time required to load these tables may exceed the benefits. If this is a problem, another althernative is to compute the tables during the proof itself. Depending on the Group Size, timing may be better than the naive approach.
| Algorithm | GS / Time |
|---|---|---|
| Naive | 6.63s | | | | | | | |
| Strauss | 13.16s | 9.033s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54 s | 1.44 s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s|
| No Doubling + Table Computation | 6.83s | 5.1s | 4.16s | 3.52s| 3.22s | 3.21s | 3.57s | 4.56s |

View File

@@ -6,6 +6,8 @@ import (
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
) )
var Q, _ = new(big.Int).SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
// R is the mod of the finite field // R is the mod of the finite field
var R, _ = new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10) var R, _ = new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)