9 Commits

Author SHA1 Message Date
druiz0992   86621a0dbe   reduced test time                           2020-05-03 20:17:57 +02:00
druiz0992   9ed6e14fad   Leave prover_test untouched                 2020-05-03 20:07:39 +02:00
druiz0992   2d45cb7039   Update tables.md                            2020-05-03 19:58:04 +02:00
druiz0992   68b0c2fb54   Fixed merge conflicts                       2020-05-03 19:53:52 +02:00
druiz0992   e3b5f88660   Added G2 and included tables to prover      2020-05-03 19:51:02 +02:00
druiz0992   36b48215f0   Update tables.md                            2020-05-03 03:39:22 +02:00
druiz0992   a110eb4ca1   Update tables.md                            2020-05-03 03:32:23 +02:00
druiz0992   72913bc801   Added a description file                    2020-05-03 03:30:52 +02:00
druiz0992   8c81f5041e   G1 Functionality to precomput G1 tables     2020-05-03 03:02:49 +02:00
13 changed files with 523 additions and 1559 deletions

View File

@@ -23,6 +23,6 @@ jobs:
- name: Compile circuits and execute Go tests
run: |
cd testdata && sh ./compile-circuits.sh && cd ..
go run cli/cli.go -prove -pk=testdata/circuit1k/proving_key.json -witness=testdata/circuit1k/witness.json -proof=testdata/circuit1k/proof.json -public=testdata/circuit1k/public.json
go run cli/cli.go -prove -pk=testdata/circuit5k/proving_key.json -witness=testdata/circuit5k/witness.json -proof=testdata/circuit5k/proof.json -public=testdata/circuit5k/public.json
go run cli/cli.go -prove -provingkey=testdata/circuit1k/proving_key.json -witness=testdata/circuit1k/witness.json -proof=testdata/circuit1k/proof.json -public=testdata/circuit1k/public.json
go run cli/cli.go -prove -provingkey=testdata/circuit5k/proving_key.json -witness=testdata/circuit5k/witness.json -proof=testdata/circuit5k/proof.json -public=testdata/circuit5k/public.json
go test ./...

View File

@@ -21,14 +21,12 @@ func main() {
prove := flag.Bool("prove", false, "prover mode")
verify := flag.Bool("verify", false, "verifier mode")
convert := flag.Bool("convert", false, "convert mode, to convert between proving_key.json to proving_key.go.bin")
provingKeyPath := flag.String("pk", "proving_key.json", "provingKey path")
provingKeyPath := flag.String("provingkey", "proving_key.json", "provingKey path")
witnessPath := flag.String("witness", "witness.json", "witness path")
proofPath := flag.String("proof", "proof.json", "proof path")
verificationKeyPath := flag.String("vk", "verification_key.json", "verificationKey path")
verificationKeyPath := flag.String("verificationkey", "verification_key.json", "verificationKey path")
publicPath := flag.String("public", "public.json", "public signals path")
provingKeyBinPath := flag.String("pkbin", "proving_key.go.bin", "provingKey Bin path")
flag.Parse()
@@ -44,12 +42,6 @@ func main() {
fmt.Println("Error:", err)
}
os.Exit(0)
} else if *convert {
err := cmdConvert(*provingKeyPath, *provingKeyBinPath)
if err != nil {
fmt.Println("Error:", err)
}
os.Exit(0)
}
flag.PrintDefaults()
}
@@ -141,25 +133,3 @@ func cmdVerify(proofPath, verificationKeyPath, publicPath string) error {
fmt.Println("verification:", v)
return nil
}
func cmdConvert(provingKeyPath, provingKeyBinPath string) error {
fmt.Println("Convertion tool")
provingKeyJson, err := ioutil.ReadFile(provingKeyPath)
if err != nil {
return err
}
pk, err := parsers.ParsePk(provingKeyJson)
if err != nil {
return err
}
fmt.Printf("Converting proving key json (%s)\nto go proving key binary (%s)\n", provingKeyPath, provingKeyBinPath)
pkGBin, err := parsers.PkToGoBin(pk)
if err != nil {
return err
}
err = ioutil.WriteFile(provingKeyBinPath, pkGBin, 0644)
return nil
}

View File

@@ -3,7 +3,6 @@ package parsers
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
@@ -309,13 +308,6 @@ func stringToBigInt(s string) (*big.Int, error) {
return n, nil
}
func addPadding32(b []byte) []byte {
if len(b) != 32 {
b = addZPadding(b)
}
return b
}
func addZPadding(b []byte) []byte {
var z [32]byte
var r []byte
@@ -475,35 +467,8 @@ func stringToG2(h [][]string) (*bn256.G2, error) {
return p, err
}
// ProofStringToSmartContractFormat converts the ProofString to a ProofString in the SmartContract format in a ProofString structure
func ProofStringToSmartContractFormat(s ProofString) ProofString {
var rs ProofString
rs.A = make([]string, 2)
rs.B = make([][]string, 2)
rs.B[0] = make([]string, 2)
rs.B[1] = make([]string, 2)
rs.C = make([]string, 2)
rs.A[0] = s.A[0]
rs.A[1] = s.A[1]
rs.B[0][0] = s.B[0][1]
rs.B[0][1] = s.B[0][0]
rs.B[1][0] = s.B[1][1]
rs.B[1][1] = s.B[1][0]
rs.C[0] = s.C[0]
rs.C[1] = s.C[1]
rs.Protocol = s.Protocol
return rs
}
// ProofToSmartContractFormat converts the *types.Proof to a ProofString in the SmartContract format in a ProofString structure
func ProofToSmartContractFormat(p *types.Proof) ProofString {
s := ProofToString(p)
return ProofStringToSmartContractFormat(s)
}
// ProofToString converts the Proof to ProofString
func ProofToString(p *types.Proof) ProofString {
// ProofToJson outputs the Proof in Json format
func ProofToJson(p *types.Proof) ([]byte, error) {
var ps ProofString
ps.A = make([]string, 3)
ps.B = make([][]string, 3)
@@ -532,55 +497,10 @@ func ProofToString(p *types.Proof) ProofString {
ps.Protocol = "groth"
return ps
}
// ProofToJson outputs the Proof in Json format
func ProofToJson(p *types.Proof) ([]byte, error) {
ps := ProofToString(p)
return json.Marshal(ps)
}
// ProofToHex converts the Proof to ProofString with hexadecimal strings
func ProofToHex(p *types.Proof) ProofString {
var ps ProofString
ps.A = make([]string, 3)
ps.B = make([][]string, 3)
ps.B[0] = make([]string, 2)
ps.B[1] = make([]string, 2)
ps.B[2] = make([]string, 2)
ps.C = make([]string, 3)
a := p.A.Marshal()
ps.A[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[:32]).Bytes())
ps.A[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[32:64]).Bytes())
ps.A[2] = "1"
b := p.B.Marshal()
ps.B[0][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[:32]).Bytes())
ps.B[0][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[32:64]).Bytes())
ps.B[1][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[64:96]).Bytes())
ps.B[1][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[96:128]).Bytes())
ps.B[2][0] = "1"
ps.B[2][1] = "0"
c := p.C.Marshal()
ps.C[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[:32]).Bytes())
ps.C[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[32:64]).Bytes())
ps.C[2] = "1"
ps.Protocol = "groth"
return ps
}
// ProofToJsonHex outputs the Proof in Json format with hexadecimal strings
func ProofToJsonHex(p *types.Proof) ([]byte, error) {
ps := ProofToHex(p)
return json.Marshal(ps)
}
// ParseWitnessBin parses binary file representation of the Witness into the Witness struct
// ParseWitness parses binary file representation of the Witness into the Witness struct
func ParseWitnessBin(f *os.File) (types.Witness, error) {
var w types.Witness
r := bufio.NewReader(f)
@@ -607,715 +527,3 @@ func swapEndianness(b []byte) []byte {
}
return o
}
func readNBytes(r io.Reader, n int) ([]byte, error) {
b := make([]byte, n)
_, err := io.ReadFull(r, b)
if err != nil {
return b, err
}
return b, nil
}
// ParsePkBin parses binary file representation of the ProvingKey into the ProvingKey struct
func ParsePkBin(f *os.File) (*types.Pk, error) {
o := 0
var pk types.Pk
r := bufio.NewReader(f)
b, err := readNBytes(r, 12)
if err != nil {
return nil, err
}
pk.NVars = int(binary.LittleEndian.Uint32(b[:4]))
pk.NPublic = int(binary.LittleEndian.Uint32(b[4:8]))
pk.DomainSize = int(binary.LittleEndian.Uint32(b[8:12]))
o += 12
b, err = readNBytes(r, 8)
if err != nil {
return nil, err
}
pPolsA := int(binary.LittleEndian.Uint32(b[:4]))
pPolsB := int(binary.LittleEndian.Uint32(b[4:8]))
o += 8
b, err = readNBytes(r, 20)
if err != nil {
return nil, err
}
pPointsA := int(binary.LittleEndian.Uint32(b[:4]))
pPointsB1 := int(binary.LittleEndian.Uint32(b[4:8]))
pPointsB2 := int(binary.LittleEndian.Uint32(b[8:12]))
pPointsC := int(binary.LittleEndian.Uint32(b[12:16]))
pPointsHExps := int(binary.LittleEndian.Uint32(b[16:20]))
o += 20
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkAlpha1 = new(bn256.G1)
_, err = pk.VkAlpha1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkBeta1 = new(bn256.G1)
_, err = pk.VkBeta1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkDelta1 = new(bn256.G1)
_, err = pk.VkDelta1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkBeta2 = new(bn256.G2)
_, err = pk.VkBeta2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkDelta2 = new(bn256.G2)
_, err = pk.VkDelta2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
o += 448
if o != pPolsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsA, o)
}
// PolsA
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(fromMont1R(b[:32]))
o += 32
}
pk.PolsA = append(pk.PolsA, polsMap)
}
if o != pPolsB {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsB, o)
}
// PolsB
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(fromMont1R(b[:32]))
o += 32
}
pk.PolsB = append(pk.PolsB, polsMap)
}
if o != pPointsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsA, o)
}
// A
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.A = append(pk.A, p1)
o += 64
}
if o != pPointsB1 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB1, o)
}
// B1
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.B1 = append(pk.B1, p1)
o += 64
}
if o != pPointsB2 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB2, o)
}
// B2
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
p2 := new(bn256.G2)
_, err = p2.Unmarshal(fromMont2Q(b))
if err != nil {
return nil, err
}
pk.B2 = append(pk.B2, p2)
o += 128
}
if o != pPointsC {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsC, o)
}
// C
zb := make([]byte, 64)
z := new(bn256.G1)
_, err = z.Unmarshal(zb)
if err != nil {
return nil, err
}
pk.C = append(pk.C, z) // circom behaviour (3x null==["0", "0", "0"])
pk.C = append(pk.C, z)
pk.C = append(pk.C, z)
for i := pk.NPublic + 1; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.C = append(pk.C, p1)
o += 64
}
if o != pPointsHExps {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsHExps, o)
}
// HExps
for i := 0; i < pk.DomainSize; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(fromMont1Q(b))
if err != nil {
return nil, err
}
pk.HExps = append(pk.HExps, p1)
}
return &pk, nil
}
func fromMont1Q(m []byte) []byte {
a := new(big.Int).SetBytes(swapEndianness(m[:32]))
b := new(big.Int).SetBytes(swapEndianness(m[32:64]))
x := coordFromMont(a, types.Q)
y := coordFromMont(b, types.Q)
if bytes.Equal(x.Bytes(), big.NewInt(1).Bytes()) {
x = big.NewInt(0)
}
if bytes.Equal(y.Bytes(), big.NewInt(1).Bytes()) {
y = big.NewInt(0)
}
xBytes := x.Bytes()
yBytes := y.Bytes()
if len(xBytes) != 32 {
xBytes = addZPadding(xBytes)
}
if len(yBytes) != 32 {
yBytes = addZPadding(yBytes)
}
var p []byte
p = append(p, xBytes...)
p = append(p, yBytes...)
return p
}
func fromMont2Q(m []byte) []byte {
a := new(big.Int).SetBytes(swapEndianness(m[:32]))
b := new(big.Int).SetBytes(swapEndianness(m[32:64]))
c := new(big.Int).SetBytes(swapEndianness(m[64:96]))
d := new(big.Int).SetBytes(swapEndianness(m[96:128]))
x := coordFromMont(a, types.Q)
y := coordFromMont(b, types.Q)
z := coordFromMont(c, types.Q)
t := coordFromMont(d, types.Q)
if bytes.Equal(x.Bytes(), big.NewInt(1).Bytes()) {
x = big.NewInt(0)
}
if bytes.Equal(y.Bytes(), big.NewInt(1).Bytes()) {
y = big.NewInt(0)
}
if bytes.Equal(z.Bytes(), big.NewInt(1).Bytes()) {
z = big.NewInt(0)
}
if bytes.Equal(t.Bytes(), big.NewInt(1).Bytes()) {
t = big.NewInt(0)
}
xBytes := x.Bytes()
yBytes := y.Bytes()
zBytes := z.Bytes()
tBytes := t.Bytes()
if len(xBytes) != 32 {
xBytes = addZPadding(xBytes)
}
if len(yBytes) != 32 {
yBytes = addZPadding(yBytes)
}
if len(zBytes) != 32 {
zBytes = addZPadding(zBytes)
}
if len(tBytes) != 32 {
tBytes = addZPadding(tBytes)
}
var p []byte
p = append(p, yBytes...) // swap
p = append(p, xBytes...)
p = append(p, tBytes...)
p = append(p, zBytes...)
return p
}
func fromMont1R(m []byte) []byte {
a := new(big.Int).SetBytes(swapEndianness(m[:32]))
x := coordFromMont(a, types.R)
return x.Bytes()
}
func fromMont2R(m []byte) []byte {
a := new(big.Int).SetBytes(swapEndianness(m[:32]))
b := new(big.Int).SetBytes(swapEndianness(m[32:64]))
c := new(big.Int).SetBytes(swapEndianness(m[64:96]))
d := new(big.Int).SetBytes(swapEndianness(m[96:128]))
x := coordFromMont(a, types.R)
y := coordFromMont(b, types.R)
z := coordFromMont(c, types.R)
t := coordFromMont(d, types.R)
var p []byte
p = append(p, y.Bytes()...) // swap
p = append(p, x.Bytes()...)
p = append(p, t.Bytes()...)
p = append(p, z.Bytes()...)
return p
}
func coordFromMont(u, q *big.Int) *big.Int {
return new(big.Int).Mod(
new(big.Int).Mul(
u,
new(big.Int).ModInverse(
new(big.Int).Lsh(big.NewInt(1), 256),
q,
),
),
q,
)
}
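As context for the Montgomery helpers above: the circom .bin proving key stores field elements in Montgomery form, u = x·R mod q with R = 2^256, and coordFromMont recovers x by multiplying by the modular inverse of R. A minimal standalone sketch of that round trip (the modulus literal below is the bn256 base-field prime, assumed here instead of reading types.Q):
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// bn256 (alt_bn128) base-field modulus q, written as a literal for this sketch
	q, _ := new(big.Int).SetString(
		"21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
	R := new(big.Int).Lsh(big.NewInt(1), 256) // R = 2^256

	x := big.NewInt(123456789)
	u := new(big.Int).Mod(new(big.Int).Mul(x, R), q) // into Montgomery form: u = x*R mod q
	// out of Montgomery form, exactly what coordFromMont computes: u * R^-1 mod q
	back := new(big.Int).Mod(new(big.Int).Mul(u, new(big.Int).ModInverse(R, q)), q)
	fmt.Println(back.Cmp(x) == 0) // true
}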
// PkToGoBin converts the ProvingKey (*types.Pk) into the binary format defined by
// go-circom-prover-verifier. PkGoBin is go-circom-prover-verifier's own
// binary format, which allows faster parsing.
func PkToGoBin(pk *types.Pk) ([]byte, error) {
var r []byte
o := 0
var b [4]byte
binary.LittleEndian.PutUint32(b[:], uint32(pk.NVars))
r = append(r, b[:]...)
binary.LittleEndian.PutUint32(b[:], uint32(pk.NPublic))
r = append(r, b[:]...)
binary.LittleEndian.PutUint32(b[:], uint32(pk.DomainSize))
r = append(r, b[:]...)
o += 12
// reserve space for pols (A, B) pos
b = [4]byte{}
r = append(r, b[:]...) // 12:16
r = append(r, b[:]...) // 16:20
o += 8
// reserve space for points (A, B1, B2, C, HExps) pos
r = append(r, b[:]...) // 20:24
r = append(r, b[:]...) // 24
r = append(r, b[:]...) // 28
r = append(r, b[:]...) // 32
r = append(r, b[:]...) // 36:40
o += 20
pb1 := pk.VkAlpha1.Marshal()
r = append(r, pb1[:]...)
pb1 = pk.VkBeta1.Marshal()
r = append(r, pb1[:]...)
pb1 = pk.VkDelta1.Marshal()
r = append(r, pb1[:]...)
pb2 := pk.VkBeta2.Marshal()
r = append(r, pb2[:]...)
pb2 = pk.VkDelta2.Marshal()
r = append(r, pb2[:]...)
o += 448
// polsA
binary.LittleEndian.PutUint32(r[12:16], uint32(o))
for i := 0; i < pk.NVars; i++ {
binary.LittleEndian.PutUint32(b[:], uint32(len(pk.PolsA[i])))
r = append(r, b[:]...)
o += 4
for j, v := range pk.PolsA[i] {
binary.LittleEndian.PutUint32(b[:], uint32(j))
r = append(r, b[:]...)
r = append(r, addPadding32(v.Bytes())...)
o += 32 + 4
}
}
// polsB
binary.LittleEndian.PutUint32(r[16:20], uint32(o))
for i := 0; i < pk.NVars; i++ {
binary.LittleEndian.PutUint32(b[:], uint32(len(pk.PolsB[i])))
r = append(r, b[:]...)
o += 4
for j, v := range pk.PolsB[i] {
binary.LittleEndian.PutUint32(b[:], uint32(j))
r = append(r, b[:]...)
r = append(r, addPadding32(v.Bytes())...)
o += 32 + 4
}
}
// A
binary.LittleEndian.PutUint32(r[20:24], uint32(o))
for i := 0; i < pk.NVars; i++ {
pb1 = pk.A[i].Marshal()
r = append(r, pb1[:]...)
o += 64
}
// B1
binary.LittleEndian.PutUint32(r[24:28], uint32(o))
for i := 0; i < pk.NVars; i++ {
pb1 = pk.B1[i].Marshal()
r = append(r, pb1[:]...)
o += 64
}
// B2
binary.LittleEndian.PutUint32(r[28:32], uint32(o))
for i := 0; i < pk.NVars; i++ {
pb2 = pk.B2[i].Marshal()
r = append(r, pb2[:]...)
o += 128
}
// C
binary.LittleEndian.PutUint32(r[32:36], uint32(o))
for i := pk.NPublic + 1; i < pk.NVars; i++ {
pb1 = pk.C[i].Marshal()
r = append(r, pb1[:]...)
o += 64
}
// HExps
binary.LittleEndian.PutUint32(r[36:40], uint32(o))
for i := 0; i < pk.DomainSize+1; i++ {
pb1 = pk.HExps[i].Marshal()
r = append(r, pb1[:]...)
o += 64
}
return r[:], nil
}
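For orientation while reading PkToGoBin above and ParsePkGoBin below, the go.bin layout implied by the offsets in this code is:
// PkGoBin layout (byte offsets), as written by PkToGoBin:
//    0..12   NVars, NPublic, DomainSize                        (3 x uint32, little endian)
//   12..20   byte offsets of the PolsA and PolsB sections      (2 x uint32)
//   20..40   byte offsets of the A, B1, B2, C, HExps sections  (5 x uint32)
//   40..488  VkAlpha1, VkBeta1, VkDelta1 (G1, 64 bytes each),
//            VkBeta2, VkDelta2 (G2, 128 bytes each)
//  488..     PolsA then PolsB: per variable a uint32 count, then
//            count x (uint32 key + 32-byte coefficient); followed by the
//            A, B1, B2, C and HExps point sections (G1 64 bytes, G2 128 bytes each)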
// ParsePkGoBin parses the go-circom-prover-verifier binary file representation of
// the ProvingKey into the ProvingKey struct (*types.Pk). PkGoBin is
// go-circom-prover-verifier's own binary format, which allows faster
// parsing.
func ParsePkGoBin(f *os.File) (*types.Pk, error) {
o := 0
var pk types.Pk
r := bufio.NewReader(f)
b, err := readNBytes(r, 12)
if err != nil {
return nil, err
}
pk.NVars = int(binary.LittleEndian.Uint32(b[:4]))
pk.NPublic = int(binary.LittleEndian.Uint32(b[4:8]))
pk.DomainSize = int(binary.LittleEndian.Uint32(b[8:12]))
o += 12
b, err = readNBytes(r, 8)
if err != nil {
return nil, err
}
pPolsA := int(binary.LittleEndian.Uint32(b[:4]))
pPolsB := int(binary.LittleEndian.Uint32(b[4:8]))
o += 8
b, err = readNBytes(r, 20)
if err != nil {
return nil, err
}
pPointsA := int(binary.LittleEndian.Uint32(b[:4]))
pPointsB1 := int(binary.LittleEndian.Uint32(b[4:8]))
pPointsB2 := int(binary.LittleEndian.Uint32(b[8:12]))
pPointsC := int(binary.LittleEndian.Uint32(b[12:16]))
pPointsHExps := int(binary.LittleEndian.Uint32(b[16:20]))
o += 20
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkAlpha1 = new(bn256.G1)
_, err = pk.VkAlpha1.Unmarshal(b)
if err != nil {
return &pk, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkBeta1 = new(bn256.G1)
_, err = pk.VkBeta1.Unmarshal(b)
if err != nil {
return &pk, err
}
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
pk.VkDelta1 = new(bn256.G1)
_, err = pk.VkDelta1.Unmarshal(b)
if err != nil {
return &pk, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkBeta2 = new(bn256.G2)
_, err = pk.VkBeta2.Unmarshal(b)
if err != nil {
return &pk, err
}
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
pk.VkDelta2 = new(bn256.G2)
_, err = pk.VkDelta2.Unmarshal(b)
if err != nil {
return &pk, err
}
o += 448
if o != pPolsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsA, o)
}
// PolsA
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(b[:32])
o += 32
}
pk.PolsA = append(pk.PolsA, polsMap)
}
if o != pPolsB {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPolsB, o)
}
// PolsB
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 4)
if err != nil {
return nil, err
}
keysLength := int(binary.LittleEndian.Uint32(b[:4]))
o += 4
polsMap := make(map[int]*big.Int)
for j := 0; j < keysLength; j++ {
bK, err := readNBytes(r, 4)
if err != nil {
return nil, err
}
key := int(binary.LittleEndian.Uint32(bK[:4]))
o += 4
b, err := readNBytes(r, 32)
if err != nil {
return nil, err
}
polsMap[key] = new(big.Int).SetBytes(b[:32])
o += 32
}
pk.PolsB = append(pk.PolsB, polsMap)
}
if o != pPointsA {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsA, o)
}
// A
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(b)
if err != nil {
return nil, err
}
pk.A = append(pk.A, p1)
o += 64
}
if o != pPointsB1 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB1, o)
}
// B1
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(b)
if err != nil {
return nil, err
}
pk.B1 = append(pk.B1, p1)
o += 64
}
if o != pPointsB2 {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsB2, o)
}
// B2
for i := 0; i < pk.NVars; i++ {
b, err = readNBytes(r, 128)
if err != nil {
return nil, err
}
p2 := new(bn256.G2)
_, err = p2.Unmarshal(b)
if err != nil {
return nil, err
}
pk.B2 = append(pk.B2, p2)
o += 128
}
if o != pPointsC {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsC, o)
}
// C
zb := make([]byte, 64)
z := new(bn256.G1)
_, err = z.Unmarshal(zb)
if err != nil {
return nil, err
}
pk.C = append(pk.C, z)
pk.C = append(pk.C, z)
pk.C = append(pk.C, z)
for i := pk.NPublic + 1; i < pk.NVars; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(b)
if err != nil {
return nil, err
}
pk.C = append(pk.C, p1)
o += 64
}
if o != pPointsHExps {
return nil, fmt.Errorf("Unexpected offset, expected: %v, actual: %v", pPointsHExps, o)
}
// HExps
for i := 0; i < pk.DomainSize+1; i++ {
b, err = readNBytes(r, 64)
if err != nil {
return nil, err
}
p1 := new(bn256.G1)
_, err = p1.Unmarshal(b)
if err != nil {
return nil, err
}
pk.HExps = append(pk.HExps, p1)
}
return &pk, nil
}

View File

@@ -1,12 +1,10 @@
package parsers
import (
"encoding/json"
"io/ioutil"
"os"
"testing"
"github.com/iden3/go-circom-prover-verifier/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -174,147 +172,3 @@ func TestParseWitnessBin(t *testing.T) {
testCircuitParseWitnessBin(t, "circuit1k")
testCircuitParseWitnessBin(t, "circuit5k")
}
func TestProofSmartContractFormat(t *testing.T) {
proofJson, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
require.Nil(t, err)
proof, err := ParseProof(proofJson)
require.Nil(t, err)
pS := ProofToString(proof)
pSC := ProofToSmartContractFormat(proof)
assert.Nil(t, err)
assert.Equal(t, pS.A[0], pSC.A[0])
assert.Equal(t, pS.A[1], pSC.A[1])
assert.Equal(t, pS.B[0][0], pSC.B[0][1])
assert.Equal(t, pS.B[0][1], pSC.B[0][0])
assert.Equal(t, pS.B[1][0], pSC.B[1][1])
assert.Equal(t, pS.B[1][1], pSC.B[1][0])
assert.Equal(t, pS.C[0], pSC.C[0])
assert.Equal(t, pS.C[1], pSC.C[1])
assert.Equal(t, pS.Protocol, pSC.Protocol)
pSC2 := ProofStringToSmartContractFormat(pS)
assert.Equal(t, pSC, pSC2)
}
func TestProofJSON(t *testing.T) {
proofJson, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
require.Nil(t, err)
proof, err := ParseProof(proofJson)
require.Nil(t, err)
proof1JSON, err := json.Marshal(proof)
require.Nil(t, err)
var proof1 types.Proof
err = json.Unmarshal(proof1JSON, &proof1)
require.Nil(t, err)
require.Equal(t, *proof, proof1)
}
func testCircuitParsePkBin(t *testing.T, circuit string) {
pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
require.Nil(t, err)
defer pkBinFile.Close()
pk, err := ParsePkBin(pkBinFile)
require.Nil(t, err)
pkJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
require.Nil(t, err)
pkJ, err := ParsePk(pkJson)
require.Nil(t, err)
assert.Equal(t, pkJ.NVars, pk.NVars)
assert.Equal(t, pkJ.NPublic, pk.NPublic)
assert.Equal(t, pkJ.DomainSize, pk.DomainSize)
assert.Equal(t, pkJ.VkAlpha1, pk.VkAlpha1)
assert.Equal(t, pkJ.VkBeta1, pk.VkBeta1)
assert.Equal(t, pkJ.VkDelta1, pk.VkDelta1)
assert.Equal(t, pkJ.VkDelta2, pk.VkDelta2)
assert.Equal(t, pkJ.PolsA, pk.PolsA)
assert.Equal(t, pkJ.PolsB, pk.PolsB)
assert.Equal(t, pkJ.A, pk.A)
assert.Equal(t, pkJ.B1, pk.B1)
assert.Equal(t, pkJ.B2, pk.B2)
assert.Equal(t, pkJ.C, pk.C)
assert.Equal(t, pkJ.HExps[:pkJ.DomainSize], pk.HExps[:pk.DomainSize]) // circom behaviour
}
func TestParsePkBin(t *testing.T) {
testCircuitParsePkBin(t, "circuit1k")
testCircuitParsePkBin(t, "circuit5k")
}
func testGoCircomPkFormat(t *testing.T, circuit string) {
pkJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
require.Nil(t, err)
pk, err := ParsePk(pkJson)
require.Nil(t, err)
pkGBin, err := PkToGoBin(pk)
require.Nil(t, err)
err = ioutil.WriteFile("../testdata/"+circuit+"/proving_key.go.bin", pkGBin, 0644)
assert.Nil(t, err)
// parse ProvingKeyGo
pkGoBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.go.bin")
require.Nil(t, err)
defer pkGoBinFile.Close()
pkG, err := ParsePkGoBin(pkGoBinFile)
require.Nil(t, err)
assert.Equal(t, pk.VkAlpha1, pkG.VkAlpha1)
assert.Equal(t, pk.VkBeta1, pkG.VkBeta1)
assert.Equal(t, pk.VkDelta1, pkG.VkDelta1)
assert.Equal(t, pk.VkBeta2, pkG.VkBeta2)
assert.Equal(t, pk.VkDelta2, pkG.VkDelta2)
assert.Equal(t, pk.A, pkG.A)
assert.Equal(t, pk.B1, pkG.B1)
assert.Equal(t, pk.B2, pkG.B2)
assert.Equal(t, pk.C, pkG.C)
assert.Equal(t, pk.HExps, pkG.HExps)
assert.Equal(t, pk.PolsA, pkG.PolsA)
assert.Equal(t, pk.PolsB, pkG.PolsB)
}
func TestGoCircomPkFormat(t *testing.T) {
testGoCircomPkFormat(t, "circuit1k")
testGoCircomPkFormat(t, "circuit5k")
// testGoCircomPkFormat(t, "circuit10k")
// testGoCircomPkFormat(t, "circuit20k")
}
func benchmarkParsePk(b *testing.B, circuit string) {
pkJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
require.Nil(b, err)
pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
require.Nil(b, err)
defer pkBinFile.Close()
pkGoBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.go.bin")
require.Nil(b, err)
defer pkGoBinFile.Close()
b.Run("ParsePkJson "+circuit, func(b *testing.B) {
for i := 0; i < b.N; i++ {
ParsePkBin(pkBinFile)
}
})
b.Run("ParsePkBin "+circuit, func(b *testing.B) {
for i := 0; i < b.N; i++ {
ParsePk(pkJson)
}
})
b.Run("ParsePkGoBin "+circuit, func(b *testing.B) {
for i := 0; i < b.N; i++ {
ParsePkGoBin(pkBinFile)
}
})
}
func BenchmarkParsePk(b *testing.B) {
benchmarkParsePk(b, "circuit1k")
benchmarkParsePk(b, "circuit5k")
// benchmarkParsePk(b, "circuit10k")
// benchmarkParsePk(b, "circuit20k")
}

View File

@@ -1,20 +1,21 @@
package prover
import (
"math/big"
"math/big"
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
)
type tableG1 struct {
data []*bn256.G1
type TableG1 struct{
data []*bn256.G1
}
func (t tableG1) getData() []*bn256.G1 {
return t.data
func (t TableG1) GetData() []*bn256.G1 {
return t.data
}
// Compute table of gsize elements as ::
// Table[0] = Inf
// Table[1] = a[0]
@@ -22,202 +23,191 @@ func (t tableG1) getData() []*bn256.G1 {
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
// EC table
table := make([]*bn256.G1, 0)
func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int){
// EC table
table := make([]*bn256.G1, 0)
// We need at least gsize elements. If not enough, fill with 0
aExt := make([]*bn256.G1, 0)
aExt = append(aExt, a...)
// We need at least gsize elements. If not enough, fill with 0
a_ext := make([]*bn256.G1, 0)
a_ext = append(a_ext, a...)
for i := len(a); i < gsize; i++ {
aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
}
for i:=len(a); i<gsize; i++ {
a_ext = append(a_ext,new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
}
elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
table = append(table, elG1)
lastPow2 := 1
nelems := 0
for i := 1; i < 1<<gsize; i++ {
elG1 := new(bn256.G1)
// if power of 2
if i&(i-1) == 0 {
lastPow2 = i
elG1.Set(aExt[nelems])
nelems++
} else {
elG1.Add(table[lastPow2], table[i-lastPow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine()
}
table = append(table, elG1)
}
if toaffine {
for i := 0; i < len(table); i++ {
info := table[i].Marshal()
table[i].Unmarshal(info)
}
}
t.data = table
}
func (t tableG1) Marshal() []byte {
info := make([]byte, 0)
for _, el := range t.data {
info = append(info, el.Marshal()...)
}
return info
elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
table = append(table,elG1)
last_pow2 := 1
nelems := 0
for i :=1; i< 1<<gsize; i++ {
elG1 := new(bn256.G1)
// if power of 2
if i & (i-1) == 0{
last_pow2 = i
elG1.Set(a_ext[nelems])
nelems++
} else {
elG1.Add(table[last_pow2], table[i-last_pow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine()
}
table = append(table, elG1)
}
t.data = table
}
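To make the table layout concrete: Table[i] is the sum of the input points whose index bit is set in i. For gsize = 3 and inputs a[0], a[1], a[2], NewTableG1 produces:
// Table[0] = Inf           Table[4] = a[2]
// Table[1] = a[0]          Table[5] = a[2]+a[0]
// Table[2] = a[1]          Table[6] = a[2]+a[1]
// Table[3] = a[1]+a[0]     Table[7] = a[2]+a[1]+a[0]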
// Multiply scalar by precomputed table of G1 elements
func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < gsize; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
for i:=len(k); i < gsize; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
msb := getMsb(k_ext)
for i := msb-1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G1).Add(Q,Q)
b := getBit(k_ext,i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
msb := getMsb(kExt)
for i := msb - 1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G1).Add(Q, Q)
b := getBit(kExt, i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
}
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
}
if Q_prev != nil {
return Q.Add(Q,Q_prev)
} else {
return Q
}
}
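A hedged usage sketch of NewTableG1 and MulTableG1, written as if it sat in package prover next to the tests (so it can reuse their imports: bytes, crypto/rand, math/big and the cloudflare bn256 package); the function name is illustrative:
func exampleTableG1() bool {
	// two random points and scalars
	k0, p0, _ := bn256.RandomG1(rand.Reader)
	k1, p1, _ := bn256.RandomG1(rand.Reader)

	// reference result with the standard per-point scalar multiplication
	ref := new(bn256.G1).Add(
		new(bn256.G1).ScalarMult(p0, k0),
		new(bn256.G1).ScalarMult(p1, k1))

	// table of 1<<2 = 4 entries: [Inf, p0, p1, p0+p1], then one joint multiexp
	gsize := 2
	var tbl TableG1
	tbl.NewTableG1([]*bn256.G1{p0, p1}, gsize)
	q := tbl.MulTableG1([]*big.Int{k0, k1}, nil, gsize)

	return bytes.Equal(ref.Marshal(), q.Marshal()) // expected: true
}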
// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0
minNElems := len(t) * gsize
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
for i := len(k); i < minNElems; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1, nbitsQ)
func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
// We need at least gsize elements. If not enough, fill with 0
min_nelems := len(t) * gsize
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < min_nelems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1,nbitsQ)
for i := 0; i < nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j := 0; j < len(t); j++ {
msb := getMsb(kExt[j*gsize : (j+1)*gsize])
// Perform bitwise addition
for j:=0; j < len(t); j++ {
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
}
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R, R)
R.Add(R, Q[i-1])
}
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i:=nbitsQ-1; i>0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R,R)
R.Add(R,Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
if Q_prev != nil {
return R.Add(R,Q_prev)
} else {
return R
}
}
// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize)
table := tableG1{}
Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize)
table := TableG1{}
Q:= new(bn256.G1).ScalarBaseMult(new(big.Int))
for i := 0; i < ntables-1; i++ {
table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
}
table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
for i:=0; i<ntables-1; i++ {
table.NewTableG1( a[i*gsize:(i+1)*gsize], gsize)
Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
}
table.NewTableG1( a[(ntables-1)*gsize:], gsize)
Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
if Q_prev != nil {
return Q.Add(Q,Q_prev)
} else {
return Q
}
}
// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize)
table := tableG1{}
func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
ntables := int((len(a) + gsize - 1) / gsize)
table := TableG1{}
// We need at least gsize elements. If not enough, fill with 0
minNElems := ntables * gsize
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
for i := len(k); i < minNElems; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1, nbitsQ)
// We need at least gsize elements. If not enough, fill with 0
min_nelems := ntables * gsize
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < min_nelems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G1,nbitsQ)
for i := 0; i < nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j := 0; j < ntables-1; j++ {
table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
msb := getMsb(kExt[j*gsize : (j+1)*gsize])
// Perform bitwise addition
for j:=0; j < ntables-1; j++ {
table.NewTableG1( a[j*gsize:(j+1)*gsize], gsize)
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
msb := getMsb(kExt[(ntables-1)*gsize:])
}
}
table.NewTableG1( a[(ntables-1)*gsize:], gsize)
msb := getMsb(k_ext[(ntables-1)*gsize:])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[(ntables-1)*gsize:], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[(ntables-1)*gsize:],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
// Consolidate Addition
R := new(bn256.G1).Set(Q[nbitsQ-1])
for i:=nbitsQ-1; i>0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G1).Add(R,R)
R.Add(R,Q[i-1])
}
if Q_prev != nil {
return R.Add(R,Q_prev)
} else {
return R
}
}
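The NoDouble variants defer all doublings: the per-bit accumulators Q[i] only collect table entries, and the final loop reconstructs R = Σ_i 2^i · Q[i] by Horner's rule. A tiny integer analogue of that consolidation step, illustrative only:
package main

import "fmt"

func main() {
	// stand-in values for the per-bit accumulators filled by the bitwise additions
	Q := []int64{3, 0, 5, 7}
	R := Q[len(Q)-1]
	for i := len(Q) - 1; i > 0; i-- {
		R = 2*R + Q[i-1] // same shape as the "Consolidate Addition" loop above
	}
	fmt.Println(R) // 79 = 7*8 + 5*4 + 0*2 + 3*1
}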
/////
@@ -225,14 +215,16 @@ func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize in
// TODO - How can we avoid replicating code in G2?
//G2
type tableG2 struct {
data []*bn256.G2
type TableG2 struct{
data []*bn256.G2
}
func (t tableG2) getData() []*bn256.G2 {
return t.data
func (t TableG2) GetData() []*bn256.G2 {
return t.data
}
// Compute table of gsize elements as ::
// Table[0] = Inf
// Table[1] = a[0]
@@ -240,224 +232,212 @@ func (t tableG2) getData() []*bn256.G2 {
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
// TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
// EC table
table := make([]*bn256.G2, 0)
func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int){
// EC table
table := make([]*bn256.G2, 0)
// We need at least gsize elements. If not enough, fill with 0
aExt := make([]*bn256.G2, 0)
aExt = append(aExt, a...)
// We need at least gsize elements. If not enough, fill with 0
a_ext := make([]*bn256.G2, 0)
a_ext = append(a_ext, a...)
for i := len(a); i < gsize; i++ {
aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
}
for i:=len(a); i<gsize; i++ {
a_ext = append(a_ext,new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
}
elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
table = append(table, elG2)
lastPow2 := 1
nelems := 0
for i := 1; i < 1<<gsize; i++ {
elG2 := new(bn256.G2)
// if power of 2
if i&(i-1) == 0 {
lastPow2 = i
elG2.Set(aExt[nelems])
nelems++
} else {
elG2.Add(table[lastPow2], table[i-lastPow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine()
}
table = append(table, elG2)
}
if toaffine {
for i := 0; i < len(table); i++ {
info := table[i].Marshal()
table[i].Unmarshal(info)
}
}
t.data = table
}
func (t tableG2) Marshal() []byte {
info := make([]byte, 0)
for _, el := range t.data {
info = append(info, el.Marshal()...)
}
return info
elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
table = append(table,elG2)
last_pow2 := 1
nelems := 0
for i :=1; i< 1<<gsize; i++ {
elG2 := new(bn256.G2)
// if power of 2
if i & (i-1) == 0{
last_pow2 = i
elG2.Set(a_ext[nelems])
nelems++
} else {
elG2.Add(table[last_pow2], table[i-last_pow2])
// TODO bn256 doesn't export MakeAffine function. We need to fork repo
//table[i].MakeAffine()
}
table = append(table, elG2)
}
t.data = table
}
// Multiply scalar by precomputed table of G2 elements
func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < gsize; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
for i:=len(k); i < gsize; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
msb := getMsb(k_ext)
for i := msb-1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G2).Add(Q,Q)
b := getBit(k_ext,i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
msb := getMsb(kExt)
for i := msb - 1; i >= 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
Q = new(bn256.G2).Add(Q, Q)
b := getBit(kExt, i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q.Add(Q, t.data[b])
}
}
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
}
if Q_prev != nil {
return Q.Add(Q, Q_prev)
} else {
return Q
}
}
// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0
minNElems := len(t) * gsize
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
for i := len(k); i < minNElems; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2, nbitsQ)
func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
// We need at least gsize elements. If not enough, fill with 0
min_nelems := len(t) * gsize
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < min_nelems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2,nbitsQ)
for i := 0; i < nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j := 0; j < len(t); j++ {
msb := getMsb(kExt[j*gsize : (j+1)*gsize])
// Perform bitwise addition
for j:=0; j < len(t); j++ {
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], t[j].data[b])
}
}
}
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i:=nbitsQ-1; i>0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R,R)
R.Add(R,Q[i-1])
}
if Q_prev != nil {
return R.Add(R,Q_prev)
} else {
return R
}
}
// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize)
table := tableG2{}
Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize)
table := TableG2{}
Q:= new(bn256.G2).ScalarBaseMult(new(big.Int))
for i := 0; i < ntables-1; i++ {
table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
}
table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
for i:=0; i<ntables-1; i++ {
table.NewTableG2( a[i*gsize:(i+1)*gsize], gsize)
Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
}
table.NewTableG2( a[(ntables-1)*gsize:], gsize)
Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
if qPrev != nil {
return Q.Add(Q, qPrev)
}
return Q
if Q_prev != nil {
return Q.Add(Q,Q_prev)
} else {
return Q
}
}
// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize)
table := tableG2{}
func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
ntables := int((len(a) + gsize - 1) / gsize)
table := TableG2{}
// We need at least gsize elements. If not enough, fill with 0
minNElems := ntables * gsize
kExt := make([]*big.Int, 0)
kExt = append(kExt, k...)
for i := len(k); i < minNElems; i++ {
kExt = append(kExt, new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2, nbitsQ)
// We need at least gsize elements. If not enough, fill with 0
min_nelems := ntables * gsize
k_ext := make([]*big.Int, 0)
k_ext = append(k_ext, k...)
for i := len(k); i < min_nelems; i++ {
k_ext = append(k_ext,new(big.Int).SetUint64(0))
}
// Init Adders
nbitsQ := cryptoConstants.Q.BitLen()
Q := make([]*bn256.G2,nbitsQ)
for i := 0; i < nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
for i:=0; i< nbitsQ; i++ {
Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
}
// Perform bitwise addition
for j := 0; j < ntables-1; j++ {
table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
msb := getMsb(kExt[j*gsize : (j+1)*gsize])
// Perform bitwise addition
for j:=0; j < ntables-1; j++ {
table.NewTableG2( a[j*gsize:(j+1)*gsize], gsize)
msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[j*gsize:(j+1)*gsize], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[j*gsize:(j+1)*gsize],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
msb := getMsb(kExt[(ntables-1)*gsize:])
}
}
table.NewTableG2( a[(ntables-1)*gsize:], gsize)
msb := getMsb(k_ext[(ntables-1)*gsize:])
for i := msb - 1; i >= 0; i-- {
b := getBit(kExt[(ntables-1)*gsize:], i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
for i := msb-1; i >= 0; i-- {
b := getBit(k_ext[(ntables-1)*gsize:],i)
if b != 0 {
// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
Q[i].Add(Q[i], table.data[b])
}
}
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i := nbitsQ - 1; i > 0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R, R)
R.Add(R, Q[i-1])
}
if qPrev != nil {
return R.Add(R, qPrev)
}
return R
// Consolidate Addition
R := new(bn256.G2).Set(Q[nbitsQ-1])
for i:=nbitsQ-1; i>0; i-- {
// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
R = new(bn256.G2).Add(R,R)
R.Add(R,Q[i-1])
}
if Q_prev != nil {
return R.Add(R,Q_prev)
} else {
return R
}
}
// Return most significant bit position in a group of Big Integers
func getMsb(k []*big.Int) int {
msb := 0
func getMsb(k []*big.Int) int{
msb := 0
for _, el := range k {
tmpMsb := el.BitLen()
if tmpMsb > msb {
msb = tmpMsb
}
}
return msb
for _, el := range(k){
tmp_msb := el.BitLen()
if tmp_msb > msb {
msb = tmp_msb
}
}
return msb
}
// Return ith bit in group of Big Integers
func getBit(k []*big.Int, i int) uint {
tableIdx := uint(0)
table_idx := uint(0)
for idx, el := range k {
b := el.Bit(i)
tableIdx += (b << idx)
}
return tableIdx
for idx, el := range(k){
b := el.Bit(i)
table_idx += (b << idx)
}
return table_idx
}
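A small worked example of the two helpers above, written as if it sat in package prover (it assumes the fmt and math/big imports); the function name is illustrative:
func exampleGetBit() {
	k := []*big.Int{big.NewInt(5), big.NewInt(3)} // binary 101 and 011
	fmt.Println(getMsb(k))    // 3: the widest scalar has 3 bits
	fmt.Println(getBit(k, 2)) // 1: bits (1,0) -> Table[1] = a[0]
	fmt.Println(getBit(k, 1)) // 2: bits (0,1) -> Table[2] = a[1]
	fmt.Println(getBit(k, 0)) // 3: bits (1,1) -> Table[3] = a[0]+a[1]
}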

View File

@@ -1,163 +1,166 @@
package prover
import (
"bytes"
"crypto/rand"
"fmt"
"math/big"
"testing"
"time"
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
"time"
"bytes"
"fmt"
)
const (
N1 = 5000
N2 = 5000
N1 = 5000
N2 = 5000
)
func randomBigIntArray(n int) []*big.Int {
func randomBigIntArray(n int) []*big.Int{
var p []*big.Int
for i := 0; i < n; i++ {
pi := randBI()
p = append(p, pi)
}
return p
return p
}
func randomG1Array(n int) []*bn256.G1 {
arrayG1 := make([]*bn256.G1, n)
arrayG1 := make([]*bn256.G1, n)
for i := 0; i < n; i++ {
_, arrayG1[i], _ = bn256.RandomG1(rand.Reader)
}
return arrayG1
for i:=0; i<n; i++ {
_, arrayG1[i], _ = bn256.RandomG1(rand.Reader)
}
return arrayG1
}
func randomG2Array(n int) []*bn256.G2 {
arrayG2 := make([]*bn256.G2, n)
arrayG2 := make([]*bn256.G2, n)
for i := 0; i < n; i++ {
_, arrayG2[i], _ = bn256.RandomG2(rand.Reader)
}
return arrayG2
for i:=0; i<n; i++ {
_, arrayG2[i], _ = bn256.RandomG2(rand.Reader)
}
return arrayG2
}
func TestTableG1(t *testing.T) {
n := N1
// init scalar
var arrayW = randomBigIntArray(n)
// init G1 array
var arrayG1 = randomG1Array(n)
beforeT := time.Now()
Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int))
for i := 0; i < n; i++ {
Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i]))
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
func TestTableG1(t *testing.T){
n := N1
for gsize := 2; gsize < 10; gsize++ {
ntables := int((n + gsize - 1) / gsize)
table := make([]tableG1, ntables)
// init scalar
var arrayW = randomBigIntArray(n)
// init G1 array
var arrayG1 = randomG1Array(n)
for i := 0; i < ntables-1; i++ {
table[i].newTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
}
table[ntables-1].newTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
beforeT := time.Now()
Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int))
for i:=0; i < n; i++ {
Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i]))
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
beforeT = time.Now()
Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
for i := 0; i < ntables-1; i++ {
Q2 = table[i].mulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].mulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
for gsize:=2; gsize < 10; gsize++ {
ntables := int((n + gsize - 1) / gsize)
table := make([]TableG1, ntables)
beforeT = time.Now()
Q3 := scalarMultG1(arrayG1, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
for i:=0; i<ntables-1; i++ {
table[i].NewTableG1( arrayG1[i*gsize:(i+1)*gsize], gsize)
}
table[ntables-1].NewTableG1( arrayG1[(ntables-1)*gsize:], gsize)
beforeT = time.Now()
Q4 := mulTableNoDoubleG1(table, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q2:= new(bn256.G1).ScalarBaseMult(new(big.Int))
for i:=0; i<ntables-1; i++ {
Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize,time.Since(beforeT))
beforeT = time.Now()
Q5 := scalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT))
if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
t.Error("Error in TMult")
}
if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
t.Error("Error in TMult with table comp")
}
if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
t.Error("Error in TMultNoDoublee with table comp")
}
}
beforeT = time.Now()
Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize,time.Since(beforeT))
beforeT = time.Now()
Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT))
if bytes.Compare(Q1.Marshal(),Q2.Marshal()) != 0 {
t.Error("Error in TMult")
}
if bytes.Compare(Q1.Marshal(),Q3.Marshal()) != 0 {
t.Error("Error in TMult with table comp")
}
if bytes.Compare(Q1.Marshal(),Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(),Q5.Marshal()) != 0 {
t.Error("Error in TMultNoDoublee with table comp")
}
}
}
func TestTableG2(t *testing.T) {
n := N2
func TestTableG2(t *testing.T){
n := N2
// init scalar
var arrayW = randomBigIntArray(n)
// init G2 array
var arrayG2 = randomG2Array(n)
// init scalar
var arrayW = randomBigIntArray(n)
// init G2 array
var arrayG2 = randomG2Array(n)
beforeT := time.Now()
Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i := 0; i < n; i++ {
Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i]))
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
beforeT := time.Now()
Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i:=0; i < n; i++ {
Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i]))
}
fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))
for gsize := 2; gsize < 10; gsize++ {
ntables := int((n + gsize - 1) / gsize)
table := make([]tableG2, ntables)
for gsize:=2; gsize < 10; gsize++ {
ntables := int((n + gsize - 1) / gsize)
table := make([]TableG2, ntables)
for i := 0; i < ntables-1; i++ {
table[i].newTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
}
table[ntables-1].newTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
for i:=0; i<ntables-1; i++ {
table[i].NewTableG2( arrayG2[i*gsize:(i+1)*gsize], gsize)
}
table[ntables-1].NewTableG2( arrayG2[(ntables-1)*gsize:], gsize)
beforeT = time.Now()
Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
for i := 0; i < ntables-1; i++ {
Q2 = table[i].mulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].mulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q2:= new(bn256.G2).ScalarBaseMult(new(big.Int))
for i:=0; i<ntables-1; i++ {
Q2 =table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
}
Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize,time.Since(beforeT))
beforeT = time.Now()
Q3 := scalarMultG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT))
beforeT = time.Now()
Q4 := mulTableNoDoubleG2(table, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize,time.Since(beforeT))
beforeT = time.Now()
Q5 := scalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
beforeT = time.Now()
Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize,time.Since(beforeT))
if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
t.Error("Error in TMult")
}
if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
t.Error("Error in TMult with table comp")
}
if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
t.Error("Error in TMultNoDoublee with table comp")
}
}
if bytes.Compare(Q1.Marshal(),Q2.Marshal()) != 0 {
t.Error("Error in TMult")
}
if bytes.Compare(Q1.Marshal(),Q3.Marshal()) != 0 {
t.Error("Error in TMult with table comp")
}
if bytes.Compare(Q1.Marshal(),Q4.Marshal()) != 0 {
t.Error("Error in TMultNoDouble")
}
if bytes.Compare(Q1.Marshal(),Q5.Marshal()) != 0 {
t.Error("Error in TMultNoDoublee with table comp")
}
}
}

View File

@@ -10,7 +10,7 @@ import (
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
"github.com/iden3/go-circom-prover-verifier/types"
"github.com/iden3/go-iden3-crypto/utils"
//"fmt"
//"fmt"
)
// Proof is the data structure of the Groth16 zkSNARK proof
@@ -45,7 +45,7 @@ type Witness []*big.Int
// Group Size
const (
GSIZE = 6
GSIZE = 6
)
func randBigInt() (*big.Int, error) {
@@ -81,34 +81,34 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
proofB := arrayOfZeroesG2(numcpu)
proofC := arrayOfZeroesG1(numcpu)
proofBG1 := arrayOfZeroesG1(numcpu)
gsize := GSIZE
gsize := GSIZE
var wg1 sync.WaitGroup
wg1.Add(numcpu)
for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
// split 1
go func(cpu int, ranges [2]int) {
proofA[cpu] = scalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofA[cpu],
gsize)
proofB[cpu] = scalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofB[cpu],
gsize)
proofBG1[cpu] = scalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofBG1[cpu],
gsize)
minLim := pk.NPublic + 1
if ranges[0] > pk.NPublic+1 {
minLim = ranges[0]
}
if ranges[1] > pk.NPublic+1 {
proofC[cpu] = scalarMultNoDoubleG1(pk.C[minLim:ranges[1]],
w[minLim:ranges[1]],
proofC[cpu],
gsize)
}
proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofA[cpu],
gsize)
proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofB[cpu],
gsize)
proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
w[ranges[0]:ranges[1]],
proofBG1[cpu],
gsize)
min_lim := pk.NPublic+1
if ranges[0] > pk.NPublic+1 {
min_lim = ranges[0]
}
if ranges[1] > pk.NPublic + 1 {
proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]],
w[min_lim:ranges[1]],
proofC[cpu],
gsize)
}
wg1.Done()
}(_cpu, _ranges)
}
@@ -142,10 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
for _cpu, _ranges := range ranges(len(h), numcpu) {
// split 2
go func(cpu int, ranges [2]int) {
proofC[cpu] = scalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
h[ranges[0]:ranges[1]],
proofC[cpu],
gsize)
proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
h[ranges[0]:ranges[1]],
proofC[cpu],
gsize)
wg2.Done()
}(_cpu, _ranges)
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
@@ -17,45 +16,25 @@ import (
func TestCircuitsGenerateProof(t *testing.T) {
testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
// testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
// testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
//testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
//testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
}
func testCircuitGenerateProof(t *testing.T, circuit string) {
// Using json provingKey file:
// provingKeyJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
// require.Nil(t, err)
// pk, err := parsers.ParsePk(provingKeyJson)
// require.Nil(t, err)
// witnessJson, err := ioutil.ReadFile("../testdata/" + circuit + "/witness.json")
// require.Nil(t, err)
// w, err := parsers.ParseWitness(witnessJson)
// require.Nil(t, err)
// Using bin provingKey file:
// pkBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.bin")
// require.Nil(t, err)
// defer pkBinFile.Close()
// pk, err := parsers.ParsePkBin(pkBinFile)
// require.Nil(t, err)
// Using go bin provingKey file:
pkGoBinFile, err := os.Open("../testdata/" + circuit + "/proving_key.go.bin")
provingKeyJson, err := ioutil.ReadFile("../testdata/" + circuit + "/proving_key.json")
require.Nil(t, err)
defer pkGoBinFile.Close()
pk, err := parsers.ParsePkGoBin(pkGoBinFile)
pk, err := parsers.ParsePk(provingKeyJson)
require.Nil(t, err)
witnessBinFile, err := os.Open("../testdata/" + circuit + "/witness.bin")
witnessJson, err := ioutil.ReadFile("../testdata/" + circuit + "/witness.json")
require.Nil(t, err)
defer witnessBinFile.Close()
w, err := parsers.ParseWitnessBin(witnessBinFile)
w, err := parsers.ParseWitness(witnessJson)
require.Nil(t, err)
beforeT := time.Now()
proof, pubSignals, err := GenerateProof(pk, w)
assert.Nil(t, err)
fmt.Println("proof generation time for "+circuit+" elapsed:", time.Since(beforeT))
fmt.Println("proof generation time elapsed:", time.Since(beforeT))
proofStr, err := parsers.ProofToJson(proof)
assert.Nil(t, err)

View File

@@ -16,7 +16,7 @@ There may be some concern on the additional size of the tables since they need t
| Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|--|---|---|---|---|---|
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54 s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
@@ -26,7 +26,7 @@ There may be some concern on the additional size of the tables since they need t
There are 5000 G2 elliptic curve points, and the scalars are 254 bits (BN256 curve).
| Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|--|---|---|---|---|---|
|---|---|---|---|---|---|---|---|---|
| Naive | 3.55s | | | | | | | |
| Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms |
| Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s |

25
tables.md Normal file
View File

@@ -0,0 +1,25 @@
# Tables Pre-calculation
The most time-consuming part of a zkSNARK proof calculation is the scalar multiplication of elliptic curve points. The direct mechanism computes and accumulates every multiplication individually, even though the prover only needs the final accumulated sum.
There are two potential improvements to the naive approach:
1. Apply Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
2. Leave the doubling operations for the last step.
Both options can be combined.
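As a rough illustration of how the two ideas combine, here is a minimal, self-contained Go sketch for a single group of points: it precomputes the 2^gsize subset sums, accumulates per-bit partial sums with one table lookup per bit position (Strauss-Shamir), and pays for the doublings only once in the final combination step. The function names (`zeroG1`, `buildTable`, `accumulate`, `combine`) and the tiny two-point group are purely illustrative and are not this package's API.

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"

	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
)

const nbits = 254 // scalar size for the BN256 curve

// zeroG1 returns the identity element (scalar 0 times the generator).
func zeroG1() *bn256.G1 { return new(bn256.G1).ScalarBaseMult(new(big.Int)) }

// buildTable returns T where T[j] is the sum of points[i] for every bit i set in j.
func buildTable(points []*bn256.G1) []*bn256.G1 {
	table := make([]*bn256.G1, 1<<uint(len(points)))
	table[0] = zeroG1()
	for j := 1; j < len(table); j++ {
		i := 0
		for (j>>uint(i))&1 == 0 {
			i++
		}
		// Reuse the entry with the lowest set bit cleared: T[j] = T[j&(j-1)] + points[i].
		table[j] = new(bn256.G1).Add(table[j&(j-1)], points[i])
	}
	return table
}

// accumulate adds one group's contribution to the per-bit partial sums:
// a single table lookup per bit position and no doublings at all.
func accumulate(partial, table []*bn256.G1, w []*big.Int) {
	for b := 0; b < nbits; b++ {
		idx := 0
		for i, wi := range w {
			idx |= int(wi.Bit(b)) << uint(i)
		}
		if idx != 0 {
			partial[b] = new(bn256.G1).Add(partial[b], table[idx])
		}
	}
}

// combine folds the per-bit partial sums into the final result, performing the
// nbits-1 doublings exactly once, however many groups contributed to partial.
func combine(partial []*bn256.G1) *bn256.G1 {
	res := partial[nbits-1]
	for b := nbits - 2; b >= 0; b-- {
		res = new(bn256.G1).Add(res, res) // shared doubling
		res = new(bn256.G1).Add(res, partial[b])
	}
	return res
}

func main() {
	// One group of two points with small scalars, so the result is easy to check.
	points := []*bn256.G1{
		new(bn256.G1).ScalarBaseMult(big.NewInt(3)), // 3*G
		new(bn256.G1).ScalarBaseMult(big.NewInt(5)), // 5*G
	}
	w := []*big.Int{big.NewInt(7), big.NewInt(11)}

	partial := make([]*bn256.G1, nbits)
	for b := range partial {
		partial[b] = zeroG1()
	}
	accumulate(partial, buildTable(points), w)
	got := combine(partial)

	want := new(bn256.G1).ScalarBaseMult(big.NewInt(7*3 + 11*5)) // expect 76*G
	fmt.Println("match:", bytes.Equal(got.Marshal(), want.Marshal()))
}
```

With many groups, each group only contributes one point addition per bit position, and the 253 doublings in `combine` are shared by all of them, which is where the "No Doubling" savings in the table below come from.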
In the following table we show the results of the naive method, Strauss-Shamir, and Strauss-Shamir + no doubling. The last two options are repeated for different table group sizes (GS).
There are 5000 G1 elliptic curve points, and the scalars are 254 bits (BN256 curve).
There may be some concern about the additional size of the tables, since they need to be loaded onto a smartphone for the proof and the time required to load them may exceed the benefits. If this is a problem, an alternative is to compute the tables during the proof itself; depending on the group size, the timing may still be better than the naive approach.
| Algorithm | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.033s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s |
| No Doubling + Table Computation | 6.83s | 5.1s | 4.16s | 3.52s | 3.22s | 3.21s | 3.57s | 4.56s |
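In terms of the sketch above, the "+ Table Computation" rows include the time spent building the subset-sum tables inside the measured region, while the plain Strauss and No Doubling rows assume the tables were precomputed and loaded beforehand; the No Doubling rows defer all doublings to the final combination step.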

View File

@@ -7,4 +7,3 @@ rm */*.cpp
rm */*.sym
rm */*.r1cs
rm */*.sol
rm */*.bin

View File

@@ -45,23 +45,19 @@ compile_and_ts_and_witness
cd ../
echo "convert witness & pk of circuit1k to bin & go bin"
echo "convert witness & pk of circuit1k to bin"
node node_modules/wasmsnark/tools/buildwitness.js -i circuit1k/witness.json -o circuit1k/witness.bin
node node_modules/wasmsnark/tools/buildpkey.js -i circuit1k/proving_key.json -o circuit1k/proving_key.bin
go run ../cli/cli.go -convert -pk circuit1k/proving_key.json -pkbin circuit1k/proving_key.go.bin
echo "convert witness & pk of circuit5k to bin & go bin"
echo "convert witness & pk of circuit5k to bin"
node node_modules/wasmsnark/tools/buildwitness.js -i circuit5k/witness.json -o circuit5k/witness.bin
node node_modules/wasmsnark/tools/buildpkey.js -i circuit5k/proving_key.json -o circuit5k/proving_key.bin
go run ../cli/cli.go -convert -pk circuit5k/proving_key.json -pkbin circuit5k/proving_key.go.bin
# echo "convert witness & pk of circuit10k to bin & go bin"
# echo "convert witness & pk of circuit10k to bin"
# node node_modules/wasmsnark/tools/buildwitness.js -i circuit10k/witness.json -o circuit10k/witness.bin
# node node_modules/wasmsnark/tools/buildpkey.js -i circuit10k/proving_key.json -o circuit10k/proving_key.bin
# go run ../cli/cli.go -convert -pk circuit10k/proving_key.json -pkbin circuit10k/proving_key.go.bin
#
# echo "convert witness & pk of circuit20k to bin & go bin"
# echo "convert witness & pk of circuit20k to bin"
# node node_modules/wasmsnark/tools/buildwitness.js -i circuit20k/witness.json -o circuit20k/witness.bin
# node node_modules/wasmsnark/tools/buildpkey.js -i circuit20k/proving_key.json -o circuit20k/proving_key.bin
# go run ../cli/cli.go -convert -pk circuit20k/proving_key.json -pkbin circuit20k/proving_key.go.bin

View File

@@ -1,15 +1,11 @@
package types
import (
"encoding/hex"
"encoding/json"
"math/big"
bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
)
var Q, _ = new(big.Int).SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
// R is the mod of the finite field
var R, _ = new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
@@ -20,52 +16,6 @@ type Proof struct {
C *bn256.G1
}
type proofAux struct {
A string `json:"pi_a"`
B string `json:"pi_b"`
C string `json:"pi_c"`
}
func (p Proof) MarshalJSON() ([]byte, error) {
var pa proofAux
pa.A = hex.EncodeToString(p.A.Marshal())
pa.B = hex.EncodeToString(p.B.Marshal())
pa.C = hex.EncodeToString(p.C.Marshal())
return json.Marshal(pa)
}
func (p *Proof) UnmarshalJSON(data []byte) error {
var pa proofAux
if err := json.Unmarshal(data, &pa); err != nil {
return err
}
aBytes, err := hex.DecodeString(pa.A)
if err != nil {
return err
}
p.A = new(bn256.G1)
if _, err := p.A.Unmarshal(aBytes); err != nil {
return err
}
bBytes, err := hex.DecodeString(pa.B)
if err != nil {
return err
}
p.B = new(bn256.G2)
if _, err := p.B.Unmarshal(bBytes); err != nil {
return err
}
cBytes, err := hex.DecodeString(pa.C)
if err != nil {
return err
}
p.C = new(bn256.G1)
if _, err := p.C.Unmarshal(cBytes); err != nil {
return err
}
return nil
}
// Pk holds the data structure of the ProvingKey
type Pk struct {
A []*bn256.G1