Mirror of https://github.com/arnaucube/go-circom-prover-verifier.git (synced 2026-02-07 11:26:44 +01:00)

Compare commits: feature/pa...feature/ta (15 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 86621a0dbe | |
| | 9ed6e14fad | |
| | 2d45cb7039 | |
| | 68b0c2fb54 | |
| | e3b5f88660 | |
| | 36b48215f0 | |
| | a110eb4ca1 | |
| | 72913bc801 | |
| | 8c81f5041e | |
| | a314ecc02f | |
| | 700f2a4503 | |
| | 02fc1ea6f7 | |
| | 16054cc679 | |
| | 296dc53736 | |
| | 0977fac08c | |
.github/workflows/main.yml (vendored), 6 lines changed

@@ -17,11 +17,7 @@ jobs:
       - name: Install Nodejs
         uses: actions/setup-node@v1
         with:
-          go-version: 10.x
+          node-version: '10.x'
-      - name: Install circom
-        run: npm install -g circom
-      - name: Install snarkjs
-        run: npm install -g snarkjs
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Compile circuits and execute Go tests
.gitignore (vendored), 1 line added

@@ -4,5 +4,6 @@ testdata/*/*.cpp
 testdata/*/*.sym
 testdata/*/*.r1cs
 testdata/*/*.sol
+testdata/*/*.bin
 !testdata/*/inputs.json
 cli/*.json
README.md

@@ -1,6 +1,6 @@
 # go-circom-prover-verifier [](https://godoc.org/github.com/iden3/go-circom-prover-verifier) [](https://goreportcard.com/report/github.com/iden3/go-circom-prover-verifier) [](https://github.com/iden3/go-circom-prover-verifier/actions?query=workflow%3ATest)

-Experimental Go implementation of the [Groth16 protocol](https://eprint.iacr.org/2016/260.pdf) zkSNARK prover & verifier compatible with [circom](https://github.com/iden3/circom).
+Go implementation of the [Groth16 protocol](https://eprint.iacr.org/2016/260.pdf) zkSNARK prover & verifier compatible with [circom](https://github.com/iden3/circom).

 Using [bn256](https://github.com/ethereum/go-ethereum/tree/master/crypto/bn256/cloudflare) (used by [go-ethereum](https://github.com/ethereum/go-ethereum)) for the Pairing curve operations.

@@ -82,9 +82,9 @@ Usage of /tmp/go-build620318239/b001/exe/cli:

 - Prove
 ```
-> go run cli.go -prove -provingkey=../testdata/small/proving_key.json -witness=../testdata/small/witness.json
+> go run cli.go -prove -provingkey=../testdata/circuit5k/proving_key.json -witness=../testdata/circuit5k/witness.json
 ```
 - Verify
 ```
-> go run cli.go -verify -verificationkey=../testdata/small/verification_key.json
+> go run cli.go -verify -verificationkey=../testdata/circuit5k/verification_key.json
 ```
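Besides the CLI shown in the README, the functions touched later in this compare (parsers.ParsePk, parsers.ParseWitness, GenerateProof, Verify) can be called directly from Go. The following is a minimal sketch only: the prover/verifier/parsers package layout and a parsers.ParseVk counterpart to ParsePk are assumptions, as are the file paths; only the function signatures are taken from this diff.

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/iden3/go-circom-prover-verifier/parsers"
	"github.com/iden3/go-circom-prover-verifier/prover"
	"github.com/iden3/go-circom-prover-verifier/verifier" // assumed package path
)

func main() {
	// Proving key and witness produced by circom/snarkjs (paths are illustrative).
	pkJSON, _ := ioutil.ReadFile("testdata/circuit5k/proving_key.json")
	pk, _ := parsers.ParsePk(pkJSON)
	wJSON, _ := ioutil.ReadFile("testdata/circuit5k/witness.json")
	w, _ := parsers.ParseWitness(wJSON)

	// GenerateProof returns the Groth16 proof plus the public signals.
	proof, pubSignals, _ := prover.GenerateProof(pk, w)

	// Verification key, assumed to be parsed by a ParseVk helper analogous to ParsePk.
	vkJSON, _ := ioutil.ReadFile("testdata/circuit5k/verification_key.json")
	vk, _ := parsers.ParseVk(vkJSON)
	fmt.Println("proof valid:", verifier.Verify(vk, proof, pubSignals))
}
```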
cli/cli.go

@@ -5,6 +5,7 @@ import (
     "flag"
     "fmt"
     "io/ioutil"
+    "os"
     "time"

     "github.com/iden3/go-circom-prover-verifier/parsers"

@@ -34,15 +35,15 @@ func main() {
         if err != nil {
             fmt.Println("Error:", err)
         }
-        return
+        os.Exit(0)
     } else if *verify {
         err := cmdVerify(*proofPath, *verificationKeyPath, *publicPath)
         if err != nil {
             fmt.Println("Error:", err)
         }
-        return
+        os.Exit(0)
     }
-    fmt.Println("use -help for the list of commands")
+    flag.PrintDefaults()
 }

 func cmdProve(provingKeyPath, witnessPath, proofPath, publicPath string) error {
parsers (package parsers)

@@ -1,11 +1,14 @@
 package parsers

 import (
+    "bufio"
     "bytes"
     "encoding/hex"
     "encoding/json"
     "fmt"
+    "io"
     "math/big"
+    "os"
     "strconv"
     "strings"

@@ -496,3 +499,31 @@ func ProofToJson(p *types.Proof) ([]byte, error) {

     return json.Marshal(ps)
 }
+
+// ParseWitnessBin parses the binary file representation of the Witness into the Witness struct
+func ParseWitnessBin(f *os.File) (types.Witness, error) {
+    var w types.Witness
+    r := bufio.NewReader(f)
+    for {
+        b := make([]byte, 32)
+        n, err := r.Read(b)
+        if err == io.EOF {
+            return w, nil
+        } else if err != nil {
+            return nil, err
+        }
+        if n != 32 {
+            return nil, fmt.Errorf("error on value format, expected 32 bytes, got %v", n)
+        }
+        w = append(w, new(big.Int).SetBytes(swapEndianness(b[0:32])))
+    }
+}
+
+// swapEndianness swaps the order of the bytes in the slice.
+func swapEndianness(b []byte) []byte {
+    o := make([]byte, len(b))
+    for i := range b {
+        o[len(b)-1-i] = b[i]
+    }
+    return o
+}
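The new ParseWitnessBin implies a simple on-disk layout for witness.bin: a flat sequence of 32-byte values stored little-endian (hence the swapEndianness call before big.Int.SetBytes, which expects big-endian input). As a hedged illustration only, here is a small helper that writes that layout, useful for producing test fixtures by hand; writeWitnessBin is not part of the repository.

```go
package main

import (
	"math/big"
	"os"
)

// writeWitnessBin serializes each witness value as a 32-byte little-endian
// word, the layout that ParseWitnessBin expects to read back.
func writeWitnessBin(path string, w []*big.Int) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	for _, v := range w {
		be := v.FillBytes(make([]byte, 32)) // big-endian, zero-padded to 32 bytes
		le := make([]byte, 32)
		for i := range be {
			le[31-i] = be[i] // reverse into little-endian
		}
		if _, err := f.Write(le); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = writeWitnessBin("witness.bin", []*big.Int{big.NewInt(1), big.NewInt(42)})
}
```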
parsers tests (package parsers)

@@ -1,9 +1,12 @@
 package parsers

 import (
+    "io/ioutil"
+    "os"
     "testing"

     "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
 )

 func TestParseArrayG1(t *testing.T) {

@@ -143,3 +146,29 @@ func TestParseArrayG2(t *testing.T) {
     assert.Equal(t, "bn256.G2((1922d70c934543aa655ec3277f7fa10a25ec973a4f001a7c54ce4954b4916f8c, 14865e836947c42cf35b47d30e06535fff9dab319c4296e28afde368960671d5), (2f50fbe77925b0a9d718c9ab38638bafa7c65f43f0d09035e518df97ad294847, 177dfa1a3b8627faf0425d9511bcb4c6ca986ea05e3803b5c643c35b94a7e6fe))", a[3].String())

 }
+
+func testCircuitParseWitnessBin(t *testing.T, circuit string) {
+    witnessBinFile, err := os.Open("../testdata/" + circuit + "/witness.bin")
+    require.Nil(t, err)
+    defer witnessBinFile.Close()
+    witness, err := ParseWitnessBin(witnessBinFile)
+    require.Nil(t, err)
+
+    witnessJson, err := ioutil.ReadFile("../testdata/" + circuit + "/witness.json")
+    require.Nil(t, err)
+    w, err := ParseWitness(witnessJson)
+    require.Nil(t, err)
+
+    assert.Equal(t, len(w), len(witness))
+    assert.Equal(t, w[0], witness[0])
+    assert.Equal(t, w[1], witness[1])
+    assert.Equal(t, w[10], witness[10])
+    assert.Equal(t, w[len(w)-3], witness[len(w)-3])
+    assert.Equal(t, w[len(w)-2], witness[len(w)-2])
+    assert.Equal(t, w[len(w)-1], witness[len(w)-1])
+}
+
+func TestParseWitnessBin(t *testing.T) {
+    testCircuitParseWitnessBin(t, "circuit1k")
+    testCircuitParseWitnessBin(t, "circuit5k")
+}
prover/cpu.prof (BIN): binary file not shown
prover/gextra.go (new file, 443 lines)

@@ -0,0 +1,443 @@
package prover

import (
    "math/big"

    bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
    cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
)

type TableG1 struct {
    data []*bn256.G1
}

func (t TableG1) GetData() []*bn256.G1 {
    return t.data
}

// Compute table of gsize elements as:
// Table[0] = Inf
// Table[1] = a[0]
// Table[2] = a[1]
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int) {
    // EC table
    table := make([]*bn256.G1, 0)

    // We need at least gsize elements. If not enough, fill with 0
    a_ext := make([]*bn256.G1, 0)
    a_ext = append(a_ext, a...)

    for i := len(a); i < gsize; i++ {
        a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
    }

    elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
    table = append(table, elG1)
    last_pow2 := 1
    nelems := 0
    for i := 1; i < 1<<gsize; i++ {
        elG1 := new(bn256.G1)
        // if power of 2
        if i&(i-1) == 0 {
            last_pow2 = i
            elG1.Set(a_ext[nelems])
            nelems++
        } else {
            elG1.Add(table[last_pow2], table[i-last_pow2])
            // TODO bn256 doesn't export MakeAffine function. We need to fork repo
            //table[i].MakeAffine()
        }
        table = append(table, elG1)
    }
    t.data = table
}

// Multiply scalar by precomputed table of G1 elements
func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
    // We need at least gsize elements. If not enough, fill with 0
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)

    for i := len(k); i < gsize; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }

    Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))

    msb := getMsb(k_ext)

    for i := msb - 1; i >= 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        Q = new(bn256.G1).Add(Q, Q)
        b := getBit(k_ext, i)
        if b != 0 {
            // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
            Q.Add(Q, t.data[b])
        }
    }
    if Q_prev != nil {
        return Q.Add(Q, Q_prev)
    } else {
        return Q
    }
}

// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
    // We need at least gsize elements. If not enough, fill with 0
    min_nelems := len(t) * gsize
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)
    for i := len(k); i < min_nelems; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }
    // Init Adders
    nbitsQ := cryptoConstants.Q.BitLen()
    Q := make([]*bn256.G1, nbitsQ)

    for i := 0; i < nbitsQ; i++ {
        Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
    }

    // Perform bitwise addition
    for j := 0; j < len(t); j++ {
        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])

        for i := msb - 1; i >= 0; i-- {
            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
            if b != 0 {
                // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                Q[i].Add(Q[i], t[j].data[b])
            }
        }
    }

    // Consolidate Addition
    R := new(bn256.G1).Set(Q[nbitsQ-1])
    for i := nbitsQ - 1; i > 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        R = new(bn256.G1).Add(R, R)
        R.Add(R, Q[i-1])
    }

    if Q_prev != nil {
        return R.Add(R, Q_prev)
    } else {
        return R
    }
}

// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
    ntables := int((len(a) + gsize - 1) / gsize)
    table := TableG1{}
    Q := new(bn256.G1).ScalarBaseMult(new(big.Int))

    for i := 0; i < ntables-1; i++ {
        table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize)
        Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
    }
    table.NewTableG1(a[(ntables-1)*gsize:], gsize)
    Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)

    if Q_prev != nil {
        return Q.Add(Q, Q_prev)
    } else {
        return Q
    }
}

// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
    ntables := int((len(a) + gsize - 1) / gsize)
    table := TableG1{}

    // We need at least gsize elements. If not enough, fill with 0
    min_nelems := ntables * gsize
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)
    for i := len(k); i < min_nelems; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }
    // Init Adders
    nbitsQ := cryptoConstants.Q.BitLen()
    Q := make([]*bn256.G1, nbitsQ)

    for i := 0; i < nbitsQ; i++ {
        Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
    }

    // Perform bitwise addition
    for j := 0; j < ntables-1; j++ {
        table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize)
        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])

        for i := msb - 1; i >= 0; i-- {
            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
            if b != 0 {
                // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                Q[i].Add(Q[i], table.data[b])
            }
        }
    }
    table.NewTableG1(a[(ntables-1)*gsize:], gsize)
    msb := getMsb(k_ext[(ntables-1)*gsize:])

    for i := msb - 1; i >= 0; i-- {
        b := getBit(k_ext[(ntables-1)*gsize:], i)
        if b != 0 {
            // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
            Q[i].Add(Q[i], table.data[b])
        }
    }

    // Consolidate Addition
    R := new(bn256.G1).Set(Q[nbitsQ-1])
    for i := nbitsQ - 1; i > 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        R = new(bn256.G1).Add(R, R)
        R.Add(R, Q[i-1])
    }
    if Q_prev != nil {
        return R.Add(R, Q_prev)
    } else {
        return R
    }
}

/////

// TODO - How can avoid replicating code in G2?
//G2

type TableG2 struct {
    data []*bn256.G2
}

func (t TableG2) GetData() []*bn256.G2 {
    return t.data
}

// Compute table of gsize elements as:
// Table[0] = Inf
// Table[1] = a[0]
// Table[2] = a[1]
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int) {
    // EC table
    table := make([]*bn256.G2, 0)

    // We need at least gsize elements. If not enough, fill with 0
    a_ext := make([]*bn256.G2, 0)
    a_ext = append(a_ext, a...)

    for i := len(a); i < gsize; i++ {
        a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
    }

    elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
    table = append(table, elG2)
    last_pow2 := 1
    nelems := 0
    for i := 1; i < 1<<gsize; i++ {
        elG2 := new(bn256.G2)
        // if power of 2
        if i&(i-1) == 0 {
            last_pow2 = i
            elG2.Set(a_ext[nelems])
            nelems++
        } else {
            elG2.Add(table[last_pow2], table[i-last_pow2])
            // TODO bn256 doesn't export MakeAffine function. We need to fork repo
            //table[i].MakeAffine()
        }
        table = append(table, elG2)
    }
    t.data = table
}

// Multiply scalar by precomputed table of G2 elements
func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
    // We need at least gsize elements. If not enough, fill with 0
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)

    for i := len(k); i < gsize; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }

    Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))

    msb := getMsb(k_ext)

    for i := msb - 1; i >= 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        Q = new(bn256.G2).Add(Q, Q)
        b := getBit(k_ext, i)
        if b != 0 {
            // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
            Q.Add(Q, t.data[b])
        }
    }
    if Q_prev != nil {
        return Q.Add(Q, Q_prev)
    } else {
        return Q
    }
}

// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
    // We need at least gsize elements. If not enough, fill with 0
    min_nelems := len(t) * gsize
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)
    for i := len(k); i < min_nelems; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }
    // Init Adders
    nbitsQ := cryptoConstants.Q.BitLen()
    Q := make([]*bn256.G2, nbitsQ)

    for i := 0; i < nbitsQ; i++ {
        Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
    }

    // Perform bitwise addition
    for j := 0; j < len(t); j++ {
        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])

        for i := msb - 1; i >= 0; i-- {
            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
            if b != 0 {
                // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                Q[i].Add(Q[i], t[j].data[b])
            }
        }
    }

    // Consolidate Addition
    R := new(bn256.G2).Set(Q[nbitsQ-1])
    for i := nbitsQ - 1; i > 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        R = new(bn256.G2).Add(R, R)
        R.Add(R, Q[i-1])
    }
    if Q_prev != nil {
        return R.Add(R, Q_prev)
    } else {
        return R
    }
}

// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
    ntables := int((len(a) + gsize - 1) / gsize)
    table := TableG2{}
    Q := new(bn256.G2).ScalarBaseMult(new(big.Int))

    for i := 0; i < ntables-1; i++ {
        table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize)
        Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
    }
    table.NewTableG2(a[(ntables-1)*gsize:], gsize)
    Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)

    if Q_prev != nil {
        return Q.Add(Q, Q_prev)
    } else {
        return Q
    }
}

// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
    ntables := int((len(a) + gsize - 1) / gsize)
    table := TableG2{}

    // We need at least gsize elements. If not enough, fill with 0
    min_nelems := ntables * gsize
    k_ext := make([]*big.Int, 0)
    k_ext = append(k_ext, k...)
    for i := len(k); i < min_nelems; i++ {
        k_ext = append(k_ext, new(big.Int).SetUint64(0))
    }
    // Init Adders
    nbitsQ := cryptoConstants.Q.BitLen()
    Q := make([]*bn256.G2, nbitsQ)

    for i := 0; i < nbitsQ; i++ {
        Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
    }

    // Perform bitwise addition
    for j := 0; j < ntables-1; j++ {
        table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize)
        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])

        for i := msb - 1; i >= 0; i-- {
            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
            if b != 0 {
                // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                Q[i].Add(Q[i], table.data[b])
            }
        }
    }
    table.NewTableG2(a[(ntables-1)*gsize:], gsize)
    msb := getMsb(k_ext[(ntables-1)*gsize:])

    for i := msb - 1; i >= 0; i-- {
        b := getBit(k_ext[(ntables-1)*gsize:], i)
        if b != 0 {
            // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
            Q[i].Add(Q[i], table.data[b])
        }
    }

    // Consolidate Addition
    R := new(bn256.G2).Set(Q[nbitsQ-1])
    for i := nbitsQ - 1; i > 0; i-- {
        // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
        R = new(bn256.G2).Add(R, R)
        R.Add(R, Q[i-1])
    }
    if Q_prev != nil {
        return R.Add(R, Q_prev)
    } else {
        return R
    }
}

// Return most significant bit position in a group of Big Integers
func getMsb(k []*big.Int) int {
    msb := 0

    for _, el := range k {
        tmp_msb := el.BitLen()
        if tmp_msb > msb {
            msb = tmp_msb
        }
    }
    return msb
}

// Return ith bit in group of Big Integers
func getBit(k []*big.Int, i int) uint {
    table_idx := uint(0)

    for idx, el := range k {
        b := el.Bit(i)
        table_idx += (b << idx)
    }
    return table_idx
}
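The table construction in NewTableG1 pairs with getBit: at each bit position i, the i-th bits of the gsize scalars in a group are packed into an index b, and Table[b] is exactly the sum of the base points whose scalar has that bit set (the Strauss-Shamir idea described in tables.md). A toy sketch over plain integers, purely illustrative and with no curve arithmetic, that checks this correspondence for gsize = 3:

```go
package main

import "fmt"

// Toy model of NewTableG1/getBit over integers instead of curve points:
// table[b] holds the sum of base[j] for every bit j set in b.
func main() {
	base := []int{10, 20, 40} // stands in for a group of gsize=3 curve points
	gsize := len(base)

	table := make([]int, 1<<gsize)
	for b := 1; b < 1<<gsize; b++ {
		for j := 0; j < gsize; j++ {
			if b&(1<<j) != 0 {
				table[b] += base[j]
			}
		}
	}

	// Scalars 6, 3, 5 for the three points; bit i of each scalar, packed as
	// in getBit, selects the table entry added at that bit position.
	scalars := []uint{6, 3, 5}
	total, acc := 0, 0
	for i := 2; i >= 0; i-- {
		acc *= 2 // the doubling step of MulTableG1
		b := 0
		for j, k := range scalars {
			b |= int((k>>uint(i))&1) << j
		}
		acc += table[b]
	}
	for j, k := range scalars {
		total += int(k) * base[j]
	}
	fmt.Println(acc == total, acc, total) // true 320 320
}
```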
prover/gextra_test.go (new file, 166 lines)

@@ -0,0 +1,166 @@
package prover

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "math/big"
    "testing"
    "time"

    bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
)

const (
    N1 = 5000
    N2 = 5000
)

func randomBigIntArray(n int) []*big.Int {
    var p []*big.Int
    for i := 0; i < n; i++ {
        pi := randBI()
        p = append(p, pi)
    }

    return p
}

func randomG1Array(n int) []*bn256.G1 {
    arrayG1 := make([]*bn256.G1, n)

    for i := 0; i < n; i++ {
        _, arrayG1[i], _ = bn256.RandomG1(rand.Reader)
    }
    return arrayG1
}

func randomG2Array(n int) []*bn256.G2 {
    arrayG2 := make([]*bn256.G2, n)

    for i := 0; i < n; i++ {
        _, arrayG2[i], _ = bn256.RandomG2(rand.Reader)
    }
    return arrayG2
}

func TestTableG1(t *testing.T) {
    n := N1

    // init scalar
    var arrayW = randomBigIntArray(n)
    // init G1 array
    var arrayG1 = randomG1Array(n)

    beforeT := time.Now()
    Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int))
    for i := 0; i < n; i++ {
        Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i]))
    }
    fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))

    for gsize := 2; gsize < 10; gsize++ {
        ntables := int((n + gsize - 1) / gsize)
        table := make([]TableG1, ntables)

        for i := 0; i < ntables-1; i++ {
            table[i].NewTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize)
        }
        table[ntables-1].NewTableG1(arrayG1[(ntables-1)*gsize:], gsize)

        beforeT = time.Now()
        Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
        for i := 0; i < ntables-1; i++ {
            Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
        }
        Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
        fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))

        if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
            t.Error("Error in TMult")
        }
        if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
            t.Error("Error in TMult with table comp")
        }
        if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
            t.Error("Error in TMultNoDouble")
        }
        if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
            t.Error("Error in TMultNoDouble with table comp")
        }
    }
}

func TestTableG2(t *testing.T) {
    n := N2

    // init scalar
    var arrayW = randomBigIntArray(n)
    // init G2 array
    var arrayG2 = randomG2Array(n)

    beforeT := time.Now()
    Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int))
    for i := 0; i < n; i++ {
        Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i]))
    }
    fmt.Println("Std. Mult. time elapsed:", time.Since(beforeT))

    for gsize := 2; gsize < 10; gsize++ {
        ntables := int((n + gsize - 1) / gsize)
        table := make([]TableG2, ntables)

        for i := 0; i < ntables-1; i++ {
            table[i].NewTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize)
        }
        table[ntables-1].NewTableG2(arrayG2[(ntables-1)*gsize:], gsize)

        beforeT = time.Now()
        Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
        for i := 0; i < ntables-1; i++ {
            Q2 = table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
        }
        Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
        fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))

        beforeT = time.Now()
        Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
        fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))

        if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
            t.Error("Error in TMult")
        }
        if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 {
            t.Error("Error in TMult with table comp")
        }
        if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 {
            t.Error("Error in TMultNoDouble")
        }
        if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 {
            t.Error("Error in TMultNoDouble with table comp")
        }
    }
}
prover/prover.go, 106 lines changed

@@ -10,6 +10,7 @@ import (
     bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
     "github.com/iden3/go-circom-prover-verifier/types"
     "github.com/iden3/go-iden3-crypto/utils"
+    //"fmt"
 )

 // Proof is the data structure of the Groth16 zkSNARK proof

@@ -42,6 +43,11 @@ type Pk struct {
 // Witness contains the witness
 type Witness []*big.Int

+// Group Size
+const (
+    GSIZE = 6
+)
+
 func randBigInt() (*big.Int, error) {
     maxbits := types.R.BitLen()
     b := make([]byte, (maxbits/8)-1)

@@ -68,27 +74,41 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
         return nil, nil, err
     }

+    // BEGIN PAR
     numcpu := runtime.NumCPU()

     proofA := arrayOfZeroesG1(numcpu)
     proofB := arrayOfZeroesG2(numcpu)
     proofC := arrayOfZeroesG1(numcpu)
     proofBG1 := arrayOfZeroesG1(numcpu)
+    gsize := GSIZE
     var wg1 sync.WaitGroup
     wg1.Add(numcpu)
     for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
         // split 1
         go func(cpu int, ranges [2]int) {
-            tmpG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
-            tmpG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
-            for i := ranges[0]; i < ranges[1]; i++ {
-                proofA[cpu].Add(proofA[cpu], tmpG1.ScalarMult(pk.A[i], w[i]))
-                proofB[cpu].Add(proofB[cpu], tmpG2.ScalarMult(pk.B2[i], w[i]))
-                proofBG1[cpu].Add(proofBG1[cpu], tmpG1.ScalarMult(pk.B1[i], w[i]))
-                if i >= pk.NPublic+1 {
-                    proofC[cpu].Add(proofC[cpu], tmpG1.ScalarMult(pk.C[i], w[i]))
-                }
-            }
+            proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofA[cpu],
+                gsize)
+            proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofB[cpu],
+                gsize)
+            proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofBG1[cpu],
+                gsize)
+            min_lim := pk.NPublic + 1
+            if ranges[0] > pk.NPublic+1 {
+                min_lim = ranges[0]
+            }
+            if ranges[1] > pk.NPublic+1 {
+                proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]],
+                    w[min_lim:ranges[1]],
+                    proofC[cpu],
+                    gsize)
+            }
             wg1.Done()
         }(_cpu, _ranges)
     }

@@ -103,11 +123,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
     proof.A = proofA[0]
     proof.B = proofB[0]
     proof.C = proofC[0]
+    // END PAR

     h := calculateH(pk, w)

-    println("len(h)", len(h))
-
     proof.A.Add(proof.A, pk.VkAlpha1)
     proof.A.Add(proof.A, new(bn256.G1).ScalarMult(pk.VkDelta1, r))

@@ -123,10 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
     for _cpu, _ranges := range ranges(len(h), numcpu) {
         // split 2
         go func(cpu int, ranges [2]int) {
-            tmpG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
-            for i := ranges[0]; i < ranges[1]; i++ {
-                proofC[cpu].Add(proofC[cpu], tmpG1.ScalarMult(pk.HExps[i], h[i]))
-            }
+            proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
+                h[ranges[0]:ranges[1]],
+                proofC[cpu],
+                gsize)
             wg2.Done()
         }(_cpu, _ranges)
     }

@@ -139,7 +158,7 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err

     proof.C.Add(proof.C, new(bn256.G1).ScalarMult(proof.A, s))
     proof.C.Add(proof.C, new(bn256.G1).ScalarMult(proofBG1[0], r))
-    rsneg := new(big.Int).Mod(new(big.Int).Neg(new(big.Int).Mul(r, s)), types.R) // fAdd & fMul
+    rsneg := new(big.Int).Mod(new(big.Int).Neg(new(big.Int).Mul(r, s)), types.R)
     proof.C.Add(proof.C, new(bn256.G1).ScalarMult(pk.VkDelta1, rsneg))

     pubSignals := w[1 : pk.NPublic+1]

@@ -152,14 +171,27 @@ func calculateH(pk *types.Pk, w types.Witness) []*big.Int {
     polAT := arrayOfZeroes(m)
     polBT := arrayOfZeroes(m)

-    for i := 0; i < pk.NVars; i++ {
-        for j := range pk.PolsA[i] {
-            polAT[j] = fAdd(polAT[j], fMul(w[i], pk.PolsA[i][j]))
-        }
-        for j := range pk.PolsB[i] {
-            polBT[j] = fAdd(polBT[j], fMul(w[i], pk.PolsB[i][j]))
-        }
-    }
+    numcpu := runtime.NumCPU()
+
+    var wg1 sync.WaitGroup
+    wg1.Add(2)
+    go func() {
+        for i := 0; i < pk.NVars; i++ {
+            for j := range pk.PolsA[i] {
+                polAT[j] = fAdd(polAT[j], fMul(w[i], pk.PolsA[i][j]))
+            }
+        }
+        wg1.Done()
+    }()
+    go func() {
+        for i := 0; i < pk.NVars; i++ {
+            for j := range pk.PolsB[i] {
+                polBT[j] = fAdd(polBT[j], fMul(w[i], pk.PolsB[i][j]))
+            }
+        }
+        wg1.Done()
+    }()
+    wg1.Wait()
     polATe := utils.BigIntArrayToElementArray(polAT)
     polBTe := utils.BigIntArrayToElementArray(polBT)

@@ -170,19 +202,35 @@ func calculateH(pk *types.Pk, w types.Witness) []*big.Int {
     roots := newRootsT()
     roots.setRoots(r)

-    for i := 0; i < len(polASe); i++ {
-        polASe[i].Mul(polASe[i], roots.roots[r][i])
-        polBSe[i].Mul(polBSe[i], roots.roots[r][i])
-    }
+    var wg2 sync.WaitGroup
+    wg2.Add(numcpu)
+    for _cpu, _ranges := range ranges(len(polASe), numcpu) {
+        go func(cpu int, ranges [2]int) {
+            for i := ranges[0]; i < ranges[1]; i++ {
+                polASe[i].Mul(polASe[i], roots.roots[r][i])
+                polBSe[i].Mul(polBSe[i], roots.roots[r][i])
+            }
+            wg2.Done()
+        }(_cpu, _ranges)
+    }
+    wg2.Wait()

     polATodd := fft(polASe)
     polBTodd := fft(polBSe)

     polABT := arrayOfZeroesE(len(polASe) * 2)
-    for i := 0; i < len(polASe); i++ {
-        polABT[2*i].Mul(polATe[i], polBTe[i])
-        polABT[2*i+1].Mul(polATodd[i], polBTodd[i])
-    }
+    var wg3 sync.WaitGroup
+    wg3.Add(numcpu)
+    for _cpu, _ranges := range ranges(len(polASe), numcpu) {
+        go func(cpu int, ranges [2]int) {
+            for i := ranges[0]; i < ranges[1]; i++ {
+                polABT[2*i].Mul(polATe[i], polBTe[i])
+                polABT[2*i+1].Mul(polATodd[i], polBTodd[i])
+            }
+            wg3.Done()
+        }(_cpu, _ranges)
+    }
+    wg3.Wait()

     hSeFull := ifft(polABT)
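The parallel split above relies on a ranges(n, numcpu) helper that is not part of this diff. Purely as an assumption about its behavior (the real implementation lives elsewhere in the repository and may differ), it presumably divides the index space [0, n) into numcpu contiguous [start, end) pairs, roughly like this sketch:

```go
package main

import "fmt"

// ranges splits [0, n) into ncpu contiguous [start, end) intervals
// (an illustrative sketch; the repository's own helper may differ).
func ranges(n, ncpu int) [][2]int {
	out := make([][2]int, ncpu)
	chunk := (n + ncpu - 1) / ncpu // ceil(n / ncpu)
	for i := 0; i < ncpu; i++ {
		start, end := i*chunk, (i+1)*chunk
		if start > n {
			start = n
		}
		if end > n {
			end = n
		}
		out[i] = [2]int{start, end}
	}
	return out
}

func main() {
	fmt.Println(ranges(10, 4)) // [[0 3] [3 6] [6 9] [9 10]]
}
```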
Binary file not shown.
prover tests

@@ -16,8 +16,8 @@ import (
 func TestCircuitsGenerateProof(t *testing.T) {
     testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
     testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
-    // testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
+    //testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
-    // testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
+    //testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
 }

 func testCircuitGenerateProof(t *testing.T, circuit string) {

@@ -61,12 +61,12 @@ func testCircuitGenerateProof(t *testing.T, circuit string) {

 func BenchmarkGenerateProof(b *testing.B) {
     // benchmark with a circuit of 10000 constraints
-    provingKeyJson, err := ioutil.ReadFile("../testdata/circuit1/proving_key.json")
+    provingKeyJson, err := ioutil.ReadFile("../testdata/circuit5k/proving_key.json")
     require.Nil(b, err)
     pk, err := parsers.ParsePk(provingKeyJson)
     require.Nil(b, err)

-    witnessJson, err := ioutil.ReadFile("../testdata/circuit1/witness.json")
+    witnessJson, err := ioutil.ReadFile("../testdata/circuit5k/witness.json")
     require.Nil(b, err)
     w, err := parsers.ParseWitness(witnessJson)
     require.Nil(b, err)
prover/tables.md (new file, 49 lines)

# Tables Pre-calculation

The most time-consuming part of a zkSNARK proof calculation is the scalar multiplication of elliptic curve points. The direct mechanism computes and accumulates each multiplication separately; the prover, however, only needs the total accumulation.

There are two potential improvements to the naive approach:

1. Apply the Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
2. Leave the doubling operation for the last step.

Both options can be combined.

In the following table, we show the results of using the naive method, Strauss-Shamir, and Strauss-Shamir + no doubling. The last two options are repeated for different table group sizes (GS).

There are 50000 G1 elliptic curve points, and the scalars are 254 bits (BN256 curve).

There may be some concern about the additional size of the tables, since they need to be loaded into a smartphone during the proof, and the time required to load them may exceed the benefits. If this is a problem, another alternative is to compute the tables during the proof itself. Depending on the group size, timing may still be better than the naive approach.

| Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s |
| No Doubling + Table Computation | 6.83s | 5.1s | 4.16s | 3.52s | 3.22s | 3.21s | 3.57s | 4.56s |

There are 5000 G2 elliptic curve points, and the scalars are 254 bits (BN256 curve).

| Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 3.55s | - | - | - | - | - | - | - |
| Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms |
| Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s |
| No Doubling | 1.49s | 1.16s | 952ms | 719ms | 661ms | 548ms | 506ms | 444ms |
| No Doubling + Table Computation | 1.55s | 1.21s | 984ms | 841ms | 826ms | 847ms | 1.03s | 1.39s |

| GS | Extra Disk Space per Constraint (G1) |
|----|--------|
| 2 | 64 B |
| 3 | 106 B |
| 4 | 192 B |
| 5 | 346 B |
| 6 | 618 B |
| 7 | 1106 B |
| 8 | 1984 B |
| 9 | 3577 B |
| N | 2^(N+6)/N - 64 B |

Extra disk space per constraint in G2 is twice the requirement for G1.
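The per-constraint figures in the last table are consistent with a simple count, assuming the usual 64-byte uncompressed encoding of a bn256 G1 point: a group of N base points expands into a table of 2^N precomputed points, of which N entries are the base points that must be stored anyway, so the extra space per constraint is

$$\frac{(2^N - N)\cdot 64}{N} \;=\; \frac{2^{N+6}}{N} - 64 \ \text{bytes}.$$

For example, N = 4 gives 2^10 / 4 - 64 = 192 B, matching the table; G2 points are twice as large, hence the doubled requirement quoted for G2.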
tables.md (new file, 25 lines)

# Tables Pre-calculation

The most time-consuming part of a zkSNARK proof calculation is the scalar multiplication of elliptic curve points. The direct mechanism computes and accumulates each multiplication separately; the prover, however, only needs the total accumulation.

There are two potential improvements to the naive approach:

1. Apply the Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
2. Leave the doubling operation for the last step.

Both options can be combined.

In the following table, we show the results of using the naive method, Strauss-Shamir, and Strauss-Shamir + no doubling. The last two options are repeated for different table group sizes (GS).

There are 5000 G1 elliptic curve points, and the scalars are 254 bits (BN256 curve).

There may be some concern about the additional size of the tables, since they need to be loaded into a smartphone during the proof, and the time required to load them may exceed the benefits. If this is a problem, another alternative is to compute the tables during the proof itself. Depending on the group size, timing may still be better than the naive approach.

| Algorithm | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.033s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s |
| No Doubling + Table Computation | 6.83s | 5.1s | 4.16s | 3.52s | 3.22s | 3.21s | 3.57s | 4.56s |
testdata/compile-circuits.sh (vendored), 21 lines changed

@@ -1,5 +1,7 @@
 #!/bin/sh

+npm install
+
 compile_and_ts_and_witness() {
     echo $(date +"%T") "circom circuit.circom --r1cs --wasm --sym"
     itime="$(date -u +%s)"

@@ -40,3 +42,22 @@ compile_and_ts_and_witness
 # echo "compile & trustesetup for circuit20k"
 # cd ../circuit20k
 # compile_and_ts_and_witness
+
+cd ../
+
+echo "convert witness & pk of circuit1k to bin"
+node node_modules/wasmsnark/tools/buildwitness.js -i circuit1k/witness.json -o circuit1k/witness.bin
+node node_modules/wasmsnark/tools/buildpkey.js -i circuit1k/proving_key.json -o circuit1k/proving_key.bin
+
+echo "convert witness & pk of circuit5k to bin"
+node node_modules/wasmsnark/tools/buildwitness.js -i circuit5k/witness.json -o circuit5k/witness.bin
+node node_modules/wasmsnark/tools/buildpkey.js -i circuit5k/proving_key.json -o circuit5k/proving_key.bin
+
+# echo "convert witness & pk of circuit10k to bin"
+# node node_modules/wasmsnark/tools/buildwitness.js -i circuit10k/witness.json -o circuit10k/witness.bin
+# node node_modules/wasmsnark/tools/buildpkey.js -i circuit10k/proving_key.json -o circuit10k/proving_key.bin
+#
+# echo "convert witness & pk of circuit20k to bin"
+# node node_modules/wasmsnark/tools/buildwitness.js -i circuit20k/witness.json -o circuit20k/witness.bin
+# node node_modules/wasmsnark/tools/buildpkey.js -i circuit20k/proving_key.json -o circuit20k/proving_key.bin
testdata/package-lock.json (generated, vendored), 18 lines changed

@@ -1,6 +1,8 @@
 {
-  "requires": true,
+  "name": "binformat",
+  "version": "0.0.1",
   "lockfileVersion": 1,
+  "requires": true,
   "dependencies": {
     "@types/color-name": {
       "version": "1.1.1",

@@ -40,6 +42,11 @@
       "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.48.tgz",
       "integrity": "sha512-j51egjPa7/i+RdiRuJbPdJ2FIUYYPhvYLjzoYbcMMm62ooO6F94fETG4MTs46zPAF9Brs04OajboA/qTGuz78w=="
     },
+    "blakejs": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/blakejs/-/blakejs-1.1.0.tgz",
+      "integrity": "sha1-ad+S75U6qIylGjLfarHFShVfx6U="
+    },
     "brace-expansion": {
       "version": "1.1.11",
       "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",

@@ -819,6 +826,15 @@
         "big-integer": "^1.6.48"
       }
     },
+    "wasmsnark": {
+      "version": "0.0.10",
+      "resolved": "https://registry.npmjs.org/wasmsnark/-/wasmsnark-0.0.10.tgz",
+      "integrity": "sha512-ARrJWhxvnBJXMERwBcnEnO5ByLwYhJZr1xwac9dl61SN7+1eOmG7Od3SJL1GzU6zaf86b9wza20y1d5ThCecLw==",
+      "requires": {
+        "big-integer": "^1.6.42",
+        "blakejs": "^1.1.0"
+      }
+    },
     "which": {
       "version": "1.3.1",
       "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
testdata/package.json (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
{
  "name": "binformat",
  "version": "0.0.1",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "GPL-3.0",
  "dependencies": {
    "wasmsnark": "0.0.10",
    "circom": "^0.5.11",
    "snarkjs": "^0.1.31"
  }
}
verifier (file name not preserved in this mirror)

@@ -17,6 +17,7 @@ type Vk struct {
     IC []*bn256.G1
 }

+// Verify verifies the Groth16 zkSNARK proof
 func Verify(vk *types.Vk, proof *types.Proof, inputs []*big.Int) bool {
     if len(inputs)+1 != len(vk.IC) {
         fmt.Println("len(inputs)+1 != len(vk.IC)")
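For context (standard Groth16, not something introduced by this diff): the check that Verify performs is the pairing equation

$$e(A,\,B) \;=\; e(\alpha,\,\beta)\cdot e\!\Big(IC_0 + \sum_i \mathrm{inputs}_i\cdot IC_{i+1},\;\gamma\Big)\cdot e(C,\,\delta),$$

which also explains the guard above: the verification key must hold one IC element per public input plus the constant term, hence len(inputs)+1 == len(vk.IC).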