From 0f48cfa2a553f7724ece6574f939eb9178e10972 Mon Sep 17 00:00:00 2001
From: arnaucube
Date: Wed, 6 May 2020 14:18:07 +0200
Subject: [PATCH] Add proof parsers to string (decimal & hex)

Also adds ProofToSmartContractFormat, which returns a ProofString rather
than a *types.Proof, since the proof.B elements with their coordinate
pairs swapped are not a valid point in the bn256.G2 format.

Also unexports internal structs and methods of the prover package.
Also applies golint.
---
 parsers/parsers.go      |  78 ++++++++++++-
 parsers/parsers_test.go |  24 ++++
 prover/gextra.go        | 247 +++++++++++++++++++---------------------
 prover/gextra_test.go   |  35 +++---
 prover/prover.go        |  58 +++++-----
 prover/prover_test.go   |   4 +-
 6 files changed, 268 insertions(+), 178 deletions(-)

diff --git a/parsers/parsers.go b/parsers/parsers.go
index 297f977..0dfaf97 100644
--- a/parsers/parsers.go
+++ b/parsers/parsers.go
@@ -467,8 +467,35 @@ func stringToG2(h [][]string) (*bn256.G2, error) {
     return p, err
 }
 
-// ProofToJson outputs the Proof i Json format
-func ProofToJson(p *types.Proof) ([]byte, error) {
+// ProofStringToSmartContractFormat converts a ProofString to a ProofString in the SmartContract format
+func ProofStringToSmartContractFormat(s ProofString) ProofString {
+    var rs ProofString
+    rs.A = make([]string, 2)
+    rs.B = make([][]string, 2)
+    rs.B[0] = make([]string, 2)
+    rs.B[1] = make([]string, 2)
+    rs.C = make([]string, 2)
+
+    rs.A[0] = s.A[0]
+    rs.A[1] = s.A[1]
+    rs.B[0][0] = s.B[0][1]
+    rs.B[0][1] = s.B[0][0]
+    rs.B[1][0] = s.B[1][1]
+    rs.B[1][1] = s.B[1][0]
+    rs.C[0] = s.C[0]
+    rs.C[1] = s.C[1]
+    rs.Protocol = s.Protocol
+    return rs
+}
+
+// ProofToSmartContractFormat converts a *types.Proof to a ProofString in the SmartContract format
+func ProofToSmartContractFormat(p *types.Proof) ProofString {
+    s := ProofToString(p)
+    return ProofStringToSmartContractFormat(s)
+}
+
+// ProofToString converts the Proof to a ProofString
+func ProofToString(p *types.Proof) ProofString {
     var ps ProofString
     ps.A = make([]string, 3)
     ps.B = make([][]string, 3)
@@ -497,10 +524,55 @@
     ps.Protocol = "groth"
 
+    return ps
+}
+
+// ProofToJson outputs the Proof in JSON format
+func ProofToJson(p *types.Proof) ([]byte, error) {
+    ps := ProofToString(p)
+    return json.Marshal(ps)
+}
+
+// ProofToHex converts the Proof to a ProofString with hexadecimal strings
+func ProofToHex(p *types.Proof) ProofString {
+    var ps ProofString
+    ps.A = make([]string, 3)
+    ps.B = make([][]string, 3)
+    ps.B[0] = make([]string, 2)
+    ps.B[1] = make([]string, 2)
+    ps.B[2] = make([]string, 2)
+    ps.C = make([]string, 3)
+
+    a := p.A.Marshal()
+    ps.A[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[:32]).Bytes())
+    ps.A[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[32:64]).Bytes())
+    ps.A[2] = "1"
+
+    b := p.B.Marshal()
+    ps.B[0][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[:32]).Bytes())
+    ps.B[0][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[32:64]).Bytes())
+    ps.B[1][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[64:96]).Bytes())
+    ps.B[1][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[96:128]).Bytes())
+    ps.B[2][0] = "1"
+    ps.B[2][1] = "0"
+
+    c := p.C.Marshal()
+    ps.C[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[:32]).Bytes())
+    ps.C[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[32:64]).Bytes())
+    ps.C[2] = "1"
+
+    ps.Protocol = "groth"
+
+    return ps
+}
+
+// ProofToJsonHex outputs the Proof in JSON format with hexadecimal strings
+func ProofToJsonHex(p *types.Proof) ([]byte, error) {
+    ps := ProofToHex(p)
     return json.Marshal(ps)
 }
 
-// ParseWitness parses binary file representation of the Witness into the Witness struct
+// ParseWitnessBin parses the binary file representation of the Witness into the Witness struct
 func ParseWitnessBin(f *os.File) (types.Witness, error) {
     var w types.Witness
     r := bufio.NewReader(f)
diff --git a/parsers/parsers_test.go b/parsers/parsers_test.go
index 2df3830..82b6dea 100644
--- a/parsers/parsers_test.go
+++ b/parsers/parsers_test.go
@@ -172,3 +172,27 @@ func TestParseWitnessBin(t *testing.T) {
     testCircuitParseWitnessBin(t, "circuit1k")
     testCircuitParseWitnessBin(t, "circuit5k")
 }
+
+func TestProofSmartContractFormat(t *testing.T) {
+    proofJson, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
+    require.Nil(t, err)
+    proof, err := ParseProof(proofJson)
+    require.Nil(t, err)
+    pS := ProofToString(proof)
+
+    pSC := ProofToSmartContractFormat(proof)
+    assert.Nil(t, err)
+    assert.Equal(t, pS.A[0], pSC.A[0])
+    assert.Equal(t, pS.A[1], pSC.A[1])
+    assert.Equal(t, pS.B[0][0], pSC.B[0][1])
+    assert.Equal(t, pS.B[0][1], pSC.B[0][0])
+    assert.Equal(t, pS.B[1][0], pSC.B[1][1])
+    assert.Equal(t, pS.B[1][1], pSC.B[1][0])
+    assert.Equal(t, pS.C[0], pSC.C[0])
+    assert.Equal(t, pS.C[1], pSC.C[1])
+    assert.Equal(t, pS.Protocol, pSC.Protocol)
+
+    pSC2 := ProofStringToSmartContractFormat(pS)
+    assert.Equal(t, pSC, pSC2)
+}
diff --git a/prover/gextra.go b/prover/gextra.go
index fb44f0f..b374878 100644
--- a/prover/gextra.go
+++ b/prover/gextra.go
@@ -1,16 +1,17 @@
 package prover
 
 import (
+    "math/big"
+
     bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
     cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
-    "math/big"
 )
 
-type TableG1 struct {
+type tableG1 struct {
     data []*bn256.G1
 }
 
-func (t TableG1) GetData() []*bn256.G1 {
+func (t tableG1) getData() []*bn256.G1 {
     return t.data
 }
 
@@ -21,31 +22,31 @@ func (t TableG1) GetData() []*bn256.G1 {
 // Table[3] = a[0]+a[1]
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
-func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
+func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
     // EC table
     table := make([]*bn256.G1, 0)
 
     // We need at least gsize elements. If not enough, fill with 0
-    a_ext := make([]*bn256.G1, 0)
-    a_ext = append(a_ext, a...)
+    aExt := make([]*bn256.G1, 0)
+    aExt = append(aExt, a...)
 
     for i := len(a); i < gsize; i++ {
-        a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
+        aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
     }
 
     elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
     table = append(table, elG1)
-    last_pow2 := 1
+    lastPow2 := 1
     nelems := 0
     for i := 1; i < 1<<gsize; i++ {
         elG1 := new(bn256.G1)
         // if power of 2
         if i&(i-1) == 0 {
-            last_pow2 = i
-            elG1.Set(a_ext[nelems])
+            lastPow2 = i
+            elG1.Set(aExt[nelems])
             nelems++
         } else {
-            elG1.Add(table[last_pow2], table[i-last_pow2])
+            elG1.Add(table[lastPow2], table[i-lastPow2])
         }
         table = append(table, elG1)
     }
     if toaffine {
         for i := 0; i < len(table); i++ {
             info := table[i].Marshal()
             table[i].Unmarshal(info)
         }
     }
     t.data = table
 }
 
 // Multiply scalar by precomputed table of G1 elements
-func (t TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func (t tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
     // We need at least gsize elements. If not enough, fill with 0
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
     for i := len(k); i < gsize; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
 
     Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
-    msb := getMsb(k_ext)
+    msb := getMsb(kExt)
 
     for i := msb - 1; i >= 0; i-- {
         // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
         Q = new(bn256.G1).Add(Q, Q)
-        b := getBit(k_ext, i)
+        b := getBit(kExt, i)
         if b != 0 {
             // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
             Q.Add(Q, t.data[b])
         }
     }
-    if Q_prev != nil {
-        return Q.Add(Q, Q_prev)
-    } else {
-        return Q
+    if qPrev != nil {
+        return Q.Add(Q, qPrev)
     }
+    return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
     // We need at least gsize elements. If not enough, fill with 0
-    min_nelems := len(t) * gsize
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
-    for i := len(k); i < min_nelems; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+    minNElems := len(t) * gsize
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
+    for i := len(k); i < minNElems; i++ {
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
     // Init Adders
     nbitsQ := cryptoConstants.Q.BitLen()
@@ -118,10 +118,10 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 
     // Perform bitwise addition
     for j := 0; j < len(t); j++ {
-        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+        msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
         for i := msb - 1; i >= 0; i-- {
-            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+            b := getBit(kExt[j*gsize:(j+1)*gsize], i)
             if b != 0 {
                 // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                 Q[i].Add(Q[i], t[j].data[b])
@@ -137,45 +137,43 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
         R.Add(R, Q[i-1])
     }
 
-    if Q_prev != nil {
-        return R.Add(R, Q_prev)
-    } else {
-        return R
+    if qPrev != nil {
+        return R.Add(R, qPrev)
     }
+    return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
     ntables := int((len(a) + gsize - 1) / gsize)
-    table := TableG1{}
+    table := tableG1{}
     Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
 
     for i := 0; i < ntables-1; i++ {
-        table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
-        Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
+        table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
+        Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
     }
-    table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-    Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
+    table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+    Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
 
-    if Q_prev != nil {
-        return Q.Add(Q, Q_prev)
-    } else {
-        return Q
+    if qPrev != nil {
+        return Q.Add(Q, qPrev)
     }
+    return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
     ntables := int((len(a) + gsize - 1) / gsize)
-    table := TableG1{}
+    table := tableG1{}
 
     // We need at least gsize elements. If not enough, fill with 0
-    min_nelems := ntables * gsize
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
-    for i := len(k); i < min_nelems; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+    minNElems := ntables * gsize
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
+    for i := len(k); i < minNElems; i++ {
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
     // Init Adders
     nbitsQ := cryptoConstants.Q.BitLen()
@@ -187,22 +185,22 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 
     // Perform bitwise addition
     for j := 0; j < ntables-1; j++ {
-        table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
-        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+        table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
+        msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
         for i := msb - 1; i >= 0; i-- {
-            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+            b := getBit(kExt[j*gsize:(j+1)*gsize], i)
             if b != 0 {
                 // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                 Q[i].Add(Q[i], table.data[b])
             }
         }
     }
-    table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-    msb := getMsb(k_ext[(ntables-1)*gsize:])
+    table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+    msb := getMsb(kExt[(ntables-1)*gsize:])
 
     for i := msb - 1; i >= 0; i-- {
-        b := getBit(k_ext[(ntables-1)*gsize:], i)
+        b := getBit(kExt[(ntables-1)*gsize:], i)
         if b != 0 {
             // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
             Q[i].Add(Q[i], table.data[b])
@@ -216,11 +214,10 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
         R = new(bn256.G1).Add(R, R)
         R.Add(R, Q[i-1])
     }
-    if Q_prev != nil {
-        return R.Add(R, Q_prev)
-    } else {
-        return R
+    if qPrev != nil {
+        return R.Add(R, qPrev)
     }
+    return R
 }
 
 /////
@@ -228,11 +225,11 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 
 // TODO - How can avoid replicating code in G2?
 //G2
-type TableG2 struct {
+type tableG2 struct {
     data []*bn256.G2
 }
 
-func (t TableG2) GetData() []*bn256.G2 {
+func (t tableG2) getData() []*bn256.G2 {
     return t.data
 }
 
@@ -244,31 +241,31 @@ func (t TableG2) GetData() []*bn256.G2 {
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
 // NOTE -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
-func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
+func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
     // EC table
     table := make([]*bn256.G2, 0)
 
     // We need at least gsize elements. If not enough, fill with 0
-    a_ext := make([]*bn256.G2, 0)
-    a_ext = append(a_ext, a...)
+    aExt := make([]*bn256.G2, 0)
+    aExt = append(aExt, a...)
 
     for i := len(a); i < gsize; i++ {
-        a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
+        aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
     }
 
     elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
     table = append(table, elG2)
-    last_pow2 := 1
+    lastPow2 := 1
     nelems := 0
     for i := 1; i < 1<<gsize; i++ {
         elG2 := new(bn256.G2)
         // if power of 2
         if i&(i-1) == 0 {
-            last_pow2 = i
-            elG2.Set(a_ext[nelems])
+            lastPow2 = i
+            elG2.Set(aExt[nelems])
             nelems++
         } else {
-            elG2.Add(table[last_pow2], table[i-last_pow2])
+            elG2.Add(table[lastPow2], table[i-lastPow2])
         }
         table = append(table, elG2)
     }
     if toaffine {
         for i := 0; i < len(table); i++ {
             info := table[i].Marshal()
             table[i].Unmarshal(info)
         }
     }
     t.data = table
 }
 
 // Multiply scalar by precomputed table of G2 elements
-func (t TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func (t tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
     // We need at least gsize elements. If not enough, fill with 0
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
     for i := len(k); i < gsize; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
 
     Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
-    msb := getMsb(k_ext)
+    msb := getMsb(kExt)
 
     for i := msb - 1; i >= 0; i-- {
         // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
         Q = new(bn256.G2).Add(Q, Q)
-        b := getBit(k_ext, i)
+        b := getBit(kExt, i)
         if b != 0 {
             // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
             Q.Add(Q, t.data[b])
         }
     }
-    if Q_prev != nil {
-        return Q.Add(Q, Q_prev)
-    } else {
-        return Q
+    if qPrev != nil {
+        return Q.Add(Q, qPrev)
     }
+    return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
     // We need at least gsize elements. If not enough, fill with 0
-    min_nelems := len(t) * gsize
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
-    for i := len(k); i < min_nelems; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+    minNElems := len(t) * gsize
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
+    for i := len(k); i < minNElems; i++ {
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
     // Init Adders
     nbitsQ := cryptoConstants.Q.BitLen()
@@ -341,10 +337,10 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 
     // Perform bitwise addition
     for j := 0; j < len(t); j++ {
-        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+        msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
         for i := msb - 1; i >= 0; i-- {
-            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+            b := getBit(kExt[j*gsize:(j+1)*gsize], i)
             if b != 0 {
                 // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                 Q[i].Add(Q[i], t[j].data[b])
@@ -359,45 +355,43 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
         R = new(bn256.G2).Add(R, R)
         R.Add(R, Q[i-1])
     }
-    if Q_prev != nil {
-        return R.Add(R, Q_prev)
-    } else {
-        return R
+    if qPrev != nil {
+        return R.Add(R, qPrev)
     }
+    return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
     ntables := int((len(a) + gsize - 1) / gsize)
-    table := TableG2{}
+    table := tableG2{}
     Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
 
     for i := 0; i < ntables-1; i++ {
-        table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
-        Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
+        table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
+        Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
     }
-    table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-    Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
+    table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+    Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
 
-    if Q_prev != nil {
-        return Q.Add(Q, Q_prev)
-    } else {
-        return Q
+    if qPrev != nil {
+        return Q.Add(Q, qPrev)
     }
+    return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
     ntables := int((len(a) + gsize - 1) / gsize)
-    table := TableG2{}
+    table := tableG2{}
 
     // We need at least gsize elements. If not enough, fill with 0
-    min_nelems := ntables * gsize
-    k_ext := make([]*big.Int, 0)
-    k_ext = append(k_ext, k...)
-    for i := len(k); i < min_nelems; i++ {
-        k_ext = append(k_ext, new(big.Int).SetUint64(0))
+    minNElems := ntables * gsize
+    kExt := make([]*big.Int, 0)
+    kExt = append(kExt, k...)
+    for i := len(k); i < minNElems; i++ {
+        kExt = append(kExt, new(big.Int).SetUint64(0))
     }
     // Init Adders
     nbitsQ := cryptoConstants.Q.BitLen()
@@ -409,22 +403,22 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 
     // Perform bitwise addition
     for j := 0; j < ntables-1; j++ {
-        table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
-        msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+        table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
+        msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
         for i := msb - 1; i >= 0; i-- {
-            b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+            b := getBit(kExt[j*gsize:(j+1)*gsize], i)
             if b != 0 {
                 // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
                 Q[i].Add(Q[i], table.data[b])
             }
         }
     }
-    table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-    msb := getMsb(k_ext[(ntables-1)*gsize:])
+    table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+    msb := getMsb(kExt[(ntables-1)*gsize:])
 
     for i := msb - 1; i >= 0; i-- {
-        b := getBit(k_ext[(ntables-1)*gsize:], i)
+        b := getBit(kExt[(ntables-1)*gsize:], i)
         if b != 0 {
             // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
             Q[i].Add(Q[i], table.data[b])
@@ -438,11 +432,10 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
         R = new(bn256.G2).Add(R, R)
         R.Add(R, Q[i-1])
     }
-    if Q_prev != nil {
-        return R.Add(R, Q_prev)
-    } else {
-        return R
+    if qPrev != nil {
+        return R.Add(R, qPrev)
     }
+    return R
 }
 
 // Return most significant bit position in a group of Big Integers
@@ -450,9 +443,9 @@ func getMsb(k []*big.Int) int {
     msb := 0
 
     for _, el := range k {
-        tmp_msb := el.BitLen()
-        if tmp_msb > msb {
-            msb = tmp_msb
+        tmpMsb := el.BitLen()
+        if tmpMsb > msb {
+            msb = tmpMsb
         }
     }
     return msb
@@ -460,11 +453,11 @@ func getMsb(k []*big.Int) int {
 
 // Return ith bit in group of Big Integers
 func getBit(k []*big.Int, i int) uint {
-    table_idx := uint(0)
+    tableIdx := uint(0)
 
     for idx, el := range k {
         b := el.Bit(i)
-        table_idx += (b << idx)
+        tableIdx += (b << idx)
     }
-    return table_idx
+    return tableIdx
 }
diff --git a/prover/gextra_test.go b/prover/gextra_test.go
index 26b910f..dc82c47 100644
--- a/prover/gextra_test.go
+++ b/prover/gextra_test.go
@@ -4,10 +4,11 @@ import (
     "bytes"
     "crypto/rand"
     "fmt"
-    bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
     "math/big"
     "testing"
     "time"
+
+    bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 )
 
 const (
@@ -60,31 +61,31 @@ func TestTableG1(t *testing.T) {
 
     for gsize := 2; gsize < 10; gsize++ {
         ntables := int((n + gsize - 1) / gsize)
-        table := make([]TableG1, ntables)
+        table := make([]tableG1, ntables)
 
         for i := 0; i < ntables-1; i++ {
-            table[i].NewTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
+            table[i].newTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
         }
-        table[ntables-1].NewTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
+        table[ntables-1].newTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
 
         beforeT = time.Now()
         Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
         for i := 0; i < ntables-1; i++ {
-            Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
+            Q2 = table[i].mulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
         }
-        Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
+        Q2 = table[ntables-1].mulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
         fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize)
+        Q3 := scalarMultG1(arrayG1, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize)
+        Q4 := mulTableNoDoubleG1(table, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
+        Q5 := scalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
         if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
@@ -119,31 +120,31 @@ func TestTableG2(t *testing.T) {
 
     for gsize := 2; gsize < 10; gsize++ {
         ntables := int((n + gsize - 1) / gsize)
-        table := make([]TableG2, ntables)
+        table := make([]tableG2, ntables)
 
         for i := 0; i < ntables-1; i++ {
-            table[i].NewTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
+            table[i].newTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
        }
-        table[ntables-1].NewTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
+        table[ntables-1].newTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
 
         beforeT = time.Now()
         Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
         for i := 0; i < ntables-1; i++ {
-            Q2 = table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
+            Q2 = table[i].mulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
         }
-        Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
+        Q2 = table[ntables-1].mulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
         fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize)
+        Q3 := scalarMultG2(arrayG2, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize)
+        Q4 := mulTableNoDoubleG2(table, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
 
         beforeT = time.Now()
-        Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
+        Q5 := scalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
         fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
 
         if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
diff --git a/prover/prover.go b/prover/prover.go
index 07b0f73..67a401e 100644
--- a/prover/prover.go
+++ b/prover/prover.go
@@ -10,7 +10,7 @@ import (
     bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
     "github.com/iden3/go-circom-prover-verifier/types"
     "github.com/iden3/go-iden3-crypto/utils"
-        //"fmt"
+    //"fmt"
 )
 
 // Proof is the data structure of the Groth16 zkSNARK proof
@@ -45,7 +45,7 @@ type Witness []*big.Int
 
 // Group Size
 const (
-        GSIZE = 6
+    GSIZE = 6
 )
 
 func randBigInt() (*big.Int, error) {
@@ -81,34 +81,34 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
     proofB := arrayOfZeroesG2(numcpu)
     proofC := arrayOfZeroesG1(numcpu)
     proofBG1 := arrayOfZeroesG1(numcpu)
-        gsize := GSIZE
+    gsize := GSIZE
     var wg1 sync.WaitGroup
     wg1.Add(numcpu)
     for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
        // split 1
        go func(cpu int, ranges [2]int) {
-            proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
-                w[ranges[0]:ranges[1]],
-                proofA[cpu],
-                gsize)
-            proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
-                w[ranges[0]:ranges[1]],
-                proofB[cpu],
-                gsize)
-            proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
-                w[ranges[0]:ranges[1]],
-                proofBG1[cpu],
-                gsize)
-            min_lim := pk.NPublic+1
-            if ranges[0] > pk.NPublic+1 {
-                min_lim = ranges[0]
-            }
-            if ranges[1] > pk.NPublic + 1 {
-                proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]],
-                    w[min_lim:ranges[1]],
-                    proofC[cpu],
-                    gsize)
-            }
+            proofA[cpu] = scalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofA[cpu],
+                gsize)
+            proofB[cpu] = scalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofB[cpu],
+                gsize)
+            proofBG1[cpu] = scalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
+                w[ranges[0]:ranges[1]],
+                proofBG1[cpu],
+                gsize)
+            minLim := pk.NPublic + 1
+            if ranges[0] > pk.NPublic+1 {
+                minLim = ranges[0]
+            }
+            if ranges[1] > pk.NPublic+1 {
+                proofC[cpu] = scalarMultNoDoubleG1(pk.C[minLim:ranges[1]],
+                    w[minLim:ranges[1]],
+                    proofC[cpu],
+                    gsize)
+            }
             wg1.Done()
         }(_cpu, _ranges)
     }
@@ -142,10 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
 
     for _cpu, _ranges := range ranges(len(h), numcpu) {
         // split 2
         go func(cpu int, ranges [2]int) {
-            proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
-                h[ranges[0]:ranges[1]],
-                proofC[cpu],
-                gsize)
+            proofC[cpu] = scalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
+                h[ranges[0]:ranges[1]],
+                proofC[cpu],
+                gsize)
             wg2.Done()
         }(_cpu, _ranges)
     }
diff --git a/prover/prover_test.go b/prover/prover_test.go
index cfea3f4..ea7350a 100644
--- a/prover/prover_test.go
+++ b/prover/prover_test.go
@@ -16,8 +16,8 @@ import (
 func TestCircuitsGenerateProof(t *testing.T) {
     testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
     testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
-        //testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
-        //testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
+    //testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
+    //testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
 }
 
 func testCircuitGenerateProof(t *testing.T, circuit string) {
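
Reviewer note: a minimal usage sketch (not part of the commit) of the conversions this patch adds. ParseProof, ProofToSmartContractFormat and ProofToJsonHex are the package's functions as introduced above; the input path is illustrative only.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/iden3/go-circom-prover-verifier/parsers"
)

func main() {
	// Illustrative path to a proof.json produced by the prover.
	proofJson, err := ioutil.ReadFile("testdata/circuit1k/proof.json")
	if err != nil {
		panic(err)
	}
	proof, err := parsers.ParseProof(proofJson)
	if err != nil {
		panic(err)
	}

	// Decimal strings with the B coordinate pairs swapped, as on-chain
	// verifiers expect them. The swapped pairs are not a valid bn256.G2
	// point, which is why this returns a ProofString and not a Proof.
	sc := parsers.ProofToSmartContractFormat(proof)
	fmt.Println(sc.A, sc.B, sc.C)

	// The same proof as JSON with 0x-prefixed hexadecimal strings.
	jsonHex, err := parsers.ProofToJsonHex(proof)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jsonHex))
}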
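
Reviewer note: the table trick in gextra.go (newTableG1 + mulTableG1) can be sanity-checked with plain integers, where int64 addition stands in for G1 point addition. The helper names below are mine, not the package's: the table holds every subset sum of a group of gsize inputs, and one doubling per bit position then serves all gsize scalars at once.

package main

import "fmt"

// newTable builds table[idx] = sum of a[j] for each set bit j of idx,
// reusing previously computed entries exactly as newTableG1 does.
func newTable(a []int64) []int64 {
	table := make([]int64, 1<<len(a))
	table[0] = 0 // identity element (the point at infinity for G1)
	lastPow2 := 1
	nelems := 0
	for i := 1; i < 1<<len(a); i++ {
		if i&(i-1) == 0 { // power of two: take a fresh input element
			lastPow2 = i
			table[i] = a[nelems]
			nelems++
		} else { // otherwise combine two smaller entries
			table[i] = table[lastPow2] + table[i-lastPow2]
		}
	}
	return table
}

// getBit packs bit i of every scalar into one table index, as in gextra.go.
func getBit(k []int64, i uint) int {
	idx := 0
	for j, el := range k {
		idx += int((el>>i)&1) << j
	}
	return idx
}

func main() {
	a := []int64{7, 11, 13} // stand-ins for G1 points
	k := []int64{3, 5, 6}   // scalars

	table := newTable(a)

	// Double-and-add over the packed bits (mulTableG1's main loop):
	// a single doubling per bit position covers all three scalars.
	var q int64
	for i := 7; i >= 0; i-- {
		q = 2 * q
		if b := getBit(k, uint(i)); b != 0 {
			q += table[b]
		}
	}
	fmt.Println(q, "==", 3*7+5*11+6*13) // both sides are 154
}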
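
Reviewer note: the no-doubling variant (mulTableNoDoubleG1 / scalarMultNoDoubleG1) can be checked the same way. Instead of doubling inside the scan, it keeps one accumulator per bit position across all tables and performs every doubling in a single Horner pass at the end, so the doubling count is independent of the number of tables. Again an illustrative sketch with int64 stand-ins and invented names:

package main

import "fmt"

func main() {
	// Two groups (tables) of two scalar/point pairs.
	a := [][]int64{{7, 11}, {13, 17}}
	k := [][]int64{{3, 5}, {2, 6}}

	// table[j][idx] = subset sum of group j (what newTableG1 builds).
	table := make([][]int64, len(a))
	for j := range a {
		table[j] = []int64{0, a[j][0], a[j][1], a[j][0] + a[j][1]}
	}

	// One accumulator per bit position; no doubling during the scan.
	const nbits = 4
	Q := make([]int64, nbits+1)
	for j := range table {
		for i := 0; i < nbits; i++ {
			b := (k[j][0]>>i)&1 + ((k[j][1]>>i)&1)<<1 // packed bit index
			if b != 0 {
				Q[i] += table[j][b]
			}
		}
	}

	// Single Horner pass: R = ((Q[n-1]*2 + Q[n-2])*2 + ...) + Q[0].
	var R int64
	for i := nbits; i > 0; i-- {
		R = 2*R + Q[i-1]
	}
	fmt.Println(R, "==", 3*7+5*11+2*13+6*17) // both sides are 204
}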
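
Reviewer note: GenerateProof's "split 1"/"split 2" blocks follow a per-CPU partial-accumulator pattern: each goroutine owns one slot of the result slice and the partial results are reduced sequentially afterwards, so no locking is needed. A miniature of that pattern, where split is a hypothetical stand-in for the package's internal ranges helper (whose exact chunking may differ) and int64 sums stand in for the curve operations:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// split partitions [0, n) into at most nchunks contiguous ranges.
func split(n, nchunks int) [][2]int {
	chunk := (n + nchunks - 1) / nchunks
	var out [][2]int
	for start := 0; start < n; start += chunk {
		end := start + chunk
		if end > n {
			end = n
		}
		out = append(out, [2]int{start, end})
	}
	return out
}

func main() {
	w := make([]int64, 1000)
	for i := range w {
		w[i] = int64(i)
	}

	numcpu := runtime.NumCPU()
	partial := make([]int64, numcpu)

	var wg sync.WaitGroup
	for cpu, r := range split(len(w), numcpu) {
		wg.Add(1)
		go func(cpu int, r [2]int) {
			defer wg.Done()
			for _, v := range w[r[0]:r[1]] { // each goroutine owns partial[cpu]
				partial[cpu] += v
			}
		}(cpu, r)
	}
	wg.Wait()

	var total int64
	for _, p := range partial { // sequential reduction, as in GenerateProof
		total += p
	}
	fmt.Println(total, "==", 999*1000/2)
}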