From 423d5f0ce757ea5c51005dd03dd3c5d4c6754d6e Mon Sep 17 00:00:00 2001 From: druiz0992 Date: Sun, 3 May 2020 03:02:49 +0200 Subject: [PATCH] Add G1/G2 table calculation functionality --- prover/gextra.go | 470 ++++++++++++++++++++++++++++++++++++++++++ prover/gextra_test.go | 162 +++++++++++++++ prover/prover.go | 44 +++- prover/prover_test.go | 4 +- prover/tables.md | 49 +++++ 5 files changed, 716 insertions(+), 13 deletions(-) create mode 100644 prover/gextra.go create mode 100644 prover/gextra_test.go create mode 100644 prover/tables.md diff --git a/prover/gextra.go b/prover/gextra.go new file mode 100644 index 0000000..fb44f0f --- /dev/null +++ b/prover/gextra.go @@ -0,0 +1,470 @@ +package prover + +import ( + bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" + cryptoConstants "github.com/iden3/go-iden3-crypto/constants" + "math/big" +) + +type TableG1 struct { + data []*bn256.G1 +} + +func (t TableG1) GetData() []*bn256.G1 { + return t.data +} + +// Compute table of gsize elements as :: +// Table[0] = Inf +// Table[1] = a[0] +// Table[2] = a[1] +// Table[3] = a[0]+a[1] +// ..... +// Table[(1<= 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + Q = new(bn256.G1).Add(Q, Q) + b := getBit(k_ext, i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q.Add(Q, t.data[b]) + } + } + if Q_prev != nil { + return Q.Add(Q, Q_prev) + } else { + return Q + } +} + +// Multiply scalar by precomputed table of G1 elements without intermediate doubling +func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { + // We need at least gsize elements. If not enough, fill with 0 + min_nelems := len(t) * gsize + k_ext := make([]*big.Int, 0) + k_ext = append(k_ext, k...) + for i := len(k); i < min_nelems; i++ { + k_ext = append(k_ext, new(big.Int).SetUint64(0)) + } + // Init Adders + nbitsQ := cryptoConstants.Q.BitLen() + Q := make([]*bn256.G1, nbitsQ) + + for i := 0; i < nbitsQ; i++ { + Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0)) + } + + // Perform bitwise addition + for j := 0; j < len(t); j++ { + msb := getMsb(k_ext[j*gsize : (j+1)*gsize]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[j*gsize:(j+1)*gsize], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q[i].Add(Q[i], t[j].data[b]) + } + } + } + + // Consolidate Addition + R := new(bn256.G1).Set(Q[nbitsQ-1]) + for i := nbitsQ - 1; i > 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + R = new(bn256.G1).Add(R, R) + R.Add(R, Q[i-1]) + } + + if Q_prev != nil { + return R.Add(R, Q_prev) + } else { + return R + } +} + +// Compute tables within function. 
This solution should still be faster than std multiplication +// for gsize = 7 +func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { + ntables := int((len(a) + gsize - 1) / gsize) + table := TableG1{} + Q := new(bn256.G1).ScalarBaseMult(new(big.Int)) + + for i := 0; i < ntables-1; i++ { + table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize, false) + Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize) + } + table.NewTableG1(a[(ntables-1)*gsize:], gsize, false) + Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize) + + if Q_prev != nil { + return Q.Add(Q, Q_prev) + } else { + return Q + } +} + +// Multiply scalar by precomputed table of G1 elements without intermediate doubling +func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 { + ntables := int((len(a) + gsize - 1) / gsize) + table := TableG1{} + + // We need at least gsize elements. If not enough, fill with 0 + min_nelems := ntables * gsize + k_ext := make([]*big.Int, 0) + k_ext = append(k_ext, k...) + for i := len(k); i < min_nelems; i++ { + k_ext = append(k_ext, new(big.Int).SetUint64(0)) + } + // Init Adders + nbitsQ := cryptoConstants.Q.BitLen() + Q := make([]*bn256.G1, nbitsQ) + + for i := 0; i < nbitsQ; i++ { + Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0)) + } + + // Perform bitwise addition + for j := 0; j < ntables-1; j++ { + table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize, false) + msb := getMsb(k_ext[j*gsize : (j+1)*gsize]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[j*gsize:(j+1)*gsize], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q[i].Add(Q[i], table.data[b]) + } + } + } + table.NewTableG1(a[(ntables-1)*gsize:], gsize, false) + msb := getMsb(k_ext[(ntables-1)*gsize:]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[(ntables-1)*gsize:], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q[i].Add(Q[i], table.data[b]) + } + } + + // Consolidate Addition + R := new(bn256.G1).Set(Q[nbitsQ-1]) + for i := nbitsQ - 1; i > 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + R = new(bn256.G1).Add(R, R) + R.Add(R, Q[i-1]) + } + if Q_prev != nil { + return R.Add(R, Q_prev) + } else { + return R + } +} + +///// + +// TODO - How can avoid replicating code in G2? +//G2 + +type TableG2 struct { + data []*bn256.G2 +} + +func (t TableG2) GetData() []*bn256.G2 { + return t.data +} + +// Compute table of gsize elements as :: +// Table[0] = Inf +// Table[1] = a[0] +// Table[2] = a[1] +// Table[3] = a[0]+a[1] +// ..... +// Table[(1< toaffine = True doesnt work. Problem with Marshal/Unmarshal +func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) { + // EC table + table := make([]*bn256.G2, 0) + + // We need at least gsize elements. If not enough, fill with 0 + a_ext := make([]*bn256.G2, 0) + a_ext = append(a_ext, a...) + + for i := len(a); i < gsize; i++ { + a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0))) + } + + elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0)) + table = append(table, elG2) + last_pow2 := 1 + nelems := 0 + for i := 1; i < 1<= 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + Q = new(bn256.G2).Add(Q, Q) + b := getBit(k_ext, i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. 
+ Q.Add(Q, t.data[b]) + } + } + if Q_prev != nil { + return Q.Add(Q, Q_prev) + } else { + return Q + } +} + +// Multiply scalar by precomputed table of G2 elements without intermediate doubling +func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { + // We need at least gsize elements. If not enough, fill with 0 + min_nelems := len(t) * gsize + k_ext := make([]*big.Int, 0) + k_ext = append(k_ext, k...) + for i := len(k); i < min_nelems; i++ { + k_ext = append(k_ext, new(big.Int).SetUint64(0)) + } + // Init Adders + nbitsQ := cryptoConstants.Q.BitLen() + Q := make([]*bn256.G2, nbitsQ) + + for i := 0; i < nbitsQ; i++ { + Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0)) + } + + // Perform bitwise addition + for j := 0; j < len(t); j++ { + msb := getMsb(k_ext[j*gsize : (j+1)*gsize]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[j*gsize:(j+1)*gsize], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q[i].Add(Q[i], t[j].data[b]) + } + } + } + + // Consolidate Addition + R := new(bn256.G2).Set(Q[nbitsQ-1]) + for i := nbitsQ - 1; i > 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + R = new(bn256.G2).Add(R, R) + R.Add(R, Q[i-1]) + } + if Q_prev != nil { + return R.Add(R, Q_prev) + } else { + return R + } +} + +// Compute tables within function. This solution should still be faster than std multiplication +// for gsize = 7 +func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { + ntables := int((len(a) + gsize - 1) / gsize) + table := TableG2{} + Q := new(bn256.G2).ScalarBaseMult(new(big.Int)) + + for i := 0; i < ntables-1; i++ { + table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize, false) + Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize) + } + table.NewTableG2(a[(ntables-1)*gsize:], gsize, false) + Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize) + + if Q_prev != nil { + return Q.Add(Q, Q_prev) + } else { + return Q + } +} + +// Multiply scalar by precomputed table of G2 elements without intermediate doubling +func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 { + ntables := int((len(a) + gsize - 1) / gsize) + table := TableG2{} + + // We need at least gsize elements. If not enough, fill with 0 + min_nelems := ntables * gsize + k_ext := make([]*big.Int, 0) + k_ext = append(k_ext, k...) + for i := len(k); i < min_nelems; i++ { + k_ext = append(k_ext, new(big.Int).SetUint64(0)) + } + // Init Adders + nbitsQ := cryptoConstants.Q.BitLen() + Q := make([]*bn256.G2, nbitsQ) + + for i := 0; i < nbitsQ; i++ { + Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0)) + } + + // Perform bitwise addition + for j := 0; j < ntables-1; j++ { + table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize, false) + msb := getMsb(k_ext[j*gsize : (j+1)*gsize]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[j*gsize:(j+1)*gsize], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. + Q[i].Add(Q[i], table.data[b]) + } + } + } + table.NewTableG2(a[(ntables-1)*gsize:], gsize, false) + msb := getMsb(k_ext[(ntables-1)*gsize:]) + + for i := msb - 1; i >= 0; i-- { + b := getBit(k_ext[(ntables-1)*gsize:], i) + if b != 0 { + // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient. 
+ Q[i].Add(Q[i], table.data[b]) + } + } + + // Consolidate Addition + R := new(bn256.G2).Set(Q[nbitsQ-1]) + for i := nbitsQ - 1; i > 0; i-- { + // TODO. bn256 doesn't export double operation. We will need to fork repo and export it + R = new(bn256.G2).Add(R, R) + R.Add(R, Q[i-1]) + } + if Q_prev != nil { + return R.Add(R, Q_prev) + } else { + return R + } +} + +// Return most significant bit position in a group of Big Integers +func getMsb(k []*big.Int) int { + msb := 0 + + for _, el := range k { + tmp_msb := el.BitLen() + if tmp_msb > msb { + msb = tmp_msb + } + } + return msb +} + +// Return ith bit in group of Big Integers +func getBit(k []*big.Int, i int) uint { + table_idx := uint(0) + + for idx, el := range k { + b := el.Bit(i) + table_idx += (b << idx) + } + return table_idx +} diff --git a/prover/gextra_test.go b/prover/gextra_test.go new file mode 100644 index 0000000..26b910f --- /dev/null +++ b/prover/gextra_test.go @@ -0,0 +1,162 @@ +package prover + +import ( + "bytes" + "crypto/rand" + "fmt" + bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" + "math/big" + "testing" + "time" +) + +const ( + N1 = 5000 + N2 = 5000 +) + +func randomBigIntArray(n int) []*big.Int { + var p []*big.Int + for i := 0; i < n; i++ { + pi := randBI() + p = append(p, pi) + } + + return p +} + +func randomG1Array(n int) []*bn256.G1 { + arrayG1 := make([]*bn256.G1, n) + + for i := 0; i < n; i++ { + _, arrayG1[i], _ = bn256.RandomG1(rand.Reader) + } + return arrayG1 +} + +func randomG2Array(n int) []*bn256.G2 { + arrayG2 := make([]*bn256.G2, n) + + for i := 0; i < n; i++ { + _, arrayG2[i], _ = bn256.RandomG2(rand.Reader) + } + return arrayG2 +} + +func TestTableG1(t *testing.T) { + n := N1 + + // init scalar + var arrayW = randomBigIntArray(n) + // init G1 array + var arrayG1 = randomG1Array(n) + + beforeT := time.Now() + Q1 := new(bn256.G1).ScalarBaseMult(new(big.Int)) + for i := 0; i < n; i++ { + Q1.Add(Q1, new(bn256.G1).ScalarMult(arrayG1[i], arrayW[i])) + } + fmt.Println("Std. Mult. 
time elapsed:", time.Since(beforeT)) + + for gsize := 2; gsize < 10; gsize++ { + ntables := int((n + gsize - 1) / gsize) + table := make([]TableG1, ntables) + + for i := 0; i < ntables-1; i++ { + table[i].NewTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true) + } + table[ntables-1].NewTableG1(arrayG1[(ntables-1)*gsize:], gsize, true) + + beforeT = time.Now() + Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int)) + for i := 0; i < ntables-1; i++ { + Q2 = table[i].MulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize) + } + Q2 = table[ntables-1].MulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize) + fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q3 := ScalarMultG1(arrayG1, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q4 := MulTableNoDoubleG1(table, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q5 := ScalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT)) + + if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 { + t.Error("Error in TMult") + } + if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 { + t.Error("Error in TMult with table comp") + } + if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 { + t.Error("Error in TMultNoDouble") + } + if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 { + t.Error("Error in TMultNoDoublee with table comp") + } + } +} + +func TestTableG2(t *testing.T) { + n := N2 + + // init scalar + var arrayW = randomBigIntArray(n) + // init G2 array + var arrayG2 = randomG2Array(n) + + beforeT := time.Now() + Q1 := new(bn256.G2).ScalarBaseMult(new(big.Int)) + for i := 0; i < n; i++ { + Q1.Add(Q1, new(bn256.G2).ScalarMult(arrayG2[i], arrayW[i])) + } + fmt.Println("Std. Mult. 
time elapsed:", time.Since(beforeT)) + + for gsize := 2; gsize < 10; gsize++ { + ntables := int((n + gsize - 1) / gsize) + table := make([]TableG2, ntables) + + for i := 0; i < ntables-1; i++ { + table[i].NewTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false) + } + table[ntables-1].NewTableG2(arrayG2[(ntables-1)*gsize:], gsize, false) + + beforeT = time.Now() + Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int)) + for i := 0; i < ntables-1; i++ { + Q2 = table[i].MulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize) + } + Q2 = table[ntables-1].MulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize) + fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q3 := ScalarMultG2(arrayG2, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q4 := MulTableNoDoubleG2(table, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT)) + + beforeT = time.Now() + Q5 := ScalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize) + fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT)) + + if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 { + t.Error("Error in TMult") + } + if bytes.Compare(Q1.Marshal(), Q3.Marshal()) != 0 { + t.Error("Error in TMult with table comp") + } + if bytes.Compare(Q1.Marshal(), Q4.Marshal()) != 0 { + t.Error("Error in TMultNoDouble") + } + if bytes.Compare(Q1.Marshal(), Q5.Marshal()) != 0 { + t.Error("Error in TMultNoDoublee with table comp") + } + } +} diff --git a/prover/prover.go b/prover/prover.go index 433ca21..07b0f73 100644 --- a/prover/prover.go +++ b/prover/prover.go @@ -10,6 +10,7 @@ import ( bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" "github.com/iden3/go-circom-prover-verifier/types" "github.com/iden3/go-iden3-crypto/utils" + //"fmt" ) // Proof is the data structure of the Groth16 zkSNARK proof @@ -42,6 +43,11 @@ type Pk struct { // Witness contains the witness type Witness []*big.Int +// Group Size +const ( + GSIZE = 6 +) + func randBigInt() (*big.Int, error) { maxbits := types.R.BitLen() b := make([]byte, (maxbits/8)-1) @@ -75,19 +81,34 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err proofB := arrayOfZeroesG2(numcpu) proofC := arrayOfZeroesG1(numcpu) proofBG1 := arrayOfZeroesG1(numcpu) + gsize := GSIZE var wg1 sync.WaitGroup wg1.Add(numcpu) for _cpu, _ranges := range ranges(pk.NVars, numcpu) { // split 1 go func(cpu int, ranges [2]int) { - for i := ranges[0]; i < ranges[1]; i++ { - proofA[cpu].Add(proofA[cpu], new(bn256.G1).ScalarMult(pk.A[i], w[i])) - proofB[cpu].Add(proofB[cpu], new(bn256.G2).ScalarMult(pk.B2[i], w[i])) - proofBG1[cpu].Add(proofBG1[cpu], new(bn256.G1).ScalarMult(pk.B1[i], w[i])) - if i >= pk.NPublic+1 { - proofC[cpu].Add(proofC[cpu], new(bn256.G1).ScalarMult(pk.C[i], w[i])) - } - } + proofA[cpu] = ScalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]], + w[ranges[0]:ranges[1]], + proofA[cpu], + gsize) + proofB[cpu] = ScalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]], + w[ranges[0]:ranges[1]], + proofB[cpu], + gsize) + proofBG1[cpu] = ScalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]], + w[ranges[0]:ranges[1]], + proofBG1[cpu], + gsize) + min_lim := pk.NPublic+1 + if ranges[0] > pk.NPublic+1 { + min_lim = ranges[0] + } + if ranges[1] > pk.NPublic + 1 { + proofC[cpu] = ScalarMultNoDoubleG1(pk.C[min_lim:ranges[1]], + w[min_lim:ranges[1]], + proofC[cpu], + gsize) + } wg1.Done() }(_cpu, 
_ranges)
 	}
 
@@ -121,9 +142,10 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
 
 	for _cpu, _ranges := range ranges(len(h), numcpu) { // split 2
 		go func(cpu int, ranges [2]int) {
-			for i := ranges[0]; i < ranges[1]; i++ {
-				proofC[cpu].Add(proofC[cpu], new(bn256.G1).ScalarMult(pk.HExps[i], h[i]))
-			}
+			proofC[cpu] = ScalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
+				h[ranges[0]:ranges[1]],
+				proofC[cpu],
+				gsize)
 			wg2.Done()
 		}(_cpu, _ranges)
 	}
diff --git a/prover/prover_test.go b/prover/prover_test.go
index 3812b07..cfea3f4 100644
--- a/prover/prover_test.go
+++ b/prover/prover_test.go
@@ -16,8 +16,8 @@ import (
 func TestCircuitsGenerateProof(t *testing.T) {
 	testCircuitGenerateProof(t, "circuit1k") // 1000 constraints
 	testCircuitGenerateProof(t, "circuit5k") // 5000 constraints
-	// testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
-	// testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
+	//testCircuitGenerateProof(t, "circuit10k") // 10000 constraints
+	//testCircuitGenerateProof(t, "circuit20k") // 20000 constraints
 }
 
 func testCircuitGenerateProof(t *testing.T, circuit string) {
diff --git a/prover/tables.md b/prover/tables.md
new file mode 100644
index 0000000..6307585
--- /dev/null
+++ b/prover/tables.md
@@ -0,0 +1,49 @@
+# Table Pre-calculation
+The most time-consuming part of a zkSNARK proof calculation is the scalar multiplication of elliptic curve points. The direct mechanism multiplies each point by its scalar and accumulates the partial results one by one, but the prover only needs the total accumulation.
+
+There are two potential improvements to the naive approach:
+
+1. Apply the Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
+2. Leave the doubling operations for the last step.
+
+Both options can be combined.
+
+In the following table, we show the results of using the naive method, Strauss-Shamir, and Strauss-Shamir + no doubling. The last two options are repeated for different table group sizes (GS).
+
+There are 50000 G1 elliptic curve points, and the scalars are 254 bits (BN256 curve).
+
+There may be some concern about the additional size of the tables, since they need to be loaded into a smartphone during the proof and the time required to load them may exceed the benefits. If this is a problem, an alternative is to compute the tables during the proof itself. Depending on the group size, timing may still be better than the naive approach.
+
+
+| Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
+|---|---|---|---|---|---|---|---|---|
+| Naive | 6.63s | - | - | - | - | - | - | - |
+| Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
+| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.20s | 5.94s | 6.01s | 6.69s |
+| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s |
+| No Doubling + Table Computation | 6.83s | 5.10s | 4.16s | 3.52s | 3.22s | 3.21s | 3.57s | 4.56s |
+
+There are 5000 G2 elliptic curve points, and the scalars are 254 bits (BN256 curve).
+
+| Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
+|---|---|---|---|---|---|---|---|---|
+| Naive | 3.55s | - | - | - | - | - | - | - |
+| Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms |
+| Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s |
+| No Doubling | 1.49s | 1.16s | 952ms | 719ms | 661ms | 548ms | 506ms | 444ms |
+| No Doubling + Table Computation | 1.55s | 1.21s | 984ms | 841ms | 826ms | 847ms | 1.03s | 1.39s |
+
+| GS | Extra Disk Space per Constraint (G1) |
+|----|--------|
+| 2 | 64 B |
+| 3 | 106 B |
+| 4 | 192 B |
+| 5 | 346 B |
+| 6 | 618 B |
+| 7 | 1106 B |
+| 8 | 1984 B |
+| 9 | 3577 B |
+| N | 2^(N+6)/N - 64 B |
+
+Extra disk space per constraint in G2 is twice that of G1.
+
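+## Usage sketch
+
+The snippet below is a minimal, illustrative sketch of how the precomputed tables are meant to be used, mirroring `prover/gextra_test.go`. The `multiExpG1` helper is hypothetical (it is not part of this patch) and assumes it sits inside the `prover` package so it can call `TableG1`, `NewTableG1` and `MulTableNoDoubleG1` directly.
+
+```go
+package prover
+
+import (
+	"math/big"
+
+	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
+)
+
+// multiExpG1 computes sum(points[i] * scalars[i]) using precomputed tables:
+// the points are split into groups of gsize, one table of 2^gsize partial
+// sums is built per group, and all scalars are then applied in a single
+// pass with the doublings deferred to the end ("No Doubling" rows above).
+func multiExpG1(points []*bn256.G1, scalars []*big.Int, gsize int) *bn256.G1 {
+	ntables := (len(points) + gsize - 1) / gsize
+	tables := make([]TableG1, ntables)
+
+	// Precompute one table per group of gsize points (padded internally).
+	for i := 0; i < ntables-1; i++ {
+		tables[i].NewTableG1(points[i*gsize:(i+1)*gsize], gsize, true)
+	}
+	tables[ntables-1].NewTableG1(points[(ntables-1)*gsize:], gsize, true)
+
+	// Accumulate every scalar against its table; Q_prev is nil because
+	// there is no previous accumulator to add.
+	return MulTableNoDoubleG1(tables, scalars, nil, gsize)
+}
+```
+
+`MulTableNoDoubleG1` keeps one accumulator per bit position of the scalars and performs the ~254 doublings only once, during the final consolidation, which is where the gain of the "No Doubling" variant over plain Strauss-Shamir comes from.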