3 Commits

Author SHA1 Message Date
arnaucube
0f48cfa2a5 Add proof parsers to string (decimal & hex)
Also adds ProofToSmartContractFormat, which returns a ProofString, because the proof.B elements swap does not produce a valid point in the bn256.G2 format.

Also unexports internal structs and methods of the prover package, and applies golint fixes.
2020-05-06 14:45:44 +02:00
Eduard S
6ec118d4e2 Merge pull request #10 from iden3/feature/tables2
Add G1/G2 table calculation functionality
2020-05-06 11:43:06 +02:00
druiz0992
423d5f0ce7 Add G1/G2 table calculation functionality 2020-05-06 09:27:15 +02:00
8 changed files with 610 additions and 522 deletions

View File

@@ -467,8 +467,35 @@ func stringToG2(h [][]string) (*bn256.G2, error) {
	return p, err
}

// ProofStringToSmartContractFormat converts a ProofString to a ProofString in the SmartContract format
func ProofStringToSmartContractFormat(s ProofString) ProofString {
	var rs ProofString
	rs.A = make([]string, 2)
	rs.B = make([][]string, 2)
	rs.B[0] = make([]string, 2)
	rs.B[1] = make([]string, 2)
	rs.C = make([]string, 2)
	rs.A[0] = s.A[0]
	rs.A[1] = s.A[1]
	rs.B[0][0] = s.B[0][1]
	rs.B[0][1] = s.B[0][0]
	rs.B[1][0] = s.B[1][1]
	rs.B[1][1] = s.B[1][0]
	rs.C[0] = s.C[0]
	rs.C[1] = s.C[1]
	rs.Protocol = s.Protocol
	return rs
}

// ProofToSmartContractFormat converts the *types.Proof to a ProofString in the SmartContract format
func ProofToSmartContractFormat(p *types.Proof) ProofString {
	s := ProofToString(p)
	return ProofStringToSmartContractFormat(s)
}

// ProofToString converts the Proof to ProofString
func ProofToString(p *types.Proof) ProofString {
	var ps ProofString
	ps.A = make([]string, 3)
	ps.B = make([][]string, 3)
@@ -497,10 +524,55 @@ func ProofToJson(p *types.Proof) ([]byte, error) {
	ps.Protocol = "groth"
	return ps
}

// ProofToJson outputs the Proof in JSON format
func ProofToJson(p *types.Proof) ([]byte, error) {
	ps := ProofToString(p)
	return json.Marshal(ps)
}

// ProofToHex converts the Proof to ProofString with hexadecimal strings
func ProofToHex(p *types.Proof) ProofString {
	var ps ProofString
	ps.A = make([]string, 3)
	ps.B = make([][]string, 3)
	ps.B[0] = make([]string, 2)
	ps.B[1] = make([]string, 2)
	ps.B[2] = make([]string, 2)
	ps.C = make([]string, 3)
	a := p.A.Marshal()
	ps.A[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[:32]).Bytes())
	ps.A[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(a[32:64]).Bytes())
	ps.A[2] = "1"
	b := p.B.Marshal()
	ps.B[0][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[:32]).Bytes())
	ps.B[0][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[32:64]).Bytes())
	ps.B[1][1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[64:96]).Bytes())
	ps.B[1][0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(b[96:128]).Bytes())
	ps.B[2][0] = "1"
	ps.B[2][1] = "0"
	c := p.C.Marshal()
	ps.C[0] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[:32]).Bytes())
	ps.C[1] = "0x" + hex.EncodeToString(new(big.Int).SetBytes(c[32:64]).Bytes())
	ps.C[2] = "1"
	ps.Protocol = "groth"
	return ps
}

// ProofToJsonHex outputs the Proof in JSON format with hexadecimal strings
func ProofToJsonHex(p *types.Proof) ([]byte, error) {
	ps := ProofToHex(p)
	return json.Marshal(ps)
}

// ParseWitnessBin parses binary file representation of the Witness into the Witness struct
func ParseWitnessBin(f *os.File) (types.Witness, error) {
	var w types.Witness
	r := bufio.NewReader(f)
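As a usage sketch of the new helpers (the import path and proof file name below are assumptions, error handling kept minimal), a proof parsed from JSON can be re-encoded either in the swapped smart-contract layout or as hexadecimal JSON:

```go
package main

import (
	"fmt"
	"io/ioutil"

	// Assumed import path for this repository's prover package.
	"github.com/iden3/go-circom-prover-verifier/prover"
)

func main() {
	proofJSON, err := ioutil.ReadFile("proof.json") // hypothetical proof file
	if err != nil {
		panic(err)
	}
	proof, err := prover.ParseProof(proofJSON)
	if err != nil {
		panic(err)
	}

	// Decimal strings with the proof.B coordinates swapped, as a verifier
	// smart contract expects them.
	sc := prover.ProofToSmartContractFormat(proof)
	fmt.Println(sc.A, sc.B, sc.C)

	// The same proof as JSON with hexadecimal strings.
	hexJSON, err := prover.ProofToJsonHex(proof)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hexJSON))
}
```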

View File

@@ -172,3 +172,27 @@ func TestParseWitnessBin(t *testing.T) {
	testCircuitParseWitnessBin(t, "circuit1k")
	testCircuitParseWitnessBin(t, "circuit5k")
}

func TestProofSmartContractFormat(t *testing.T) {
	proofJson, err := ioutil.ReadFile("../testdata/circuit1k/proof.json")
	require.Nil(t, err)
	proof, err := ParseProof(proofJson)
	require.Nil(t, err)
	pS := ProofToString(proof)
	pSC := ProofToSmartContractFormat(proof)
	assert.Nil(t, err)
	assert.Equal(t, pS.A[0], pSC.A[0])
	assert.Equal(t, pS.A[1], pSC.A[1])
	assert.Equal(t, pS.B[0][0], pSC.B[0][1])
	assert.Equal(t, pS.B[0][1], pSC.B[0][0])
	assert.Equal(t, pS.B[1][0], pSC.B[1][1])
	assert.Equal(t, pS.B[1][1], pSC.B[1][0])
	assert.Equal(t, pS.C[0], pSC.C[0])
	assert.Equal(t, pS.C[1], pSC.C[1])
	assert.Equal(t, pS.Protocol, pSC.Protocol)

	pSC2 := ProofStringToSmartContractFormat(pS)
	assert.Equal(t, pSC, pSC2)
}

View File

@@ -2,20 +2,19 @@ package prover
import (
	"math/big"
	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
)

type tableG1 struct {
	data []*bn256.G1
}

func (t tableG1) getData() []*bn256.G1 {
	return t.data
}

// Compute table of gsize elements as ::
// Table[0] = Inf
// Table[1] = a[0]
@@ -23,77 +22,91 @@ func (t TableG1) GetData() []*bn256.G1 {
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
	// EC table
	table := make([]*bn256.G1, 0)
	// We need at least gsize elements. If not enough, fill with 0
	aExt := make([]*bn256.G1, 0)
	aExt = append(aExt, a...)
	for i := len(a); i < gsize; i++ {
		aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
	}
	elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	table = append(table, elG1)
	lastPow2 := 1
	nelems := 0
	for i := 1; i < 1<<gsize; i++ {
		elG1 := new(bn256.G1)
		// if power of 2
		if i&(i-1) == 0 {
			lastPow2 = i
			elG1.Set(aExt[nelems])
			nelems++
		} else {
			elG1.Add(table[lastPow2], table[i-lastPow2])
			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
			//table[i].MakeAffine()
		}
		table = append(table, elG1)
	}
	if toaffine {
		for i := 0; i < len(table); i++ {
			info := table[i].Marshal()
			table[i].Unmarshal(info)
		}
	}
	t.data = table
}

func (t tableG1) Marshal() []byte {
	info := make([]byte, 0)
	for _, el := range t.data {
		info = append(info, el.Marshal()...)
	}
	return info
}

// Multiply scalar by precomputed table of G1 elements
func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
	// We need at least gsize elements. If not enough, fill with 0
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < gsize; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	msb := getMsb(kExt)
	for i := msb - 1; i >= 0; i-- {
		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
		Q = new(bn256.G1).Add(Q, Q)
		b := getBit(kExt, i)
		if b != 0 {
			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
			Q.Add(Q, t.data[b])
		}
	}
	if qPrev != nil {
		return Q.Add(Q, qPrev)
	}
	return Q
}

// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
	// We need at least gsize elements. If not enough, fill with 0
	minNElems := len(t) * gsize
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < minNElems; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	// Init Adders
	nbitsQ := cryptoConstants.Q.BitLen()
@@ -105,10 +118,10 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
	// Perform bitwise addition
	for j := 0; j < len(t); j++ {
		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
				Q[i].Add(Q[i], t[j].data[b])
@@ -124,45 +137,43 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
		R.Add(R, Q[i-1])
	}
	if qPrev != nil {
		return R.Add(R, qPrev)
	}
	return R
}

// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
	ntables := int((len(a) + gsize - 1) / gsize)
	table := tableG1{}
	Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
	for i := 0; i < ntables-1; i++ {
		table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
		Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
	}
	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
	Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
	if qPrev != nil {
		return Q.Add(Q, qPrev)
	}
	return Q
}

// Multiply scalar by precomputed table of G1 elements without intermediate doubling
func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
	ntables := int((len(a) + gsize - 1) / gsize)
	table := tableG1{}
	// We need at least gsize elements. If not enough, fill with 0
	minNElems := ntables * gsize
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < minNElems; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	// Init Adders
	nbitsQ := cryptoConstants.Q.BitLen()
@@ -174,22 +185,22 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
	// Perform bitwise addition
	for j := 0; j < ntables-1; j++ {
		table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
				Q[i].Add(Q[i], table.data[b])
			}
		}
	}
	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
	msb := getMsb(kExt[(ntables-1)*gsize:])
	for i := msb - 1; i >= 0; i-- {
		b := getBit(kExt[(ntables-1)*gsize:], i)
		if b != 0 {
			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
			Q[i].Add(Q[i], table.data[b])
@@ -203,11 +214,10 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
		R = new(bn256.G1).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if qPrev != nil {
		return R.Add(R, qPrev)
	}
	return R
}

/////
@@ -215,16 +225,14 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
// TODO - How can avoid replicating code in G2?
//G2
type tableG2 struct {
	data []*bn256.G2
}

func (t tableG2) getData() []*bn256.G2 {
	return t.data
}

// Compute table of gsize elements as ::
// Table[0] = Inf
// Table[1] = a[0]
@@ -232,77 +240,92 @@ func (t TableG2) GetData() []*bn256.G2 {
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
// TODO -> toaffine = true doesn't work. Problem with Marshal/Unmarshal
func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
	// EC table
	table := make([]*bn256.G2, 0)
	// We need at least gsize elements. If not enough, fill with 0
	aExt := make([]*bn256.G2, 0)
	aExt = append(aExt, a...)
	for i := len(a); i < gsize; i++ {
		aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
	}
	elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	table = append(table, elG2)
	lastPow2 := 1
	nelems := 0
	for i := 1; i < 1<<gsize; i++ {
		elG2 := new(bn256.G2)
		// if power of 2
		if i&(i-1) == 0 {
			lastPow2 = i
			elG2.Set(aExt[nelems])
			nelems++
		} else {
			elG2.Add(table[lastPow2], table[i-lastPow2])
			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
			//table[i].MakeAffine()
		}
		table = append(table, elG2)
	}
	if toaffine {
		for i := 0; i < len(table); i++ {
			info := table[i].Marshal()
			table[i].Unmarshal(info)
		}
	}
	t.data = table
}

func (t tableG2) Marshal() []byte {
	info := make([]byte, 0)
	for _, el := range t.data {
		info = append(info, el.Marshal()...)
	}
	return info
}

// Multiply scalar by precomputed table of G2 elements
func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
	// We need at least gsize elements. If not enough, fill with 0
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < gsize; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	msb := getMsb(kExt)
	for i := msb - 1; i >= 0; i-- {
		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
		Q = new(bn256.G2).Add(Q, Q)
		b := getBit(kExt, i)
		if b != 0 {
			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
			Q.Add(Q, t.data[b])
		}
	}
	if qPrev != nil {
		return Q.Add(Q, qPrev)
	}
	return Q
}

// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
	// We need at least gsize elements. If not enough, fill with 0
	minNElems := len(t) * gsize
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < minNElems; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	// Init Adders
	nbitsQ := cryptoConstants.Q.BitLen()
@@ -314,10 +337,10 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
	// Perform bitwise addition
	for j := 0; j < len(t); j++ {
		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
				Q[i].Add(Q[i], t[j].data[b])
@@ -332,45 +355,43 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
		R = new(bn256.G2).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if qPrev != nil {
		return R.Add(R, qPrev)
	}
	return R
}

// Compute tables within function. This solution should still be faster than std multiplication
// for gsize = 7
func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
	ntables := int((len(a) + gsize - 1) / gsize)
	table := tableG2{}
	Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
	for i := 0; i < ntables-1; i++ {
		table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
		Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
	}
	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
	Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
	if qPrev != nil {
		return Q.Add(Q, qPrev)
	}
	return Q
}

// Multiply scalar by precomputed table of G2 elements without intermediate doubling
func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
	ntables := int((len(a) + gsize - 1) / gsize)
	table := tableG2{}
	// We need at least gsize elements. If not enough, fill with 0
	minNElems := ntables * gsize
	kExt := make([]*big.Int, 0)
	kExt = append(kExt, k...)
	for i := len(k); i < minNElems; i++ {
		kExt = append(kExt, new(big.Int).SetUint64(0))
	}
	// Init Adders
	nbitsQ := cryptoConstants.Q.BitLen()
@@ -382,22 +403,22 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
	// Perform bitwise addition
	for j := 0; j < ntables-1; j++ {
		table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
				Q[i].Add(Q[i], table.data[b])
			}
		}
	}
	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
	msb := getMsb(kExt[(ntables-1)*gsize:])
	for i := msb - 1; i >= 0; i-- {
		b := getBit(kExt[(ntables-1)*gsize:], i)
		if b != 0 {
			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
			Q[i].Add(Q[i], table.data[b])
@@ -411,21 +432,20 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
		R = new(bn256.G2).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if qPrev != nil {
		return R.Add(R, qPrev)
	}
	return R
}

// Return most significant bit position in a group of Big Integers
func getMsb(k []*big.Int) int {
	msb := 0
	for _, el := range k {
		tmpMsb := el.BitLen()
		if tmpMsb > msb {
			msb = tmpMsb
		}
	}
	return msb
@@ -433,11 +453,11 @@ func getMsb(k []*big.Int) int{
// Return ith bit in group of Big Integers
func getBit(k []*big.Int, i int) uint {
	tableIdx := uint(0)
	for idx, el := range k {
		b := el.Bit(i)
		tableIdx += (b << idx)
	}
	return tableIdx
}
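As a minimal sketch of the indexing scheme used above (plain integers stand in for curve points and integer addition for the group operation; the names here are illustrative only): each table entry b holds the sum of the base elements selected by the bits of b, and the i-th bits of a group of scalars assemble exactly the index of the entry to add, which is the invariant newTableG1/newTableG2 and mulTableG1/mulTableG2 rely on.

```go
package main

import "fmt"

// buildTable mimics the table layout: entry b holds the sum of base[j]
// for every bit j set in b (entry 0 stays at the identity, here 0).
func buildTable(base []int) []int {
	gsize := len(base)
	table := make([]int, 1<<gsize)
	for b := 1; b < 1<<gsize; b++ {
		for j := 0; j < gsize; j++ {
			if b&(1<<j) != 0 {
				table[b] += base[j]
			}
		}
	}
	return table
}

// bitIndex mirrors getBit: collect the i-th bit of each scalar into a table index.
func bitIndex(k []int, i int) int {
	idx := 0
	for j, el := range k {
		idx |= ((el >> i) & 1) << j
	}
	return idx
}

func main() {
	base := []int{3, 5, 7}    // stand-ins for a[0], a[1], a[2]
	scalars := []int{6, 1, 3} // stand-ins for k[0], k[1], k[2] (3-bit scalars)
	table := buildTable(base)

	// Strauss double-and-add over the shared bit positions, as in mulTableG1.
	acc := 0
	for i := 2; i >= 0; i-- {
		acc *= 2                           // the "doubling" step
		acc += table[bitIndex(scalars, i)] // one lookup adds all selected bases at once
	}
	fmt.Println(acc, 6*3+1*5+3*7) // both print 44
}
```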

View File

@@ -1,13 +1,14 @@
package prover

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"
	"testing"
	"time"

	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
)

const (
@@ -43,8 +44,6 @@ func randomG2Array(n int) []*bn256.G2 {
	return arrayG2
}

func TestTableG1(t *testing.T) {
	n := N1
@@ -62,34 +61,33 @@ func TestTableG1(t *testing.T){
	for gsize := 2; gsize < 10; gsize++ {
		ntables := int((n + gsize - 1) / gsize)
		table := make([]tableG1, ntables)
		for i := 0; i < ntables-1; i++ {
			table[i].newTableG1(arrayG1[i*gsize:(i+1)*gsize], gsize, true)
		}
		table[ntables-1].newTableG1(arrayG1[(ntables-1)*gsize:], gsize, true)
		beforeT = time.Now()
		Q2 := new(bn256.G1).ScalarBaseMult(new(big.Int))
		for i := 0; i < ntables-1; i++ {
			Q2 = table[i].mulTableG1(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
		}
		Q2 = table[ntables-1].mulTableG1(arrayW[(ntables-1)*gsize:], Q2, gsize)
		fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q3 := scalarMultG1(arrayG1, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q4 := mulTableNoDoubleG1(table, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q5 := scalarMultNoDoubleG1(arrayG1, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
		if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
			t.Error("Error in TMult")
		}
@@ -122,34 +120,33 @@ func TestTableG2(t *testing.T){
	for gsize := 2; gsize < 10; gsize++ {
		ntables := int((n + gsize - 1) / gsize)
		table := make([]tableG2, ntables)
		for i := 0; i < ntables-1; i++ {
			table[i].newTableG2(arrayG2[i*gsize:(i+1)*gsize], gsize, false)
		}
		table[ntables-1].newTableG2(arrayG2[(ntables-1)*gsize:], gsize, false)
		beforeT = time.Now()
		Q2 := new(bn256.G2).ScalarBaseMult(new(big.Int))
		for i := 0; i < ntables-1; i++ {
			Q2 = table[i].mulTableG2(arrayW[i*gsize:(i+1)*gsize], Q2, gsize)
		}
		Q2 = table[ntables-1].mulTableG2(arrayW[(ntables-1)*gsize:], Q2, gsize)
		fmt.Printf("Gsize : %d, TMult time elapsed: %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q3 := scalarMultG2(arrayG2, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMult time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q4 := mulTableNoDoubleG2(table, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed: %s\n", gsize, time.Since(beforeT))
		beforeT = time.Now()
		Q5 := scalarMultNoDoubleG2(arrayG2, arrayW, nil, gsize)
		fmt.Printf("Gsize : %d, TMultNoDouble time elapsed (inc table comp): %s\n", gsize, time.Since(beforeT))
		if bytes.Compare(Q1.Marshal(), Q2.Marshal()) != 0 {
			t.Error("Error in TMult")
		}

View File

@@ -87,25 +87,25 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
	for _cpu, _ranges := range ranges(pk.NVars, numcpu) {
		// split 1
		go func(cpu int, ranges [2]int) {
			proofA[cpu] = scalarMultNoDoubleG1(pk.A[ranges[0]:ranges[1]],
				w[ranges[0]:ranges[1]],
				proofA[cpu],
				gsize)
			proofB[cpu] = scalarMultNoDoubleG2(pk.B2[ranges[0]:ranges[1]],
				w[ranges[0]:ranges[1]],
				proofB[cpu],
				gsize)
			proofBG1[cpu] = scalarMultNoDoubleG1(pk.B1[ranges[0]:ranges[1]],
				w[ranges[0]:ranges[1]],
				proofBG1[cpu],
				gsize)
			minLim := pk.NPublic + 1
			if ranges[0] > pk.NPublic+1 {
				minLim = ranges[0]
			}
			if ranges[1] > pk.NPublic+1 {
				proofC[cpu] = scalarMultNoDoubleG1(pk.C[minLim:ranges[1]],
					w[minLim:ranges[1]],
					proofC[cpu],
					gsize)
			}
@@ -142,7 +142,7 @@ func GenerateProof(pk *types.Pk, w types.Witness) (*types.Proof, []*big.Int, err
	for _cpu, _ranges := range ranges(len(h), numcpu) {
		// split 2
		go func(cpu int, ranges [2]int) {
			proofC[cpu] = scalarMultNoDoubleG1(pk.HExps[ranges[0]:ranges[1]],
				h[ranges[0]:ranges[1]],
				proofC[cpu],
				gsize)

View File

@@ -16,7 +16,7 @@ There may be some concern on the additional size of the tables since they need t
| Algorithm (G1) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | - | - | - | - | - | - | - |
| Strauss | 13.16s | 9.03s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
@@ -26,7 +26,7 @@ There may be some concern on the additional size of the tables since they need t
There are 5000 G2 elliptic curve points, and the scalars are 254 bits (BN256 curve).
| Algorithm (G2) | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 3.55s | | | | | | | |
| Strauss | 3.55s | 2.54s | 1.96s | 1.58s | 1.38s | 1.20s | 1.03s | 937ms |
| Strauss + Table Computation | 3.59s | 2.58s | 2.04s | 1.71s | 1.51s | 1.46s | 1.51s | 1.82s |

View File

@@ -1,25 +0,0 @@
# Tables Pre-calculation
The most time-consuming part of a zkSNARK proof calculation is the scalar multiplication of elliptic curve points. The direct mechanism accumulates each multiplication separately, even though the prover only needs the total accumulation.
There are two potential improvements to the naive approach:
1. Apply Strauss-Shamir method (https://stackoverflow.com/questions/50993471/ec-scalar-multiplication-with-strauss-shamir-method).
2. Leave the doubling operation for the last step
Both options can be combined.
In the following table, we show the results of using the naive method, Strauss-Shamir, and Strauss-Shamir + no doubling. The last two options are repeated for different table grouping orders (a formula sketch of the no-doubling regrouping follows the table).
There are 5000 G1 elliptic curve points, and the scalars are 254 bits (BN256 curve).
There may be some concern about the additional size of the tables, since they need to be loaded into a smartphone during the proof and the time required to load them may exceed the benefits. If this is a problem, another alternative is to compute the tables during the proof itself. Depending on the group size, timing may still be better than the naive approach.
| Algorithm | GS 2 | GS 3 | GS 4 | GS 5 | GS 6 | GS 7 | GS 8 | GS 9 |
|---|---|---|---|---|---|---|---|---|
| Naive | 6.63s | | | | | | | |
| Strauss | 13.16s | 9.033s | 6.95s | 5.61s | 4.91s | 4.26s | 3.88s | 3.54s |
| Strauss + Table Computation | 16.13s | 11.32s | 8.47s | 7.10s | 6.2s | 5.94s | 6.01s | 6.69s |
| No Doubling | 3.74s | 3.00s | 2.38s | 1.96s | 1.79s | 1.54s | 1.50s | 1.44s |
| No Doubling + Table Computation | 6.83s | 5.1s | 4.16s | 3.52s | 3.22s | 3.21s | 3.57s | 4.56s |
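Read as a formula (a sketch consistent with mulTableNoDoubleG1/mulTableNoDoubleG2 above), the no-doubling variant regroups the double-and-add loop by bit position, so each bit position accumulates one table lookup per group and the doublings are shared in a single final pass:

$$\sum_{j}\sum_{m=0}^{gsize-1} k_{j\cdot gsize+m}\,P_{j\cdot gsize+m} \;=\; \sum_{i=0}^{msb-1} 2^{i}\Big(\sum_{j} T_j\big[b_j(i)\big]\Big),$$

where \(T_j\) is the precomputed table for the \(j\)-th group of gsize base points and \(b_j(i)\) is the table index built from the \(i\)-th bits of that group's scalars (the value returned by getBit).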