@@ -1,16 +1,17 @@
 package prover
 
 import (
+	"math/big"
+
 	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
-	"math/big"
 )
 
-type TableG1 struct {
+type tableG1 struct {
 	data []*bn256.G1
 }
 
-func (t TableG1) GetData() []*bn256.G1 {
+func (t tableG1) getData() []*bn256.G1 {
 	return t.data
 }
 
@@ -21,31 +22,31 @@ func (t TableG1) GetData() []*bn256.G1 {
 // Table[3] = a[0]+a[1]
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
-func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
+func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	// EC table
 	table := make([]*bn256.G1, 0)
 
 	// We need at least gsize elements. If not enough, fill with 0
-	a_ext := make([]*bn256.G1, 0)
-	a_ext = append(a_ext, a...)
+	aExt := make([]*bn256.G1, 0)
+	aExt = append(aExt, a...)
 
 	for i := len(a); i < gsize; i++ {
-		a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
+		aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
 	}
 
 	elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
 	table = append(table, elG1)
-	last_pow2 := 1
+	lastPow2 := 1
 	nelems := 0
 	for i := 1; i < 1<<gsize; i++ {
 		elG1 := new(bn256.G1)
 		// if power of 2
 		if i&(i-1) == 0 {
-			last_pow2 = i
-			elG1.Set(a_ext[nelems])
+			lastPow2 = i
+			elG1.Set(aExt[nelems])
 			nelems++
 		} else {
-			elG1.Add(table[last_pow2], table[i-last_pow2])
+			elG1.Add(table[lastPow2], table[i-lastPow2])
 			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
 			//table[i].MakeAffine()
 		}
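For reference, a sketch of what the table construction above yields for gsize = 2. The points p0 and p1 are hypothetical; only tableG1 and newTableG1 come from this patch:

	t := tableG1{}
	t.newTableG1([]*bn256.G1{p0, p1}, 2, false)
	// t.data[0] = identity element (ScalarBaseMult by 0)
	// t.data[1] = p0        i = 1 is a power of two: take next input point
	// t.data[2] = p1        i = 2 is a power of two: take next input point
	// t.data[3] = p0 + p1   i = 3: table[2] + table[1], the subset sum for bits 11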
@@ -60,7 +61,7 @@ func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	t.data = table
 }
 
-func (t TableG1) Marshal() []byte {
+func (t tableG1) Marshal() []byte {
 	info := make([]byte, 0)
 	for _, el := range t.data {
 		info = append(info, el.Marshal()...)
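Each bn256.G1 marshals to 64 bytes (two 256-bit coordinates), so Marshal on a full table should produce 64 * 2^gsize bytes. A sketch, with points a hypothetical slice:

	t := tableG1{}
	t.newTableG1(points, 4, false)
	buf := t.Marshal()
	// len(buf) == 64 * (1 << 4) == 1024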
@@ -70,43 +71,42 @@ func (t TableG1) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G1 elements
-func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
 
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G1).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -118,10 +118,10 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 
 	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -137,45 +137,43 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 		R.Add(R, Q[i-1])
 	}
 
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 	Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
 
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
-		Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
+		table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
+		Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
 
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
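scalarMultG1 should agree with the naive multi-scalar sum it replaces. A sanity sketch, assuming a and k are equal-length slices of points and scalars already in scope:

	ref := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	for i := range a {
		ref.Add(ref, new(bn256.G1).ScalarMult(a[i], k[i]))
	}
	got := scalarMultG1(a, k, nil, 7)
	// got and ref should marshal to identical bytes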
@@ -187,22 +185,22 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -216,11 +214,10 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 		R = new(bn256.G1).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 /////
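The NoDouble variants fill one accumulator per bit position first and defer all doublings to the final R loop, which is Horner evaluation over the buckets. An integer analogue of that recombination, illustrative only:

	// q[i] holds everything accumulated at bit position i
	func recombine(q []int) int {
		r := q[len(q)-1]
		for i := len(q) - 1; i > 0; i-- {
			r = 2*r + q[i-1] // one shared doubling per bit position
		}
		return r
	}
	// recombine([]int{b0, b1, b2}) == b0 + 2*b1 + 4*b2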
@@ -228,11 +225,11 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 // TODO - How can we avoid replicating code in G2?
 
 //G2
 
-type TableG2 struct {
+type tableG2 struct {
 	data []*bn256.G2
 }
 
-func (t TableG2) GetData() []*bn256.G2 {
+func (t tableG2) getData() []*bn256.G2 {
 	return t.data
 }
|
|
|
// .....
|
|
|
|
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
|
|
|
|
// TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
|
|
|
|
func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) { |
|
|
|
func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) { |
|
|
|
// EC table
|
|
|
|
table := make([]*bn256.G2, 0) |
|
|
|
|
|
|
|
// We need at least gsize elements. If not enough, fill with 0
|
|
|
|
a_ext := make([]*bn256.G2, 0) |
|
|
|
a_ext = append(a_ext, a...) |
|
|
|
aExt := make([]*bn256.G2, 0) |
|
|
|
aExt = append(aExt, a...) |
|
|
|
|
|
|
|
for i := len(a); i < gsize; i++ { |
|
|
|
a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0))) |
|
|
|
aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0))) |
|
|
|
} |
|
|
|
|
|
|
|
elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0)) |
|
|
|
table = append(table, elG2) |
|
|
|
last_pow2 := 1 |
|
|
|
lastPow2 := 1 |
|
|
|
nelems := 0 |
|
|
|
for i := 1; i < 1<<gsize; i++ { |
|
|
|
elG2 := new(bn256.G2) |
|
|
|
// if power of 2
|
|
|
|
if i&(i-1) == 0 { |
|
|
|
last_pow2 = i |
|
|
|
elG2.Set(a_ext[nelems]) |
|
|
|
lastPow2 = i |
|
|
|
elG2.Set(aExt[nelems]) |
|
|
|
nelems++ |
|
|
|
} else { |
|
|
|
elG2.Add(table[last_pow2], table[i-last_pow2]) |
|
|
|
elG2.Add(table[lastPow2], table[i-lastPow2]) |
|
|
|
// TODO bn256 doesn't export MakeAffine function. We need to fork repo
|
|
|
|
//table[i].MakeAffine()
|
|
|
|
} |
|
|
@ -283,7 +280,7 @@ func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) { |
|
|
|
t.data = table |
|
|
|
} |
|
|
|
|
|
|
|
func (t TableG2) Marshal() []byte { |
|
|
|
func (t tableG2) Marshal() []byte { |
|
|
|
info := make([]byte, 0) |
|
|
|
for _, el := range t.data { |
|
|
|
info = append(info, el.Marshal()...) |
|
|
@@ -293,43 +290,42 @@ func (t TableG2) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G2 elements
-func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
 
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G2).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -341,10 +337,10 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 
	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -359,45 +355,43 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 	Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
 
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
-		Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
+		table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
+		Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
 
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -409,22 +403,22 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
+			b := getBit(kExt[j*gsize:(j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -438,11 +432,10 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Return most significant bit position in a group of Big Integers
@@ -450,9 +443,9 @@ func getMsb(k []*big.Int) int {
 	msb := 0
 
 	for _, el := range k {
-		tmp_msb := el.BitLen()
-		if tmp_msb > msb {
-			msb = tmp_msb
+		tmpMsb := el.BitLen()
+		if tmpMsb > msb {
+			msb = tmpMsb
 		}
 	}
 	return msb
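getMsb returns the largest BitLen in the group, so the scan loops above skip leading zero bits shared by every scalar in the group. For example:

	_ = getMsb([]*big.Int{big.NewInt(5), big.NewInt(3)}) // == 3, since 5 = 0b101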
@@ -460,11 +453,11 @@ func getMsb(k []*big.Int) int {
 
 // Return ith bit in group of Big Integers
 func getBit(k []*big.Int, i int) uint {
-	table_idx := uint(0)
+	tableIdx := uint(0)
 
 	for idx, el := range k {
 		b := el.Bit(i)
-		table_idx += (b << idx)
+		tableIdx += (b << idx)
 	}
-	return table_idx
+	return tableIdx
 }
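getBit packs bit i of each scalar in the group into a single table index, scalar j landing in bit j, which is exactly the lookup index into the precomputed subset sums. A worked sketch for a group of two scalars:

	k0, k1 := big.NewInt(5), big.NewInt(3) // 0b101, 0b011
	_ = getBit([]*big.Int{k0, k1}, 0)      // 1 + (1 << 1) == 3 -> table[3] = a[0] + a[1]
	_ = getBit([]*big.Int{k0, k1}, 1)      // 0 + (1 << 1) == 2 -> table[2] = a[1]
	_ = getBit([]*big.Int{k0, k1}, 2)      // 1 + (0 << 1) == 1 -> table[1] = a[0]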