@@ -1,16 +1,17 @@
 package prover
 
 import (
+	"math/big"
+
 	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
 	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
-	"math/big"
 )
 
-type TableG1 struct {
+type tableG1 struct {
 	data []*bn256.G1
 }
 
-func (t TableG1) GetData() []*bn256.G1 {
+func (t tableG1) getData() []*bn256.G1 {
 	return t.data
 }
@@ -21,31 +22,31 @@ func (t TableG1) GetData() []*bn256.G1 {
 // Table[3] = a[0]+a[1]
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
-func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
+func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	// EC table
 	table := make([]*bn256.G1, 0)
 
 	// We need at least gsize elements. If not enough, fill with 0
-	a_ext := make([]*bn256.G1, 0)
-	a_ext = append(a_ext, a...)
+	aExt := make([]*bn256.G1, 0)
+	aExt = append(aExt, a...)
 
 	for i := len(a); i < gsize; i++ {
-		a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
+		aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
 	}
 
 	elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
 	table = append(table, elG1)
-	last_pow2 := 1
+	lastPow2 := 1
 	nelems := 0
 	for i := 1; i < 1<<gsize; i++ {
 		elG1 := new(bn256.G1)
 		// if power of 2
 		if i&(i-1) == 0 {
-			last_pow2 = i
-			elG1.Set(a_ext[nelems])
+			lastPow2 = i
+			elG1.Set(aExt[nelems])
 			nelems++
 		} else {
-			elG1.Add(table[last_pow2], table[i-last_pow2])
+			elG1.Add(table[lastPow2], table[i-lastPow2])
 			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
 			//table[i].MakeAffine()
 		}
@@ -60,7 +61,7 @@ func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
 	t.data = table
 }
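// Illustrative sketch (the helper below is hypothetical, not part of the package):
// for gsize = 3 the table built by newTableG1 has 1<<3 = 8 entries, indexed by the
// bit pattern that selects which inputs are summed:
//
//	data[0] = identity       data[4] = a[2]
//	data[1] = a[0]           data[5] = a[0] + a[2]
//	data[2] = a[1]           data[6] = a[1] + a[2]
//	data[3] = a[0] + a[1]    data[7] = a[0] + a[1] + a[2]
//
// checkTableEntryG1 recomputes entry i naively and compares it against the
// precomputed one, which is one way to sanity-check that invariant.
func checkTableEntryG1(t tableG1, a []*bn256.G1, i int) bool {
	expected := new(bn256.G1).ScalarBaseMult(big.NewInt(0)) // identity
	for j := 0; j < len(a); j++ {
		if i&(1<<uint(j)) != 0 {
			expected.Add(expected, a[j])
		}
	}
	return expected.String() == t.data[i].String()
}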
 
-func (t TableG1) Marshal() []byte {
+func (t tableG1) Marshal() []byte {
 	info := make([]byte, 0)
 	for _, el := range t.data {
 		info = append(info, el.Marshal()...)
@@ -70,43 +71,42 @@ func (t TableG1) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G1 elements
-func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G1).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
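// Worked example (hypothetical values): with gsize = 2, a table built from
// {a[0], a[1]} and k = {5, 3} (binary 101 and 011), the loop above scans the
// bits from the MSB down and picks these entries:
//
//	i = 2: b = 0b01 -> add data[1] = a[0]
//	i = 1: b = 0b10 -> add data[2] = a[1]
//	i = 0: b = 0b11 -> add data[3] = a[0] + a[1]
//
// Together with the doublings, Q ends up as 5*a[0] + 3*a[1], so a single
// shared double-and-add pass replaces gsize separate scalar multiplications.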
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -118,10 +118,10 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize : (j+1)*gsize], i)
+			b := getBit(kExt[j*gsize : (j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -137,45 +137,43 @@ func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
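// Usage sketch (hypothetical values): the no-double variant takes one
// precomputed table per group of gsize points, so tables can be built once
// and reused across many scalar vectors; the doublings are deferred to the
// single Horner-style pass over the per-bit adders Q[i] at the end.
func exampleMulTableNoDoubleG1() *bn256.G1 {
	gsize := 2
	a := []*bn256.G1{
		new(bn256.G1).ScalarBaseMult(big.NewInt(3)),
		new(bn256.G1).ScalarBaseMult(big.NewInt(5)),
		new(bn256.G1).ScalarBaseMult(big.NewInt(7)),
		new(bn256.G1).ScalarBaseMult(big.NewInt(11)),
	}
	// One table per group of gsize points.
	tables := make([]tableG1, 2)
	tables[0].newTableG1(a[0:2], gsize, false)
	tables[1].newTableG1(a[2:4], gsize, false)

	// Result is k[0]*a[0] + k[1]*a[1] + k[2]*a[2] + k[3]*a[3].
	k := []*big.Int{big.NewInt(2), big.NewInt(9), big.NewInt(4), big.NewInt(6)}
	return mulTableNoDoubleG1(tables, k, nil, gsize)
}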
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 	Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG1(a[i*gsize : (i+1)*gsize], gsize, false)
-		Q = table.MulTableG1(k[i*gsize : (i+1)*gsize], Q, gsize)
+		table.newTableG1(a[i*gsize : (i+1)*gsize], gsize, false)
+		Q = table.mulTableG1(k[i*gsize : (i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
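// Sanity-check sketch (hypothetical helper): scalarMultG1 should agree with
// the naive sum of individual ScalarMult calls, which is the property the
// table-based shortcut is meant to preserve.
func scalarMultG1MatchesNaive() bool {
	a := []*bn256.G1{
		new(bn256.G1).ScalarBaseMult(big.NewInt(3)),
		new(bn256.G1).ScalarBaseMult(big.NewInt(5)),
	}
	k := []*big.Int{big.NewInt(7), big.NewInt(11)}

	// Naive: 7*a[0] + 11*a[1].
	naive := new(bn256.G1).ScalarMult(a[0], k[0])
	naive.Add(naive, new(bn256.G1).ScalarMult(a[1], k[1]))

	fast := scalarMultG1(a, k, nil, 2)
	return naive.String() == fast.String()
}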
 
 // Multiply scalar by precomputed table of G1 elements without intermediate doubling
-func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
+func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG1{}
+	table := tableG1{}
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -187,22 +185,22 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG1(a[j*gsize : (j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG1(a[j*gsize : (j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize : (j+1)*gsize], i)
+			b := getBit(kExt[j*gsize : (j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -216,11 +214,10 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 		R = new(bn256.G1).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 /////
@@ -228,11 +225,11 @@ func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize i
 // TODO - How can avoid replicating code in G2?
 //G2
 
-type TableG2 struct {
+type tableG2 struct {
 	data []*bn256.G2
 }
 
-func (t TableG2) GetData() []*bn256.G2 {
+func (t tableG2) getData() []*bn256.G2 {
 	return t.data
 }
@@ -244,31 +241,31 @@ func (t TableG2) GetData() []*bn256.G2 {
 // .....
 // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
 // TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
-func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
+func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
 	// EC table
 	table := make([]*bn256.G2, 0)
 
 	// We need at least gsize elements. If not enough, fill with 0
-	a_ext := make([]*bn256.G2, 0)
-	a_ext = append(a_ext, a...)
+	aExt := make([]*bn256.G2, 0)
+	aExt = append(aExt, a...)
 
 	for i := len(a); i < gsize; i++ {
-		a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
+		aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
 	}
 
 	elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
 	table = append(table, elG2)
-	last_pow2 := 1
+	lastPow2 := 1
 	nelems := 0
 	for i := 1; i < 1<<gsize; i++ {
 		elG2 := new(bn256.G2)
 		// if power of 2
 		if i&(i-1) == 0 {
-			last_pow2 = i
-			elG2.Set(a_ext[nelems])
+			lastPow2 = i
+			elG2.Set(aExt[nelems])
 			nelems++
 		} else {
-			elG2.Add(table[last_pow2], table[i-last_pow2])
+			elG2.Add(table[lastPow2], table[i-lastPow2])
 			// TODO bn256 doesn't export MakeAffine function. We need to fork repo
 			//table[i].MakeAffine()
 		}
@@ -283,7 +280,7 @@ func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
 	t.data = table
 }
 
-func (t TableG2) Marshal() []byte {
+func (t tableG2) Marshal() []byte {
 	info := make([]byte, 0)
 	for _, el := range t.data {
 		info = append(info, el.Marshal()...)
@@ -293,43 +290,42 @@ func (t TableG2) Marshal() []byte {
 }
 
 // Multiply scalar by precomputed table of G2 elements
-func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
 
 	for i := len(k); i < gsize; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
-	msb := getMsb(k_ext)
+	msb := getMsb(kExt)
 	for i := msb - 1; i >= 0; i-- {
 		// TODO. bn256 doesn't export double operation. We will need to fork repo and export it
 		Q = new(bn256.G2).Add(Q, Q)
-		b := getBit(k_ext, i)
+		b := getBit(kExt, i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q.Add(Q, t.data[b])
 		}
 	}
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := len(t) * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := len(t) * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -341,10 +337,10 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 	// Perform bitwise addition
 	for j := 0; j < len(t); j++ {
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize : (j+1)*gsize], i)
+			b := getBit(kExt[j*gsize : (j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], t[j].data[b])
@@ -359,45 +355,43 @@ func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int)
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Compute tables within function. This solution should still be faster than std multiplication
 // for gsize = 7
-func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 	Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
 	for i := 0; i < ntables-1; i++ {
-		table.NewTableG2(a[i*gsize : (i+1)*gsize], gsize, false)
-		Q = table.MulTableG2(k[i*gsize : (i+1)*gsize], Q, gsize)
+		table.newTableG2(a[i*gsize : (i+1)*gsize], gsize, false)
+		Q = table.mulTableG2(k[i*gsize : (i+1)*gsize], Q, gsize)
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
-	if Q_prev != nil {
-		return Q.Add(Q, Q_prev)
-	} else {
-		return Q
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
+	if qPrev != nil {
+		return Q.Add(Q, qPrev)
 	}
+	return Q
 }
 
 // Multiply scalar by precomputed table of G2 elements without intermediate doubling
-func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
+func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
 	ntables := int((len(a) + gsize - 1) / gsize)
-	table := TableG2{}
+	table := tableG2{}
 	// We need at least gsize elements. If not enough, fill with 0
-	min_nelems := ntables * gsize
-	k_ext := make([]*big.Int, 0)
-	k_ext = append(k_ext, k...)
-	for i := len(k); i < min_nelems; i++ {
-		k_ext = append(k_ext, new(big.Int).SetUint64(0))
+	minNElems := ntables * gsize
+	kExt := make([]*big.Int, 0)
+	kExt = append(kExt, k...)
+	for i := len(k); i < minNElems; i++ {
+		kExt = append(kExt, new(big.Int).SetUint64(0))
 	}
 
 	// Init Adders
 	nbitsQ := cryptoConstants.Q.BitLen()
@@ -409,22 +403,22 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 	// Perform bitwise addition
 	for j := 0; j < ntables-1; j++ {
-		table.NewTableG2(a[j*gsize : (j+1)*gsize], gsize, false)
-		msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
+		table.newTableG2(a[j*gsize : (j+1)*gsize], gsize, false)
+		msb := getMsb(kExt[j*gsize : (j+1)*gsize])
 		for i := msb - 1; i >= 0; i-- {
-			b := getBit(k_ext[j*gsize : (j+1)*gsize], i)
+			b := getBit(kExt[j*gsize : (j+1)*gsize], i)
 			if b != 0 {
 				// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 				Q[i].Add(Q[i], table.data[b])
 			}
 		}
 	}
-	table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
-	msb := getMsb(k_ext[(ntables-1)*gsize:])
+	table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
+	msb := getMsb(kExt[(ntables-1)*gsize:])
 	for i := msb - 1; i >= 0; i-- {
-		b := getBit(k_ext[(ntables-1)*gsize:], i)
+		b := getBit(kExt[(ntables-1)*gsize:], i)
 		if b != 0 {
 			// TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
 			Q[i].Add(Q[i], table.data[b])
@@ -438,11 +432,10 @@ func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize i
 		R = new(bn256.G2).Add(R, R)
 		R.Add(R, Q[i-1])
 	}
-	if Q_prev != nil {
-		return R.Add(R, Q_prev)
-	} else {
-		return R
+	if qPrev != nil {
+		return R.Add(R, qPrev)
 	}
+	return R
 }
 
 // Return most significant bit position in a group of Big Integers
@@ -450,9 +443,9 @@ func getMsb(k []*big.Int) int {
 	msb := 0
 	for _, el := range k {
-		tmp_msb := el.BitLen()
-		if tmp_msb > msb {
-			msb = tmp_msb
+		tmpMsb := el.BitLen()
+		if tmpMsb > msb {
+			msb = tmpMsb
 		}
 	}
 	return msb
@@ -460,11 +453,11 @@ func getMsb(k []*big.Int) int {
 // Return ith bit in group of Big Integers
 func getBit(k []*big.Int, i int) uint {
-	table_idx := uint(0)
+	tableIdx := uint(0)
 	for idx, el := range k {
 		b := el.Bit(i)
-		table_idx += (b << idx)
+		tableIdx += (b << idx)
 	}
-	return table_idx
+	return tableIdx
 }
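// Worked example (hypothetical values): for k = {6, 1} (binary 110 and 001),
// getMsb(k) returns 3, the largest BitLen in the group, while getBit(k, 2)
// returns 0b01 = 1 and getBit(k, 0) returns 0b10 = 2: bit i of k[0] lands in
// position 0 of the index and bit i of k[1] in position 1, which is exactly
// the table index consumed by the multiplication routines above.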