package prover

import (
	"math/big"

	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
	cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
)
// TableG1 is a precomputed table of G1 group elements.
type TableG1 struct {
	data []*bn256.G1
}

// GetData returns the underlying table of G1 elements.
func (t TableG1) GetData() []*bn256.G1 {
	return t.data
}
// NewTableG1 computes a table of 1<<gsize G1 elements from at most gsize input points:
// Table[0] = Inf
// Table[1] = a[0]
// Table[2] = a[1]
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int) {
	// EC table
	table := make([]*bn256.G1, 0)

	// We need at least gsize elements. If there are not enough, pad with the
	// identity element (0*G, the point at infinity).
	a_ext := make([]*bn256.G1, 0)
	a_ext = append(a_ext, a...)
	for i := len(a); i < gsize; i++ {
		a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
	}

	elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	table = append(table, elG1)

	last_pow2 := 1
	nelems := 0
	for i := 1; i < 1<<gsize; i++ {
		elG1 := new(bn256.G1)
		if i&(i-1) == 0 {
			// i is a power of 2: this entry is the next input point itself.
			last_pow2 = i
			elG1.Set(a_ext[nelems])
			nelems++
		} else {
			// Otherwise reuse earlier entries: Table[i] = Table[last_pow2] + Table[i-last_pow2].
			elG1.Add(table[last_pow2], table[i-last_pow2])
			// TODO: bn256 doesn't export MakeAffine. We would need to fork the repo to normalize here.
			// table[i].MakeAffine()
		}
		table = append(table, elG1)
	}
	t.data = table
}
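// For example, with gsize = 2 the table holds every subset sum of {a[0], a[1]}:
// Table[0] = Inf, Table[1] = a[0], Table[2] = a[1], Table[3] = a[0]+a[1].
// A minimal, illustrative sketch (p0 and p1 are hypothetical G1 points):
//
//	var t TableG1
//	t.NewTableG1([]*bn256.G1{p0, p1}, 2)
//	_ = t.GetData() // 4 entries: Inf, p0, p1, p0+p1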
// MulTableG1 multiplies a group of scalars by the precomputed table of G1 elements.
func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
	// We need at least gsize scalars. If there are not enough, pad with 0.
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < gsize; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	msb := getMsb(k_ext)
	for i := msb - 1; i >= 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		Q = new(bn256.G1).Add(Q, Q)
		b := getBit(k_ext, i)
		if b != 0 {
			// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
			Q.Add(Q, t.data[b])
		}
	}
	if Q_prev != nil {
		return Q.Add(Q, Q_prev)
	}
	return Q
}
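// This is essentially the Straus/Shamir trick for simultaneous multiplication:
// at each bit position the bits of all gsize scalars are packed into a single
// table index, so one lookup adds the whole subset sum at once. A hedged usage
// sketch (p0, p1, k0, k1 are hypothetical points and scalars):
//
//	var t TableG1
//	t.NewTableG1([]*bn256.G1{p0, p1}, 2)
//	r := t.MulTableG1([]*big.Int{k0, k1}, nil, 2) // r = k0*p0 + k1*p1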
// MulTableNoDoubleG1 multiplies a group of scalars by a group of precomputed tables of G1
// elements without intermediate doublings: one accumulator per bit position collects the
// table lookups, and all doublings are deferred to a single consolidation pass at the end.
func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
	// We need at least len(t)*gsize scalars. If there are not enough, pad with 0.
	min_nelems := len(t) * gsize
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < min_nelems; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	// Init adders, one per bit of the field modulus Q.
	nbitsQ := cryptoConstants.Q.BitLen()
	Q := make([]*bn256.G1, nbitsQ)
	for i := 0; i < nbitsQ; i++ {
		Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	}

	// Perform bitwise addition.
	for j := 0; j < len(t); j++ {
		msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
				Q[i].Add(Q[i], t[j].data[b])
			}
		}
	}

	// Consolidate addition: R = sum_i 2^i * Q[i].
	R := new(bn256.G1).Set(Q[nbitsQ-1])
	for i := nbitsQ - 1; i > 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		R = new(bn256.G1).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if Q_prev != nil {
		return R.Add(R, Q_prev)
	}
	return R
}
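// The consolidation step is Horner's rule over the per-bit accumulators:
//
//	R = Q[n-1]
//	R = 2*R + Q[n-2]
//	...
//	R = 2*R + Q[0]   =>   R = sum_i 2^i * Q[i]
//
// so only nbitsQ-1 doublings are needed in total, shared across all tables,
// instead of a separate doubling chain per table as in MulTableG1.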
// ScalarMultG1 multiplies scalars k by points a, computing the tables within the function.
// This solution should still be faster than standard multiplication for gsize = 7.
func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
	ntables := (len(a) + gsize - 1) / gsize
	table := TableG1{}
	Q := new(bn256.G1).ScalarBaseMult(new(big.Int))

	for i := 0; i < ntables-1; i++ {
		table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize)
		Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
	}
	table.NewTableG1(a[(ntables-1)*gsize:], gsize)
	Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)

	if Q_prev != nil {
		return Q.Add(Q, Q_prev)
	}
	return Q
}
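// A hedged end-to-end sketch (a and k are hypothetical slices of equal length):
//
//	// r = k[0]*a[0] + k[1]*a[1] + ... + k[n-1]*a[n-1]
//	r := ScalarMultG1(a, k, nil, 7)
//
// Pass a non-nil Q_prev to fold a previously accumulated point into the result.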
// ScalarMultNoDoubleG1 multiplies scalars k by points a without intermediate doublings,
// computing the tables within the function.
func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
	ntables := (len(a) + gsize - 1) / gsize
	table := TableG1{}

	// We need at least ntables*gsize scalars. If there are not enough, pad with 0.
	min_nelems := ntables * gsize
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < min_nelems; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	// Init adders, one per bit of the field modulus Q.
	nbitsQ := cryptoConstants.Q.BitLen()
	Q := make([]*bn256.G1, nbitsQ)
	for i := 0; i < nbitsQ; i++ {
		Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
	}

	// Perform bitwise addition.
	for j := 0; j < ntables-1; j++ {
		table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize)
		msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
				Q[i].Add(Q[i], table.data[b])
			}
		}
	}
	table.NewTableG1(a[(ntables-1)*gsize:], gsize)
	msb := getMsb(k_ext[(ntables-1)*gsize:])
	for i := msb - 1; i >= 0; i-- {
		b := getBit(k_ext[(ntables-1)*gsize:], i)
		if b != 0 {
			// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
			Q[i].Add(Q[i], table.data[b])
		}
	}

	// Consolidate addition: R = sum_i 2^i * Q[i].
	R := new(bn256.G1).Set(Q[nbitsQ-1])
	for i := nbitsQ - 1; i > 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		R = new(bn256.G1).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if Q_prev != nil {
		return R.Add(R, Q_prev)
	}
	return R
}
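// Usage mirrors ScalarMultG1; the trade-off is the memory for the nbitsQ
// accumulators versus the per-window doubling chains. A hedged sketch:
//
//	r := ScalarMultNoDoubleG1(a, k, nil, 7) // same result as ScalarMultG1(a, k, nil, 7)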
/////
// TODO: How can we avoid replicating this code for G2?

// G2

// TableG2 is a precomputed table of G2 group elements.
type TableG2 struct {
	data []*bn256.G2
}

// GetData returns the underlying table of G2 elements.
func (t TableG2) GetData() []*bn256.G2 {
	return t.data
}
// NewTableG2 computes a table of 1<<gsize G2 elements from at most gsize input points:
// Table[0] = Inf
// Table[1] = a[0]
// Table[2] = a[1]
// Table[3] = a[0]+a[1]
// .....
// Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int) {
	// EC table
	table := make([]*bn256.G2, 0)

	// We need at least gsize elements. If there are not enough, pad with the
	// identity element (0*G, the point at infinity).
	a_ext := make([]*bn256.G2, 0)
	a_ext = append(a_ext, a...)
	for i := len(a); i < gsize; i++ {
		a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
	}

	elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	table = append(table, elG2)

	last_pow2 := 1
	nelems := 0
	for i := 1; i < 1<<gsize; i++ {
		elG2 := new(bn256.G2)
		if i&(i-1) == 0 {
			// i is a power of 2: this entry is the next input point itself.
			last_pow2 = i
			elG2.Set(a_ext[nelems])
			nelems++
		} else {
			// Otherwise reuse earlier entries: Table[i] = Table[last_pow2] + Table[i-last_pow2].
			elG2.Add(table[last_pow2], table[i-last_pow2])
			// TODO: bn256 doesn't export MakeAffine. We would need to fork the repo to normalize here.
			// table[i].MakeAffine()
		}
		table = append(table, elG2)
	}
	t.data = table
}
// MulTableG2 multiplies a group of scalars by the precomputed table of G2 elements.
func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
	// We need at least gsize scalars. If there are not enough, pad with 0.
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < gsize; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	msb := getMsb(k_ext)
	for i := msb - 1; i >= 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		Q = new(bn256.G2).Add(Q, Q)
		b := getBit(k_ext, i)
		if b != 0 {
			// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
			Q.Add(Q, t.data[b])
		}
	}
	if Q_prev != nil {
		return Q.Add(Q, Q_prev)
	}
	return Q
}
// MulTableNoDoubleG2 multiplies a group of scalars by a group of precomputed tables of G2
// elements without intermediate doublings (see MulTableNoDoubleG1 for the scheme).
func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
	// We need at least len(t)*gsize scalars. If there are not enough, pad with 0.
	min_nelems := len(t) * gsize
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < min_nelems; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	// Init adders, one per bit of the field modulus Q.
	nbitsQ := cryptoConstants.Q.BitLen()
	Q := make([]*bn256.G2, nbitsQ)
	for i := 0; i < nbitsQ; i++ {
		Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	}

	// Perform bitwise addition.
	for j := 0; j < len(t); j++ {
		msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
				Q[i].Add(Q[i], t[j].data[b])
			}
		}
	}

	// Consolidate addition: R = sum_i 2^i * Q[i].
	R := new(bn256.G2).Set(Q[nbitsQ-1])
	for i := nbitsQ - 1; i > 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		R = new(bn256.G2).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if Q_prev != nil {
		return R.Add(R, Q_prev)
	}
	return R
}
// ScalarMultG2 multiplies scalars k by points a, computing the tables within the function.
// This solution should still be faster than standard multiplication for gsize = 7.
func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
	ntables := (len(a) + gsize - 1) / gsize
	table := TableG2{}
	Q := new(bn256.G2).ScalarBaseMult(new(big.Int))

	for i := 0; i < ntables-1; i++ {
		table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize)
		Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
	}
	table.NewTableG2(a[(ntables-1)*gsize:], gsize)
	Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)

	if Q_prev != nil {
		return Q.Add(Q, Q_prev)
	}
	return Q
}
// ScalarMultNoDoubleG2 multiplies scalars k by points a without intermediate doublings,
// computing the tables within the function.
func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
	ntables := (len(a) + gsize - 1) / gsize
	table := TableG2{}

	// We need at least ntables*gsize scalars. If there are not enough, pad with 0.
	min_nelems := ntables * gsize
	k_ext := make([]*big.Int, 0)
	k_ext = append(k_ext, k...)
	for i := len(k); i < min_nelems; i++ {
		k_ext = append(k_ext, new(big.Int).SetUint64(0))
	}

	// Init adders, one per bit of the field modulus Q.
	nbitsQ := cryptoConstants.Q.BitLen()
	Q := make([]*bn256.G2, nbitsQ)
	for i := 0; i < nbitsQ; i++ {
		Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
	}

	// Perform bitwise addition.
	for j := 0; j < ntables-1; j++ {
		table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize)
		msb := getMsb(k_ext[j*gsize:(j+1)*gsize])
		for i := msb - 1; i >= 0; i-- {
			b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
			if b != 0 {
				// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
				Q[i].Add(Q[i], table.data[b])
			}
		}
	}
	table.NewTableG2(a[(ntables-1)*gsize:], gsize)
	msb := getMsb(k_ext[(ntables-1)*gsize:])
	for i := msb - 1; i >= 0; i-- {
		b := getBit(k_ext[(ntables-1)*gsize:], i)
		if b != 0 {
			// TODO: bn256 doesn't export mixed addition (Jacobian + affine), which is more efficient.
			Q[i].Add(Q[i], table.data[b])
		}
	}

	// Consolidate addition: R = sum_i 2^i * Q[i].
	R := new(bn256.G2).Set(Q[nbitsQ-1])
	for i := nbitsQ - 1; i > 0; i-- {
		// TODO: bn256 doesn't export a doubling operation. We would need to fork the repo and export it.
		R = new(bn256.G2).Add(R, R)
		R.Add(R, Q[i-1])
	}
	if Q_prev != nil {
		return R.Add(R, Q_prev)
	}
	return R
}
// getMsb returns the position of the most significant bit across a group of big integers.
func getMsb(k []*big.Int) int {
	msb := 0
	for _, el := range k {
		tmp_msb := el.BitLen()
		if tmp_msb > msb {
			msb = tmp_msb
		}
	}
	return msb
}
// getBit packs the i-th bit of every big integer in k into a single table index:
// bit i of k[idx] lands at bit idx of the returned value.
func getBit(k []*big.Int, i int) uint {
	table_idx := uint(0)
	for idx, el := range k {
		b := el.Bit(i)
		table_idx += b << idx
	}
	return table_idx
}
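// For example, with k = [k0, k1, k2], the index at bit position i is
//
//	b = k0.Bit(i) + 2*k1.Bit(i) + 4*k2.Bit(i)
//
// If k0 and k2 have bit i set but k1 does not, b = 0b101 = 5, and Table[5] =
// a[0] + a[2] is exactly the subset sum that must be added at that position.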