You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

470 lines
12 KiB

  1. package prover
  2. import (
  3. bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
  4. cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
  5. "math/big"
  6. )
// TableG1 holds a precomputed table of G1 subset sums used to speed up
// multi-scalar multiplication (see NewTableG1 for the table layout).
type TableG1 struct {
	data []*bn256.G1
}
// GetData returns the underlying table of precomputed G1 points.
func (t TableG1) GetData() []*bn256.G1 {
	return t.data
}
  13. // Compute table of gsize elements as ::
  14. // Table[0] = Inf
  15. // Table[1] = a[0]
  16. // Table[2] = a[1]
  17. // Table[3] = a[0]+a[1]
  18. // .....
  19. // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
  20. func (t *TableG1) NewTableG1(a []*bn256.G1, gsize int, toaffine bool) {
  21. // EC table
  22. table := make([]*bn256.G1, 0)
  23. // We need at least gsize elements. If not enough, fill with 0
  24. a_ext := make([]*bn256.G1, 0)
  25. a_ext = append(a_ext, a...)
  26. for i := len(a); i < gsize; i++ {
  27. a_ext = append(a_ext, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
  28. }
  29. elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  30. table = append(table, elG1)
  31. last_pow2 := 1
  32. nelems := 0
  33. for i := 1; i < 1<<gsize; i++ {
  34. elG1 := new(bn256.G1)
  35. // if power of 2
  36. if i&(i-1) == 0 {
  37. last_pow2 = i
  38. elG1.Set(a_ext[nelems])
  39. nelems++
  40. } else {
  41. elG1.Add(table[last_pow2], table[i-last_pow2])
  42. // TODO bn256 doesn't export MakeAffine function. We need to fork repo
  43. //table[i].MakeAffine()
  44. }
  45. table = append(table, elG1)
  46. }
  47. if toaffine {
  48. for i := 0; i < len(table); i++ {
  49. info := table[i].Marshal()
  50. table[i].Unmarshal(info)
  51. }
  52. }
  53. t.data = table
  54. }
  55. func (t TableG1) Marshal() []byte {
  56. info := make([]byte, 0)
  57. for _, el := range t.data {
  58. info = append(info, el.Marshal()...)
  59. }
  60. return info
  61. }
  62. // Multiply scalar by precomputed table of G1 elements
  63. func (t *TableG1) MulTableG1(k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
  64. // We need at least gsize elements. If not enough, fill with 0
  65. k_ext := make([]*big.Int, 0)
  66. k_ext = append(k_ext, k...)
  67. for i := len(k); i < gsize; i++ {
  68. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  69. }
  70. Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  71. msb := getMsb(k_ext)
  72. for i := msb - 1; i >= 0; i-- {
  73. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  74. Q = new(bn256.G1).Add(Q, Q)
  75. b := getBit(k_ext, i)
  76. if b != 0 {
  77. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  78. Q.Add(Q, t.data[b])
  79. }
  80. }
  81. if Q_prev != nil {
  82. return Q.Add(Q, Q_prev)
  83. } else {
  84. return Q
  85. }
  86. }
  87. // Multiply scalar by precomputed table of G1 elements without intermediate doubling
  88. func MulTableNoDoubleG1(t []TableG1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
  89. // We need at least gsize elements. If not enough, fill with 0
  90. min_nelems := len(t) * gsize
  91. k_ext := make([]*big.Int, 0)
  92. k_ext = append(k_ext, k...)
  93. for i := len(k); i < min_nelems; i++ {
  94. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  95. }
  96. // Init Adders
  97. nbitsQ := cryptoConstants.Q.BitLen()
  98. Q := make([]*bn256.G1, nbitsQ)
  99. for i := 0; i < nbitsQ; i++ {
  100. Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  101. }
  102. // Perform bitwise addition
  103. for j := 0; j < len(t); j++ {
  104. msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
  105. for i := msb - 1; i >= 0; i-- {
  106. b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
  107. if b != 0 {
  108. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  109. Q[i].Add(Q[i], t[j].data[b])
  110. }
  111. }
  112. }
  113. // Consolidate Addition
  114. R := new(bn256.G1).Set(Q[nbitsQ-1])
  115. for i := nbitsQ - 1; i > 0; i-- {
  116. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  117. R = new(bn256.G1).Add(R, R)
  118. R.Add(R, Q[i-1])
  119. }
  120. if Q_prev != nil {
  121. return R.Add(R, Q_prev)
  122. } else {
  123. return R
  124. }
  125. }
  126. // Compute tables within function. This solution should still be faster than std multiplication
  127. // for gsize = 7
  128. func ScalarMultG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
  129. ntables := int((len(a) + gsize - 1) / gsize)
  130. table := TableG1{}
  131. Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
  132. for i := 0; i < ntables-1; i++ {
  133. table.NewTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
  134. Q = table.MulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
  135. }
  136. table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
  137. Q = table.MulTableG1(k[(ntables-1)*gsize:], Q, gsize)
  138. if Q_prev != nil {
  139. return Q.Add(Q, Q_prev)
  140. } else {
  141. return Q
  142. }
  143. }
  144. // Multiply scalar by precomputed table of G1 elements without intermediate doubling
  145. func ScalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, Q_prev *bn256.G1, gsize int) *bn256.G1 {
  146. ntables := int((len(a) + gsize - 1) / gsize)
  147. table := TableG1{}
  148. // We need at least gsize elements. If not enough, fill with 0
  149. min_nelems := ntables * gsize
  150. k_ext := make([]*big.Int, 0)
  151. k_ext = append(k_ext, k...)
  152. for i := len(k); i < min_nelems; i++ {
  153. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  154. }
  155. // Init Adders
  156. nbitsQ := cryptoConstants.Q.BitLen()
  157. Q := make([]*bn256.G1, nbitsQ)
  158. for i := 0; i < nbitsQ; i++ {
  159. Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  160. }
  161. // Perform bitwise addition
  162. for j := 0; j < ntables-1; j++ {
  163. table.NewTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
  164. msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
  165. for i := msb - 1; i >= 0; i-- {
  166. b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
  167. if b != 0 {
  168. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  169. Q[i].Add(Q[i], table.data[b])
  170. }
  171. }
  172. }
  173. table.NewTableG1(a[(ntables-1)*gsize:], gsize, false)
  174. msb := getMsb(k_ext[(ntables-1)*gsize:])
  175. for i := msb - 1; i >= 0; i-- {
  176. b := getBit(k_ext[(ntables-1)*gsize:], i)
  177. if b != 0 {
  178. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  179. Q[i].Add(Q[i], table.data[b])
  180. }
  181. }
  182. // Consolidate Addition
  183. R := new(bn256.G1).Set(Q[nbitsQ-1])
  184. for i := nbitsQ - 1; i > 0; i-- {
  185. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  186. R = new(bn256.G1).Add(R, R)
  187. R.Add(R, Q[i-1])
  188. }
  189. if Q_prev != nil {
  190. return R.Add(R, Q_prev)
  191. } else {
  192. return R
  193. }
  194. }
/////
// TODO - How can avoid replicating code in G2?
//G2

// TableG2 holds a precomputed table of G2 subset sums used to speed up
// multi-scalar multiplication (see NewTableG2 for the table layout).
type TableG2 struct {
	data []*bn256.G2
}
// GetData returns the underlying table of precomputed G2 points.
func (t TableG2) GetData() []*bn256.G2 {
	return t.data
}
  204. // Compute table of gsize elements as ::
  205. // Table[0] = Inf
  206. // Table[1] = a[0]
  207. // Table[2] = a[1]
  208. // Table[3] = a[0]+a[1]
  209. // .....
  210. // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
  211. // TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
  212. func (t *TableG2) NewTableG2(a []*bn256.G2, gsize int, toaffine bool) {
  213. // EC table
  214. table := make([]*bn256.G2, 0)
  215. // We need at least gsize elements. If not enough, fill with 0
  216. a_ext := make([]*bn256.G2, 0)
  217. a_ext = append(a_ext, a...)
  218. for i := len(a); i < gsize; i++ {
  219. a_ext = append(a_ext, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
  220. }
  221. elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  222. table = append(table, elG2)
  223. last_pow2 := 1
  224. nelems := 0
  225. for i := 1; i < 1<<gsize; i++ {
  226. elG2 := new(bn256.G2)
  227. // if power of 2
  228. if i&(i-1) == 0 {
  229. last_pow2 = i
  230. elG2.Set(a_ext[nelems])
  231. nelems++
  232. } else {
  233. elG2.Add(table[last_pow2], table[i-last_pow2])
  234. // TODO bn256 doesn't export MakeAffine function. We need to fork repo
  235. //table[i].MakeAffine()
  236. }
  237. table = append(table, elG2)
  238. }
  239. if toaffine {
  240. for i := 0; i < len(table); i++ {
  241. info := table[i].Marshal()
  242. table[i].Unmarshal(info)
  243. }
  244. }
  245. t.data = table
  246. }
  247. func (t TableG2) Marshal() []byte {
  248. info := make([]byte, 0)
  249. for _, el := range t.data {
  250. info = append(info, el.Marshal()...)
  251. }
  252. return info
  253. }
  254. // Multiply scalar by precomputed table of G2 elements
  255. func (t *TableG2) MulTableG2(k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
  256. // We need at least gsize elements. If not enough, fill with 0
  257. k_ext := make([]*big.Int, 0)
  258. k_ext = append(k_ext, k...)
  259. for i := len(k); i < gsize; i++ {
  260. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  261. }
  262. Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  263. msb := getMsb(k_ext)
  264. for i := msb - 1; i >= 0; i-- {
  265. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  266. Q = new(bn256.G2).Add(Q, Q)
  267. b := getBit(k_ext, i)
  268. if b != 0 {
  269. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  270. Q.Add(Q, t.data[b])
  271. }
  272. }
  273. if Q_prev != nil {
  274. return Q.Add(Q, Q_prev)
  275. } else {
  276. return Q
  277. }
  278. }
  279. // Multiply scalar by precomputed table of G2 elements without intermediate doubling
  280. func MulTableNoDoubleG2(t []TableG2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
  281. // We need at least gsize elements. If not enough, fill with 0
  282. min_nelems := len(t) * gsize
  283. k_ext := make([]*big.Int, 0)
  284. k_ext = append(k_ext, k...)
  285. for i := len(k); i < min_nelems; i++ {
  286. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  287. }
  288. // Init Adders
  289. nbitsQ := cryptoConstants.Q.BitLen()
  290. Q := make([]*bn256.G2, nbitsQ)
  291. for i := 0; i < nbitsQ; i++ {
  292. Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  293. }
  294. // Perform bitwise addition
  295. for j := 0; j < len(t); j++ {
  296. msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
  297. for i := msb - 1; i >= 0; i-- {
  298. b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
  299. if b != 0 {
  300. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  301. Q[i].Add(Q[i], t[j].data[b])
  302. }
  303. }
  304. }
  305. // Consolidate Addition
  306. R := new(bn256.G2).Set(Q[nbitsQ-1])
  307. for i := nbitsQ - 1; i > 0; i-- {
  308. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  309. R = new(bn256.G2).Add(R, R)
  310. R.Add(R, Q[i-1])
  311. }
  312. if Q_prev != nil {
  313. return R.Add(R, Q_prev)
  314. } else {
  315. return R
  316. }
  317. }
  318. // Compute tables within function. This solution should still be faster than std multiplication
  319. // for gsize = 7
  320. func ScalarMultG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
  321. ntables := int((len(a) + gsize - 1) / gsize)
  322. table := TableG2{}
  323. Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
  324. for i := 0; i < ntables-1; i++ {
  325. table.NewTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
  326. Q = table.MulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
  327. }
  328. table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
  329. Q = table.MulTableG2(k[(ntables-1)*gsize:], Q, gsize)
  330. if Q_prev != nil {
  331. return Q.Add(Q, Q_prev)
  332. } else {
  333. return Q
  334. }
  335. }
  336. // Multiply scalar by precomputed table of G2 elements without intermediate doubling
  337. func ScalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, Q_prev *bn256.G2, gsize int) *bn256.G2 {
  338. ntables := int((len(a) + gsize - 1) / gsize)
  339. table := TableG2{}
  340. // We need at least gsize elements. If not enough, fill with 0
  341. min_nelems := ntables * gsize
  342. k_ext := make([]*big.Int, 0)
  343. k_ext = append(k_ext, k...)
  344. for i := len(k); i < min_nelems; i++ {
  345. k_ext = append(k_ext, new(big.Int).SetUint64(0))
  346. }
  347. // Init Adders
  348. nbitsQ := cryptoConstants.Q.BitLen()
  349. Q := make([]*bn256.G2, nbitsQ)
  350. for i := 0; i < nbitsQ; i++ {
  351. Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  352. }
  353. // Perform bitwise addition
  354. for j := 0; j < ntables-1; j++ {
  355. table.NewTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
  356. msb := getMsb(k_ext[j*gsize : (j+1)*gsize])
  357. for i := msb - 1; i >= 0; i-- {
  358. b := getBit(k_ext[j*gsize:(j+1)*gsize], i)
  359. if b != 0 {
  360. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  361. Q[i].Add(Q[i], table.data[b])
  362. }
  363. }
  364. }
  365. table.NewTableG2(a[(ntables-1)*gsize:], gsize, false)
  366. msb := getMsb(k_ext[(ntables-1)*gsize:])
  367. for i := msb - 1; i >= 0; i-- {
  368. b := getBit(k_ext[(ntables-1)*gsize:], i)
  369. if b != 0 {
  370. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  371. Q[i].Add(Q[i], table.data[b])
  372. }
  373. }
  374. // Consolidate Addition
  375. R := new(bn256.G2).Set(Q[nbitsQ-1])
  376. for i := nbitsQ - 1; i > 0; i-- {
  377. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  378. R = new(bn256.G2).Add(R, R)
  379. R.Add(R, Q[i-1])
  380. }
  381. if Q_prev != nil {
  382. return R.Add(R, Q_prev)
  383. } else {
  384. return R
  385. }
  386. }
  387. // Return most significant bit position in a group of Big Integers
  388. func getMsb(k []*big.Int) int {
  389. msb := 0
  390. for _, el := range k {
  391. tmp_msb := el.BitLen()
  392. if tmp_msb > msb {
  393. msb = tmp_msb
  394. }
  395. }
  396. return msb
  397. }
  398. // Return ith bit in group of Big Integers
  399. func getBit(k []*big.Int, i int) uint {
  400. table_idx := uint(0)
  401. for idx, el := range k {
  402. b := el.Bit(i)
  403. table_idx += (b << idx)
  404. }
  405. return table_idx
  406. }