You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

463 lines
12 KiB

  1. package prover
  2. import (
  3. "math/big"
  4. bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
  5. cryptoConstants "github.com/iden3/go-iden3-crypto/constants"
  6. )
// tableG1 holds a precomputed table of G1 points used by the windowed
// multi-scalar multiplication routines (see newTableG1 for the layout).
type tableG1 struct {
	data []*bn256.G1
}
// getData returns the underlying slice of precomputed G1 points.
func (t tableG1) getData() []*bn256.G1 {
	return t.data
}
  13. // Compute table of gsize elements as ::
  14. // Table[0] = Inf
  15. // Table[1] = a[0]
  16. // Table[2] = a[1]
  17. // Table[3] = a[0]+a[1]
  18. // .....
  19. // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
  20. func (t *tableG1) newTableG1(a []*bn256.G1, gsize int, toaffine bool) {
  21. // EC table
  22. table := make([]*bn256.G1, 0)
  23. // We need at least gsize elements. If not enough, fill with 0
  24. aExt := make([]*bn256.G1, 0)
  25. aExt = append(aExt, a...)
  26. for i := len(a); i < gsize; i++ {
  27. aExt = append(aExt, new(bn256.G1).ScalarBaseMult(big.NewInt(0)))
  28. }
  29. elG1 := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  30. table = append(table, elG1)
  31. lastPow2 := 1
  32. nelems := 0
  33. for i := 1; i < 1<<gsize; i++ {
  34. elG1 := new(bn256.G1)
  35. // if power of 2
  36. if i&(i-1) == 0 {
  37. lastPow2 = i
  38. elG1.Set(aExt[nelems])
  39. nelems++
  40. } else {
  41. elG1.Add(table[lastPow2], table[i-lastPow2])
  42. // TODO bn256 doesn't export MakeAffine function. We need to fork repo
  43. //table[i].MakeAffine()
  44. }
  45. table = append(table, elG1)
  46. }
  47. if toaffine {
  48. for i := 0; i < len(table); i++ {
  49. info := table[i].Marshal()
  50. table[i].Unmarshal(info)
  51. }
  52. }
  53. t.data = table
  54. }
  55. func (t tableG1) Marshal() []byte {
  56. info := make([]byte, 0)
  57. for _, el := range t.data {
  58. info = append(info, el.Marshal()...)
  59. }
  60. return info
  61. }
  62. // Multiply scalar by precomputed table of G1 elements
  63. func (t *tableG1) mulTableG1(k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
  64. // We need at least gsize elements. If not enough, fill with 0
  65. kExt := make([]*big.Int, 0)
  66. kExt = append(kExt, k...)
  67. for i := len(k); i < gsize; i++ {
  68. kExt = append(kExt, new(big.Int).SetUint64(0))
  69. }
  70. Q := new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  71. msb := getMsb(kExt)
  72. for i := msb - 1; i >= 0; i-- {
  73. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  74. Q = new(bn256.G1).Add(Q, Q)
  75. b := getBit(kExt, i)
  76. if b != 0 {
  77. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  78. Q.Add(Q, t.data[b])
  79. }
  80. }
  81. if qPrev != nil {
  82. return Q.Add(Q, qPrev)
  83. }
  84. return Q
  85. }
  86. // Multiply scalar by precomputed table of G1 elements without intermediate doubling
  87. func mulTableNoDoubleG1(t []tableG1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
  88. // We need at least gsize elements. If not enough, fill with 0
  89. minNElems := len(t) * gsize
  90. kExt := make([]*big.Int, 0)
  91. kExt = append(kExt, k...)
  92. for i := len(k); i < minNElems; i++ {
  93. kExt = append(kExt, new(big.Int).SetUint64(0))
  94. }
  95. // Init Adders
  96. nbitsQ := cryptoConstants.Q.BitLen()
  97. Q := make([]*bn256.G1, nbitsQ)
  98. for i := 0; i < nbitsQ; i++ {
  99. Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  100. }
  101. // Perform bitwise addition
  102. for j := 0; j < len(t); j++ {
  103. msb := getMsb(kExt[j*gsize : (j+1)*gsize])
  104. for i := msb - 1; i >= 0; i-- {
  105. b := getBit(kExt[j*gsize:(j+1)*gsize], i)
  106. if b != 0 {
  107. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  108. Q[i].Add(Q[i], t[j].data[b])
  109. }
  110. }
  111. }
  112. // Consolidate Addition
  113. R := new(bn256.G1).Set(Q[nbitsQ-1])
  114. for i := nbitsQ - 1; i > 0; i-- {
  115. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  116. R = new(bn256.G1).Add(R, R)
  117. R.Add(R, Q[i-1])
  118. }
  119. if qPrev != nil {
  120. return R.Add(R, qPrev)
  121. }
  122. return R
  123. }
  124. // Compute tables within function. This solution should still be faster than std multiplication
  125. // for gsize = 7
  126. func scalarMultG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
  127. ntables := int((len(a) + gsize - 1) / gsize)
  128. table := tableG1{}
  129. Q := new(bn256.G1).ScalarBaseMult(new(big.Int))
  130. for i := 0; i < ntables-1; i++ {
  131. table.newTableG1(a[i*gsize:(i+1)*gsize], gsize, false)
  132. Q = table.mulTableG1(k[i*gsize:(i+1)*gsize], Q, gsize)
  133. }
  134. table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
  135. Q = table.mulTableG1(k[(ntables-1)*gsize:], Q, gsize)
  136. if qPrev != nil {
  137. return Q.Add(Q, qPrev)
  138. }
  139. return Q
  140. }
  141. // Multiply scalar by precomputed table of G1 elements without intermediate doubling
  142. func scalarMultNoDoubleG1(a []*bn256.G1, k []*big.Int, qPrev *bn256.G1, gsize int) *bn256.G1 {
  143. ntables := int((len(a) + gsize - 1) / gsize)
  144. table := tableG1{}
  145. // We need at least gsize elements. If not enough, fill with 0
  146. minNElems := ntables * gsize
  147. kExt := make([]*big.Int, 0)
  148. kExt = append(kExt, k...)
  149. for i := len(k); i < minNElems; i++ {
  150. kExt = append(kExt, new(big.Int).SetUint64(0))
  151. }
  152. // Init Adders
  153. nbitsQ := cryptoConstants.Q.BitLen()
  154. Q := make([]*bn256.G1, nbitsQ)
  155. for i := 0; i < nbitsQ; i++ {
  156. Q[i] = new(bn256.G1).ScalarBaseMult(big.NewInt(0))
  157. }
  158. // Perform bitwise addition
  159. for j := 0; j < ntables-1; j++ {
  160. table.newTableG1(a[j*gsize:(j+1)*gsize], gsize, false)
  161. msb := getMsb(kExt[j*gsize : (j+1)*gsize])
  162. for i := msb - 1; i >= 0; i-- {
  163. b := getBit(kExt[j*gsize:(j+1)*gsize], i)
  164. if b != 0 {
  165. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  166. Q[i].Add(Q[i], table.data[b])
  167. }
  168. }
  169. }
  170. table.newTableG1(a[(ntables-1)*gsize:], gsize, false)
  171. msb := getMsb(kExt[(ntables-1)*gsize:])
  172. for i := msb - 1; i >= 0; i-- {
  173. b := getBit(kExt[(ntables-1)*gsize:], i)
  174. if b != 0 {
  175. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  176. Q[i].Add(Q[i], table.data[b])
  177. }
  178. }
  179. // Consolidate Addition
  180. R := new(bn256.G1).Set(Q[nbitsQ-1])
  181. for i := nbitsQ - 1; i > 0; i-- {
  182. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  183. R = new(bn256.G1).Add(R, R)
  184. R.Add(R, Q[i-1])
  185. }
  186. if qPrev != nil {
  187. return R.Add(R, qPrev)
  188. }
  189. return R
  190. }
  191. /////
  192. // TODO - How can avoid replicating code in G2?
  193. //G2
// tableG2 holds a precomputed table of G2 points used by the windowed
// multi-scalar multiplication routines (see newTableG2 for the layout).
type tableG2 struct {
	data []*bn256.G2
}
// getData returns the underlying slice of precomputed G2 points.
func (t tableG2) getData() []*bn256.G2 {
	return t.data
}
  200. // Compute table of gsize elements as ::
  201. // Table[0] = Inf
  202. // Table[1] = a[0]
  203. // Table[2] = a[1]
  204. // Table[3] = a[0]+a[1]
  205. // .....
  206. // Table[(1<<gsize)-1] = a[0]+a[1]+...+a[gsize-1]
  207. // TODO -> toaffine = True doesnt work. Problem with Marshal/Unmarshal
  208. func (t *tableG2) newTableG2(a []*bn256.G2, gsize int, toaffine bool) {
  209. // EC table
  210. table := make([]*bn256.G2, 0)
  211. // We need at least gsize elements. If not enough, fill with 0
  212. aExt := make([]*bn256.G2, 0)
  213. aExt = append(aExt, a...)
  214. for i := len(a); i < gsize; i++ {
  215. aExt = append(aExt, new(bn256.G2).ScalarBaseMult(big.NewInt(0)))
  216. }
  217. elG2 := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  218. table = append(table, elG2)
  219. lastPow2 := 1
  220. nelems := 0
  221. for i := 1; i < 1<<gsize; i++ {
  222. elG2 := new(bn256.G2)
  223. // if power of 2
  224. if i&(i-1) == 0 {
  225. lastPow2 = i
  226. elG2.Set(aExt[nelems])
  227. nelems++
  228. } else {
  229. elG2.Add(table[lastPow2], table[i-lastPow2])
  230. // TODO bn256 doesn't export MakeAffine function. We need to fork repo
  231. //table[i].MakeAffine()
  232. }
  233. table = append(table, elG2)
  234. }
  235. if toaffine {
  236. for i := 0; i < len(table); i++ {
  237. info := table[i].Marshal()
  238. table[i].Unmarshal(info)
  239. }
  240. }
  241. t.data = table
  242. }
  243. func (t tableG2) Marshal() []byte {
  244. info := make([]byte, 0)
  245. for _, el := range t.data {
  246. info = append(info, el.Marshal()...)
  247. }
  248. return info
  249. }
  250. // Multiply scalar by precomputed table of G2 elements
  251. func (t *tableG2) mulTableG2(k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
  252. // We need at least gsize elements. If not enough, fill with 0
  253. kExt := make([]*big.Int, 0)
  254. kExt = append(kExt, k...)
  255. for i := len(k); i < gsize; i++ {
  256. kExt = append(kExt, new(big.Int).SetUint64(0))
  257. }
  258. Q := new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  259. msb := getMsb(kExt)
  260. for i := msb - 1; i >= 0; i-- {
  261. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  262. Q = new(bn256.G2).Add(Q, Q)
  263. b := getBit(kExt, i)
  264. if b != 0 {
  265. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  266. Q.Add(Q, t.data[b])
  267. }
  268. }
  269. if qPrev != nil {
  270. return Q.Add(Q, qPrev)
  271. }
  272. return Q
  273. }
  274. // Multiply scalar by precomputed table of G2 elements without intermediate doubling
  275. func mulTableNoDoubleG2(t []tableG2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
  276. // We need at least gsize elements. If not enough, fill with 0
  277. minNElems := len(t) * gsize
  278. kExt := make([]*big.Int, 0)
  279. kExt = append(kExt, k...)
  280. for i := len(k); i < minNElems; i++ {
  281. kExt = append(kExt, new(big.Int).SetUint64(0))
  282. }
  283. // Init Adders
  284. nbitsQ := cryptoConstants.Q.BitLen()
  285. Q := make([]*bn256.G2, nbitsQ)
  286. for i := 0; i < nbitsQ; i++ {
  287. Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  288. }
  289. // Perform bitwise addition
  290. for j := 0; j < len(t); j++ {
  291. msb := getMsb(kExt[j*gsize : (j+1)*gsize])
  292. for i := msb - 1; i >= 0; i-- {
  293. b := getBit(kExt[j*gsize:(j+1)*gsize], i)
  294. if b != 0 {
  295. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  296. Q[i].Add(Q[i], t[j].data[b])
  297. }
  298. }
  299. }
  300. // Consolidate Addition
  301. R := new(bn256.G2).Set(Q[nbitsQ-1])
  302. for i := nbitsQ - 1; i > 0; i-- {
  303. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  304. R = new(bn256.G2).Add(R, R)
  305. R.Add(R, Q[i-1])
  306. }
  307. if qPrev != nil {
  308. return R.Add(R, qPrev)
  309. }
  310. return R
  311. }
  312. // Compute tables within function. This solution should still be faster than std multiplication
  313. // for gsize = 7
  314. func scalarMultG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
  315. ntables := int((len(a) + gsize - 1) / gsize)
  316. table := tableG2{}
  317. Q := new(bn256.G2).ScalarBaseMult(new(big.Int))
  318. for i := 0; i < ntables-1; i++ {
  319. table.newTableG2(a[i*gsize:(i+1)*gsize], gsize, false)
  320. Q = table.mulTableG2(k[i*gsize:(i+1)*gsize], Q, gsize)
  321. }
  322. table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
  323. Q = table.mulTableG2(k[(ntables-1)*gsize:], Q, gsize)
  324. if qPrev != nil {
  325. return Q.Add(Q, qPrev)
  326. }
  327. return Q
  328. }
  329. // Multiply scalar by precomputed table of G2 elements without intermediate doubling
  330. func scalarMultNoDoubleG2(a []*bn256.G2, k []*big.Int, qPrev *bn256.G2, gsize int) *bn256.G2 {
  331. ntables := int((len(a) + gsize - 1) / gsize)
  332. table := tableG2{}
  333. // We need at least gsize elements. If not enough, fill with 0
  334. minNElems := ntables * gsize
  335. kExt := make([]*big.Int, 0)
  336. kExt = append(kExt, k...)
  337. for i := len(k); i < minNElems; i++ {
  338. kExt = append(kExt, new(big.Int).SetUint64(0))
  339. }
  340. // Init Adders
  341. nbitsQ := cryptoConstants.Q.BitLen()
  342. Q := make([]*bn256.G2, nbitsQ)
  343. for i := 0; i < nbitsQ; i++ {
  344. Q[i] = new(bn256.G2).ScalarBaseMult(big.NewInt(0))
  345. }
  346. // Perform bitwise addition
  347. for j := 0; j < ntables-1; j++ {
  348. table.newTableG2(a[j*gsize:(j+1)*gsize], gsize, false)
  349. msb := getMsb(kExt[j*gsize : (j+1)*gsize])
  350. for i := msb - 1; i >= 0; i-- {
  351. b := getBit(kExt[j*gsize:(j+1)*gsize], i)
  352. if b != 0 {
  353. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  354. Q[i].Add(Q[i], table.data[b])
  355. }
  356. }
  357. }
  358. table.newTableG2(a[(ntables-1)*gsize:], gsize, false)
  359. msb := getMsb(kExt[(ntables-1)*gsize:])
  360. for i := msb - 1; i >= 0; i-- {
  361. b := getBit(kExt[(ntables-1)*gsize:], i)
  362. if b != 0 {
  363. // TODO. bn256 doesn't export mixed addition (Jacobian + Affine), which is more efficient.
  364. Q[i].Add(Q[i], table.data[b])
  365. }
  366. }
  367. // Consolidate Addition
  368. R := new(bn256.G2).Set(Q[nbitsQ-1])
  369. for i := nbitsQ - 1; i > 0; i-- {
  370. // TODO. bn256 doesn't export double operation. We will need to fork repo and export it
  371. R = new(bn256.G2).Add(R, R)
  372. R.Add(R, Q[i-1])
  373. }
  374. if qPrev != nil {
  375. return R.Add(R, qPrev)
  376. }
  377. return R
  378. }
  379. // Return most significant bit position in a group of Big Integers
  380. func getMsb(k []*big.Int) int {
  381. msb := 0
  382. for _, el := range k {
  383. tmpMsb := el.BitLen()
  384. if tmpMsb > msb {
  385. msb = tmpMsb
  386. }
  387. }
  388. return msb
  389. }
  390. // Return ith bit in group of Big Integers
  391. func getBit(k []*big.Int, i int) uint {
  392. tableIdx := uint(0)
  393. for idx, el := range k {
  394. b := el.Bit(i)
  395. tableIdx += (b << idx)
  396. }
  397. return tableIdx
  398. }