From 0620ef544848996121b90f96f0cf2f3aa8c0bf7d Mon Sep 17 00:00:00 2001
From: silur
Date: Thu, 14 Sep 2023 10:12:12 +0200
Subject: [PATCH] make second step of the prover happen in O(n)

---
 Cargo.toml         |  2 +-
 src/protogalaxy.rs | 91 ++++++++++++++++++++++------------------------
 2 files changed, 44 insertions(+), 49 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index c68fe1f..791ad6c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,6 @@ ark-poly = "0.4.0"
 ark-serialize = { version = "0.4.0", default-features = false, features = [ "derive" ] }
 rand = { version = "0.8", features = [ "std", "std_rng" ] }
 ark-crypto-primitives = { version = "^0.4.0", default-features = false, features = [ "r1cs", "snark", "sponge", "crh" ] }
-
+getrandom = { version = "0.2.10", features = ["js"] }
 [dev-dependencies]
 ark-bls12-381 = "0.4.0"
diff --git a/src/protogalaxy.rs b/src/protogalaxy.rs
index 612aefd..54fc6f9 100644
--- a/src/protogalaxy.rs
+++ b/src/protogalaxy.rs
@@ -4,7 +4,6 @@ use ark_ff::fields::PrimeField;
 use ark_std::log2;
 use ark_std::{cfg_into_iter, Zero};
 use std::marker::PhantomData;
-use std::ops::Add;
 
 use ark_poly::{
     univariate::{DensePolynomial, SparsePolynomial},
@@ -65,12 +64,9 @@ where
         let f_w = eval_f(r1cs, &w.w);
 
         // F(X)
-        let mut F_X: SparsePolynomial<F> = SparsePolynomial::zero();
-        for (i, f_w_i) in f_w.iter().enumerate() {
-            let lhs = pow_i_over_x::<F>(i, &instance.betas, &deltas);
-            let curr = &lhs * *f_w_i;
-            F_X = F_X.add(curr);
-        }
+
+        let F_X: SparsePolynomial<F> =
+            calc_f_from_btree(&f_w, &instance.betas, &deltas);
 
         let F_X_dense = DensePolynomial::from(F_X.clone());
         transcript.add_vec(&F_X_dense.coeffs);
@@ -287,24 +283,6 @@ fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
     r
 }
 
-fn pow_i_over_x<F: PrimeField>(i: usize, betas: &Vec<F>, deltas: &Vec<F>) -> SparsePolynomial<F> {
-    assert_eq!(betas.len(), deltas.len());
-
-    let n = 2_u64.pow(betas.len() as u32);
-    let b = bit_decompose(i as u64, n as usize);
-
-    let mut r: SparsePolynomial<F> =
-        SparsePolynomial::<F>::from_coefficients_vec(vec![(0, F::one())]); // start with r(x) = 1
-    for (j, beta_j) in betas.iter().enumerate() {
-        if b[j] {
-            let curr: SparsePolynomial<F> =
-                SparsePolynomial::<F>::from_coefficients_vec(vec![(0, *beta_j), (1, deltas[j])]);
-            r = r.mul(&curr);
-        }
-    }
-    r
-}
-
 // lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
 fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
     let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
@@ -317,6 +295,46 @@ fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<D
     lagrange_polynomials
 }
 
+fn calc_f_from_btree<F: PrimeField>(fw: &[F], betas: &[F], deltas: &[F]) -> SparsePolynomial<F> {
+    assert_eq!(fw.len() & (fw.len() - 1), 0);
+    assert_eq!(betas.len(), deltas.len());
+    let mut layers: Vec<Vec<SparsePolynomial<F>>> = Vec::new();
+    let leaves: Vec<SparsePolynomial<F>> = fw
+        .iter()
+        .enumerate()
+        .map(|e| SparsePolynomial::<F>::from_coefficients_slice(&[(0, *e.1)]))
+        .collect();
+    layers.push(leaves.to_vec());
+    let mut currentNodes = leaves.clone();
+    while currentNodes.len() > 1 {
+        let index = layers.len();
+        let limit: usize = (2 * currentNodes.len())
+            - 2usize.pow(
+                (currentNodes.len() & (currentNodes.len() - 1))
+                    .try_into()
+                    .unwrap(),
+            );
+        layers.push(vec![]);
+        for (i, ni) in currentNodes.iter().enumerate().step_by(2) {
+            if i >= limit {
+                layers[index] = currentNodes[0..limit].to_vec();
+                break;
+            }
+            let left = ni.clone();
+            let right = SparsePolynomial::<F>::from_coefficients_vec(vec![
+                (0, betas[layers.len() - 2]),  // FIXME
+                (1, deltas[layers.len() - 2]), // FIXME
+            ])
+            .mul(&currentNodes[i + 1]);
+
+            layers[index].push(left + right);
+        }
+        currentNodes = layers[index].clone();
+    }
+    let root_index = layers.len() - 1;
+    layers[root_index][0].clone()
+}
+
 #[derive(Clone, Debug)]
 pub struct R1CS<F: PrimeField> {
     pub A: Vec<Vec<F>>,
@@ -445,29 +463,6 @@ mod tests {
         }
     }
 
-    #[test]
-    fn test_pow_i_over_x() {
-        let mut rng = ark_std::test_rng();
-        let t = 3;
-        let n = 8;
-        let beta = Fr::rand(&mut rng);
-        let delta = Fr::rand(&mut rng);
-        let betas = powers_of_beta(beta, t);
-        let deltas = powers_of_beta(delta, t);
-
-        // compute b + X*d, with X=rand
-        let x = Fr::rand(&mut rng);
-        let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));
-
-        // assert that computing pow_over_x of betas,deltas, is equivalent to first computing the
-        // vector [betas+X*deltas] and then computing pow_i over it
-        for i in 0..n {
-            let pow_i1 = pow_i_over_x(i, &betas, &deltas);
-            let pow_i2 = pow_i(i, &bxd);
-            assert_eq!(pow_i1.evaluate(&x), pow_i2);
-        }
-    }
-
     #[test]
     fn test_eval_f() {
         let r1cs = get_test_r1cs::<Fr>();
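Note on testing (not part of the patch): the diff removes test_pow_i_over_x together with pow_i_over_x, but adds no test for the new calc_f_from_btree. Below is a minimal sketch of such a check, assuming it lives inside the existing tests module so the helpers used by the deleted test (powers_of_beta, pow_i, vec_add, vec_scalar_mul, test_rng, Fr, and the Polynomial trait for evaluate) are in scope; the test name and the random fw vector are illustrative. It checks the intended identity F(x) = sum_i pow_i(i, betas + x*deltas) * fw[i] at a random point x for a power-of-two number of leaves, so whether it passes as-is also depends on how the FIXME-marked beta/delta indexing is resolved.

#[test]
fn test_calc_f_from_btree() {
    let mut rng = ark_std::test_rng();
    let t = 3;
    let n = 8; // number of leaves, must be 2^t here
    let beta = Fr::rand(&mut rng);
    let delta = Fr::rand(&mut rng);
    let betas = powers_of_beta(beta, t);
    let deltas = powers_of_beta(delta, t);
    // random stand-in for the f(w) evaluations used as tree leaves
    let fw: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();

    // naive reference: fold x into the betas (betas + x*deltas), then sum the
    // pow_i-weighted leaves, as the removed pow_i_over_x-based code effectively did
    let x = Fr::rand(&mut rng);
    let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));
    let expected: Fr = (0..n).map(|i| pow_i(i, &bxd) * fw[i]).sum();

    // O(n) tree-based computation introduced by this patch
    let f_x = calc_f_from_btree(&fw, &betas, &deltas);
    assert_eq!(f_x.evaluate(&x), expected);
}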