
Optimize native nimfs (#110)

* Optimize the HyperNova `compute_g`, `compute_Ls` and `to_lcccs` methods

- In some tests, increase the size of the test matrices to a more realistic, real-world size.

| method                | matrix size | old version (s) | new version (s) |
| --------------------- | ----------- | --------------- | --------------- |
| compute_g             | 2^8 x 2^8     | 16.48               | 0.16                |
| compute_g             | 2^9 x 2^9     | 122.62              | 0.51                |
| compute_Ls            | 2^8 x 2^8     | 9.73                | 0.11                |
| compute_Ls            | 2^9 x 2^9     | 67.16               | 0.38                |
| to_lcccs              | 2^8 x 2^8     | 4.56                | 0.21                |
| to_lcccs              | 2^9 x 2^9     | 67.65               | 0.84                |

- Note: 2^16 x 2^16 is the actual (upper-bound) size of the circuit; it is not shown in the table because the old version needed too much RAM to even compute it. A sketch of the optimized approach follows below.
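The core change behind these speedups: instead of materializing the MLE of each matrix M_j over s + s' variables and summing M_j(x, y) * z(y) over the boolean hypercube (the old `compute_sum_Mz` path), the new code first computes the vector M_j * z with a sparse matrix-vector product and only then builds a dense MLE over s variables. A minimal sketch, assuming the crate is importable as `folding_schemes` and using the helpers that appear in this diff (`mat_vec_mul`, `dense_vec_to_dense_mle`, `Error::EvaluationFail`):

```rust
use ark_ff::PrimeField;
use ark_poly::MultilinearExtension;
use folding_schemes::utils::mle::dense_vec_to_dense_mle;
use folding_schemes::utils::vec::{mat_vec_mul, SparseMatrix};
use folding_schemes::Error;

/// Evaluate the MLE of M_j * z at a point r without materializing the MLE of M_j.
#[allow(non_snake_case)]
fn eval_Mz_at_r<F: PrimeField>(
    s: usize,              // number of variables (log2 of the padded row count)
    M_j: &SparseMatrix<F>,
    z: &[F],
    r: &[F],               // evaluation point of length s
) -> Result<F, Error> {
    // Sparse matrix-vector product: cost proportional to the non-zero entries of M_j,
    // instead of the 2^(s+s') evaluations of the old matrix-MLE approach.
    let Mz = mat_vec_mul(M_j, z)?;
    // Dense MLE over only s variables, followed by a single point evaluation at r.
    let Mz_mle = dense_vec_to_dense_mle(s, &Mz);
    Mz_mle.evaluate(r).ok_or(Error::EvaluationFail)
}
```

This is the same pattern `to_lcccs`, `compute_sigmas_thetas` and the LCCCS `check_relation` below now use to obtain the v_j / sigma / theta values.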

* Optimize HyperNova's `compute_sigmas_thetas` and `compute_Q`

| method                | matrix size | old version (s) | new version (s) |
| --------------------- | ----------- | --------------- | --------------- |
| compute_sigmas_thetas | 2^8 x 2^8     | 12.86               | 0.13                |
| compute_sigmas_thetas | 2^9 x 2^9     | 100.01              | 0.51                |
| compute_Q             | 2^8 x 2^8     | 4.49                | 0.07                |
| compute_Q             | 2^9 x 2^9     | 70.77               | 0.55                |
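The matrix sizes in these tables can be reproduced with the random R1CS generator added in this PR. A hedged timing sketch for `to_lcccs` (the `R1CS::rand`, `CCS::from_r1cs`, `Pedersen::setup` and `to_lcccs` calls mirror the tests in this diff; the module paths and the `Instant`-based harness are assumptions):

```rust
use std::time::Instant;

use ark_pallas::{Fr, Projective};
use ark_std::{test_rng, UniformRand};
use folding_schemes::ccs::{r1cs::R1CS, CCS};
use folding_schemes::commitment::{pedersen::Pedersen, CommitmentScheme};

fn bench_to_lcccs() {
    let mut rng = test_rng();
    let n = 1 << 8; // 2^8 x 2^8, the smaller size from the table above
    let r1cs = R1CS::<Fr>::rand(&mut rng, n, n);
    let ccs = CCS::from_r1cs(r1cs);
    let z: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let (pedersen_params, _) =
        Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();

    let start = Instant::now();
    let (_lcccs, _wit) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
    println!("to_lcccs on a {n}x{n} CCS took {:?}", start.elapsed());
}
```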

* Optimize `LCCCS::check_relation` & `CCCS::check_relation`, and remove methods made unnecessary by the reimplementations above
update-nifs-interface
arnaucube committed 5 months ago via GitHub
commit 5ea55cf54e
13 changed files with 335 additions and 277 deletions
  1. folding-schemes/src/ccs/mod.rs (+2 -2)
  2. folding-schemes/src/ccs/r1cs.rs (+18 -8)
  3. folding-schemes/src/folding/hypernova/cccs.rs (+32 -39)
  4. folding-schemes/src/folding/hypernova/circuits.rs (+4 -3)
  5. folding-schemes/src/folding/hypernova/lcccs.rs (+66 -46)
  6. folding-schemes/src/folding/hypernova/nimfs.rs (+9 -8)
  7. folding-schemes/src/folding/hypernova/utils.rs (+112 -142)
  8. folding-schemes/src/folding/nova/nifs.rs (+7 -7)
  9. folding-schemes/src/folding/protogalaxy/folding.rs (+3 -3)
  10. folding-schemes/src/lib.rs (+2 -0)
  11. folding-schemes/src/utils/espresso/virtual_polynomial.rs (+5 -2)
  12. folding-schemes/src/utils/mle.rs (+48 -8)
  13. folding-schemes/src/utils/vec.rs (+27 -9)

folding-schemes/src/ccs/mod.rs (+2 -2)

@ -1,7 +1,7 @@
use ark_ff::PrimeField;
use ark_std::log2;
use crate::utils::vec::*;
use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix};
use crate::Error;
pub mod r1cs;
@ -48,7 +48,7 @@ impl CCS {
// complete the hadamard chain
let mut hadamard_result = vec![F::one(); self.m];
for M_j in vec_M_j.into_iter() {
hadamard_result = hadamard(&hadamard_result, &mat_vec_mul_sparse(M_j, z)?)?;
hadamard_result = hadamard(&hadamard_result, &mat_vec_mul(M_j, z)?)?;
}
// multiply by the coefficient of this step

folding-schemes/src/ccs/r1cs.rs (+18 -8)

@ -1,8 +1,9 @@
use ark_ff::PrimeField;
use ark_relations::r1cs::ConstraintSystem;
use ark_std::rand::Rng;
use crate::utils::vec::*;
use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix};
use crate::Error;
use ark_relations::r1cs::ConstraintSystem;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct R1CS<F: PrimeField> {
@ -13,6 +14,15 @@ pub struct R1CS {
}
impl<F: PrimeField> R1CS<F> {
pub fn rand<R: Rng>(rng: &mut R, n_rows: usize, n_cols: usize) -> Self {
Self {
l: 1,
A: SparseMatrix::rand(rng, n_rows, n_cols),
B: SparseMatrix::rand(rng, n_rows, n_cols),
C: SparseMatrix::rand(rng, n_rows, n_cols),
}
}
/// returns a tuple containing (w, x) (witness and public inputs respectively)
pub fn split_z(&self, z: &[F]) -> (Vec<F>, Vec<F>) {
(z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec())
@ -20,9 +30,9 @@ impl R1CS {
/// check that a R1CS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[F]) -> Result<(), Error> {
let Az = mat_vec_mul_sparse(&self.A, z)?;
let Bz = mat_vec_mul_sparse(&self.B, z)?;
let Cz = mat_vec_mul_sparse(&self.C, z)?;
let Az = mat_vec_mul(&self.A, z)?;
let Bz = mat_vec_mul(&self.B, z)?;
let Cz = mat_vec_mul(&self.C, z)?;
let AzBz = hadamard(&Az, &Bz)?;
if AzBz != Cz {
return Err(Error::NotSatisfied);
@ -58,9 +68,9 @@ pub struct RelaxedR1CS {
impl<F: PrimeField> RelaxedR1CS<F> {
/// check that a RelaxedR1CS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[F]) -> Result<(), Error> {
let Az = mat_vec_mul_sparse(&self.A, z)?;
let Bz = mat_vec_mul_sparse(&self.B, z)?;
let Cz = mat_vec_mul_sparse(&self.C, z)?;
let Az = mat_vec_mul(&self.A, z)?;
let Bz = mat_vec_mul(&self.B, z)?;
let Cz = mat_vec_mul(&self.C, z)?;
let uCz = vec_scalar_mul(&Cz, &self.u);
let uCzE = vec_add(&uCz, &self.E)?;
let AzBz = hadamard(&Az, &Bz)?;

folding-schemes/src/folding/hypernova/cccs.rs (+32 -39)

@ -2,21 +2,19 @@ use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_std::One;
use ark_std::Zero;
use std::ops::Add;
use std::sync::Arc;
use ark_std::rand::Rng;
use super::utils::compute_sum_Mz;
use crate::ccs::CCS;
use crate::commitment::{
pedersen::{Params as PedersenParams, Pedersen},
CommitmentScheme,
};
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::matrix_to_mle;
use crate::utils::mle::vec_to_mle;
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::utils::mle::dense_vec_to_dense_mle;
use crate::utils::vec::mat_vec_mul;
use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial};
use crate::Error;
/// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment.
@ -61,41 +59,35 @@ impl CCS {
/// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_q(&self, z: &[F]) -> VirtualPolynomial<F> {
let z_mle = vec_to_mle(self.s_prime, z);
let mut q = VirtualPolynomial::<F>::new(self.s);
pub fn compute_q(&self, z: &[F]) -> Result<VirtualPolynomial<F>, Error> {
let mut q_x = VirtualPolynomial::<F>::new(self.s);
for i in 0..self.q {
let mut prod: VirtualPolynomial<F> = VirtualPolynomial::<F>::new(self.s);
for j in self.S[i].clone() {
let M_j = matrix_to_mle(self.M[j].clone());
let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.s_prime);
// Fold this sum into the running product
if prod.products.is_empty() {
// If this is the first time we are adding something to this virtual polynomial, we need to
// explicitly add the products using add_mle_list()
// XXX is this true? improve API
prod.add_mle_list([Arc::new(sum_Mz)], F::one()).unwrap();
} else {
prod.mul_by_mle(Arc::new(sum_Mz), F::one()).unwrap();
}
let mut Q_k = vec![];
for &j in self.S[i].iter() {
Q_k.push(dense_vec_to_dense_mle(self.s, &mat_vec_mul(&self.M[j], z)?));
}
// Multiply by the product by the coefficient c_i
prod.scalar_mul(&self.c[i]);
// Add it to the running sum
q = q.add(&prod);
q_x.add_mle_list(Q_k.iter().map(|v| Arc::new(v.clone())), self.c[i])?;
}
q
Ok(q_x)
}
/// Computes Q(x) = eq(beta, x) * q(x)
/// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_Q(&self, z: &[F], beta: &[F]) -> VirtualPolynomial<F> {
let q = self.compute_q(z);
q.build_f_hat(beta).unwrap()
pub fn compute_Q(&self, z: &[F], beta: &[F]) -> Result<VirtualPolynomial<F>, Error> {
let eq_beta = build_eq_x_r_vec(beta)?;
let eq_beta_mle = dense_vec_to_dense_mle(self.s, &eq_beta);
let mut Q = VirtualPolynomial::<F>::new(self.s);
for i in 0..self.q {
let mut Q_k = vec![];
for &j in self.S[i].iter() {
Q_k.push(dense_vec_to_dense_mle(self.s, &mat_vec_mul(&self.M[j], z)?));
}
Q_k.push(eq_beta_mle.clone());
Q.add_mle_list(Q_k.iter().map(|v| Arc::new(v.clone())), self.c[i])?;
}
Ok(Q)
}
}
@ -118,9 +110,10 @@ impl CCCS {
[vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();
// A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube
let q_x = ccs.compute_q(&z);
let q_x = ccs.compute_q(&z)?;
for x in BooleanHypercube::new(ccs.s) {
if !q_x.evaluate(&x).unwrap().is_zero() {
if !q_x.evaluate(&x)?.is_zero() {
return Err(Error::NotSatisfied);
}
}
@ -147,7 +140,7 @@ pub mod tests {
let ccs = get_test_ccs::<Fr>();
let z = get_test_z(3);
let q = ccs.compute_q(&z);
let q = ccs.compute_q(&z).unwrap();
// Evaluate inside the hypercube
for x in BooleanHypercube::new(ccs.s) {
@ -171,7 +164,7 @@ pub mod tests {
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Compute Q(x) = eq(beta, x) * q(x).
let Q = ccs.compute_Q(&z, &beta);
let Q = ccs.compute_Q(&z, &beta).unwrap();
// Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y)
// which interpolates the multivariate polynomial q(x) inside the hypercube.
@ -204,9 +197,9 @@ pub mod tests {
// Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
// should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
let q = ccs.compute_q(&z);
let q = ccs.compute_q(&z).unwrap();
for d in BooleanHypercube::new(ccs.s) {
let Q_at_d = ccs.compute_Q(&z, &d);
let Q_at_d = ccs.compute_Q(&z, &d).unwrap();
// Get G(d) by summing over Q_d(x) over the hypercube
let G_at_d = BooleanHypercube::new(ccs.s)
@ -217,7 +210,7 @@ pub mod tests {
// Now test that they should disagree outside of the hypercube
let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let Q_at_r = ccs.compute_Q(&z, &r);
let Q_at_r = ccs.compute_Q(&z, &r).unwrap();
// Get G(d) by summing over Q_d(x) over the hypercube
let G_at_r = BooleanHypercube::new(ccs.s)

folding-schemes/src/folding/hypernova/circuits.rs (+4 -3)

@ -361,7 +361,7 @@ mod tests {
commitment::{pedersen::Pedersen, CommitmentScheme},
folding::hypernova::{
nimfs::NIMFS,
utils::{compute_c, compute_sigmas_and_thetas},
utils::{compute_c, compute_sigmas_thetas},
},
transcript::{
poseidon::{poseidon_canonical_config, PoseidonTranscript, PoseidonTranscriptVar},
@ -409,7 +409,7 @@ mod tests {
cccs_instances.push(inst);
}
let sigmas_thetas = compute_sigmas_and_thetas(&ccs, &z_lcccs, &z_cccs, &r_x_prime);
let sigmas_thetas = compute_sigmas_thetas(&ccs, &z_lcccs, &z_cccs, &r_x_prime).unwrap();
let expected_c = compute_c(
&ccs,
@ -421,7 +421,8 @@ mod tests {
.map(|lcccs| lcccs.r_x.clone())
.collect(),
&r_x_prime,
);
)
.unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
let mut vec_sigmas = Vec::new();

folding-schemes/src/folding/hypernova/lcccs.rs (+66 -46)

@ -1,20 +1,18 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_std::One;
use std::sync::Arc;
use ark_poly::MultilinearExtension;
use ark_std::rand::Rng;
use super::cccs::Witness;
use super::utils::{compute_all_sum_Mz_evals, compute_sum_Mz};
use crate::ccs::CCS;
use crate::commitment::{
pedersen::{Params as PedersenParams, Pedersen},
CommitmentScheme,
};
use crate::utils::mle::{matrix_to_mle, vec_to_mle};
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::utils::mle::dense_vec_to_dense_mle;
use crate::utils::vec::mat_vec_mul;
use crate::Error;
/// Linearized Committed CCS instance
@ -33,12 +31,6 @@ pub struct LCCCS {
}
impl<F: PrimeField> CCS<F> {
/// Compute v_j values of the linearized committed CCS form
/// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
fn compute_v_j(&self, z: &[F], r: &[F]) -> Vec<F> {
compute_all_sum_Mz_evals(&self.M, z, r, self.s_prime)
}
pub fn to_lcccs<R: Rng, C: CurveGroup>(
&self,
rng: &mut R,
@ -54,7 +46,18 @@ impl CCS {
let C = Pedersen::<C, true>::commit(pedersen_params, &w, &r_w)?;
let r_x: Vec<C::ScalarField> = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
let v = self.compute_v_j(z, &r_x);
let Mzs: Vec<DenseMultilinearExtension<F>> = self
.M
.iter()
.map(|M_j| Ok(dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, z)?)))
.collect::<Result<_, Error>>()?;
// compute v_j
let v: Vec<F> = Mzs
.iter()
.map(|Mz| Mz.evaluate(&r_x).ok_or(Error::EvaluationFail))
.collect::<Result<_, Error>>()?;
Ok((
LCCCS::<C> {
@ -70,29 +73,6 @@ impl CCS {
}
impl<C: CurveGroup> LCCCS<C> {
/// Compute all L_j(x) polynomials
pub fn compute_Ls(
&self,
ccs: &CCS<C::ScalarField>,
z: &[C::ScalarField],
) -> Vec<VirtualPolynomial<C::ScalarField>> {
let z_mle = vec_to_mle(ccs.s_prime, z);
// Convert all matrices to MLE
let M_x_y_mle: Vec<DenseMultilinearExtension<C::ScalarField>> =
ccs.M.clone().into_iter().map(matrix_to_mle).collect();
let mut vec_L_j_x = Vec::with_capacity(ccs.t);
for M_j in M_x_y_mle {
let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
let sum_Mz_virtual =
VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one());
let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap();
vec_L_j_x.push(L_j_x);
}
vec_L_j_x
}
/// Perform the check of the LCCCS instance described at section 4.2
pub fn check_relation(
&self,
@ -108,7 +88,15 @@ impl LCCCS {
// check CCS relation
let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
let computed_v = compute_all_sum_Mz_evals(&ccs.M, &z, &self.r_x, ccs.s_prime);
let computed_v: Vec<C::ScalarField> = ccs
.M
.iter()
.map(|M_j| {
let Mz_mle = dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, &z)?);
Mz_mle.evaluate(&self.r_x).ok_or(Error::EvaluationFail)
})
.collect::<Result<_, Error>>()?;
if computed_v != self.v {
return Err(Error::NotSatisfied);
}
@ -118,31 +106,64 @@ impl LCCCS {
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::{Fr, Projective};
use ark_std::test_rng;
use ark_std::One;
use ark_std::UniformRand;
use ark_std::Zero;
use std::sync::Arc;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use super::*;
use crate::ccs::{
r1cs::R1CS,
tests::{get_test_ccs, get_test_z},
};
use crate::utils::hypercube::BooleanHypercube;
use ark_std::test_rng;
use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial};
use ark_pallas::{Fr, Projective};
// method for testing
pub fn compute_Ls<C: CurveGroup>(
ccs: &CCS<C::ScalarField>,
lcccs: &LCCCS<C>,
z: &[C::ScalarField],
) -> Vec<VirtualPolynomial<C::ScalarField>> {
let eq_rx = build_eq_x_r_vec(&lcccs.r_x).unwrap();
let eq_rx_mle = dense_vec_to_dense_mle(ccs.s, &eq_rx);
let mut Ls = Vec::with_capacity(ccs.t);
for M_j in ccs.M.iter() {
let mut L = VirtualPolynomial::<C::ScalarField>::new(ccs.s);
let mut Mz = vec![dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z).unwrap())];
Mz.push(eq_rx_mle.clone());
L.add_mle_list(
Mz.iter().map(|v| Arc::new(v.clone())),
C::ScalarField::one(),
)
.unwrap();
Ls.push(L);
}
Ls
}
#[test]
/// Test linearized CCCS v_j against the L_j(x)
fn test_lcccs_v_j() {
let mut rng = test_rng();
let ccs = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z.clone()).unwrap();
let n_rows = 2_u32.pow(5) as usize;
let n_cols = 2_u32.pow(5) as usize;
let r1cs = R1CS::rand(&mut rng, n_rows, n_cols);
let ccs = CCS::from_r1cs(r1cs);
let z: Vec<Fr> = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
// with our test vector coming from R1CS, v should have length 3
assert_eq!(lcccs.v.len(), 3);
let vec_L_j_x = lcccs.compute_Ls(&ccs, &z);
let vec_L_j_x = compute_Ls(&ccs, &lcccs, &z);
assert_eq!(vec_L_j_x.len(), lcccs.v.len());
for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
@ -175,7 +196,7 @@ pub mod tests {
assert_eq!(lcccs.v.len(), 3);
// Bad compute L_j(x) with the bad z
let vec_L_j_x = lcccs.compute_Ls(&ccs, &bad_z);
let vec_L_j_x = compute_Ls(&ccs, &lcccs, &bad_z);
assert_eq!(vec_L_j_x.len(), lcccs.v.len());
// Make sure that the LCCCS is not satisfied given these L_j(x)
@ -189,7 +210,6 @@ pub mod tests {
satisfied = false;
}
}
assert!(!satisfied);
}
}

folding-schemes/src/folding/hypernova/nimfs.rs (+9 -8)

@ -7,7 +7,7 @@ use ark_std::{One, Zero};
use super::cccs::{Witness, CCCS};
use super::lcccs::LCCCS;
use super::utils::{compute_c, compute_g, compute_sigmas_and_thetas};
use super::utils::{compute_c, compute_g, compute_sigmas_thetas};
use crate::ccs::CCS;
use crate::transcript::Transcript;
use crate::utils::hypercube::BooleanHypercube;
@ -200,7 +200,7 @@ where
let beta: Vec<C::ScalarField> = transcript.get_challenges(ccs.s);
// Compute g(x)
let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta);
let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta)?;
// Step 3: Run the sumcheck prover
let sumcheck_proof = IOPSumCheck::<C, T>::prove(&g, transcript)
@ -244,7 +244,7 @@ where
let r_x_prime = sumcheck_proof.point.clone();
// Step 4: compute sigmas and thetas
let sigmas_thetas = compute_sigmas_and_thetas(ccs, &z_lcccs, &z_cccs, &r_x_prime);
let sigmas_thetas = compute_sigmas_thetas(ccs, &z_lcccs, &z_cccs, &r_x_prime)?;
// Step 6: Get the folding challenge
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
@ -336,7 +336,7 @@ where
.map(|lcccs| lcccs.r_x.clone())
.collect(),
&r_x_prime,
);
)?;
// check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas
if c != sumcheck_subclaim.expected_evaluation {
@ -345,9 +345,10 @@ where
// Sanity check: we can also compute g(r_x') from the proof last evaluation value, and
// should be equal to the previously obtained values.
let g_on_rxprime_from_sumcheck_last_eval =
DensePolynomial::from_coefficients_slice(&proof.sc_proof.proofs.last().unwrap().coeffs)
.evaluate(r_x_prime.last().unwrap());
let g_on_rxprime_from_sumcheck_last_eval = DensePolynomial::from_coefficients_slice(
&proof.sc_proof.proofs.last().ok_or(Error::Empty)?.coeffs,
)
.evaluate(r_x_prime.last().ok_or(Error::Empty)?);
if g_on_rxprime_from_sumcheck_last_eval != c {
return Err(Error::NotEqual);
}
@ -395,7 +396,7 @@ pub mod tests {
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime).unwrap();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();

folding-schemes/src/folding/hypernova/utils.rs (+112 -142)

@ -1,88 +1,52 @@
use ark_ec::CurveGroup;
use ark_ff::{Field, PrimeField};
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_poly::MultilinearExtension;
use std::ops::Add;
use crate::utils::multilinear_polynomial::fix_variables;
use crate::utils::multilinear_polynomial::scalar_mul;
use ark_std::One;
use std::sync::Arc;
use super::lcccs::LCCCS;
use super::nimfs::SigmasThetas;
use crate::ccs::CCS;
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::dense_vec_to_mle;
use crate::utils::mle::matrix_to_mle;
use crate::utils::vec::SparseMatrix;
use crate::utils::virtual_polynomial::{eq_eval, VirtualPolynomial};
/// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) for all j values
/// in 0..self.t
pub fn compute_all_sum_Mz_evals<F: PrimeField>(
vec_M: &[SparseMatrix<F>],
z: &[F],
r: &[F],
s_prime: usize,
) -> Vec<F> {
// Convert z to MLE
let z_y_mle = dense_vec_to_mle(s_prime, z);
// Convert all matrices to MLE
let M_x_y_mle: Vec<DenseMultilinearExtension<F>> =
vec_M.iter().cloned().map(matrix_to_mle).collect();
let mut v = Vec::with_capacity(M_x_y_mle.len());
for M_i in M_x_y_mle {
let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime);
let v_i = sum_Mz.evaluate(r).unwrap();
v.push(v_i);
}
v
}
/// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y)
pub fn compute_sum_Mz<F: PrimeField>(
M_j: DenseMultilinearExtension<F>,
z: &DenseMultilinearExtension<F>,
s_prime: usize,
) -> DenseMultilinearExtension<F> {
let mut sum_Mz = DenseMultilinearExtension {
evaluations: vec![F::zero(); M_j.evaluations.len()],
num_vars: M_j.num_vars - s_prime,
};
let bhc = BooleanHypercube::new(s_prime);
for y in bhc.into_iter() {
// In a slightly counter-intuitive fashion fix_variables() fixes the right-most variables of the polynomial. So
// for a polynomial M(x,y) and a random field element r, if we do fix_variables(M,r) we will get M(x,r).
let M_j_y = fix_variables(&M_j, &y);
let z_y = z.evaluate(&y).unwrap();
let M_j_z = scalar_mul(&M_j_y, &z_y);
sum_Mz = sum_Mz.add(M_j_z);
}
sum_Mz
}
use crate::utils::mle::dense_vec_to_dense_mle;
use crate::utils::vec::mat_vec_mul;
use crate::utils::virtual_polynomial::{build_eq_x_r_vec, eq_eval, VirtualPolynomial};
use crate::Error;
/// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS
/// instances
pub fn compute_sigmas_and_thetas<F: PrimeField>(
pub fn compute_sigmas_thetas<F: PrimeField>(
ccs: &CCS<F>,
z_lcccs: &[Vec<F>],
z_cccs: &[Vec<F>],
r_x_prime: &[F],
) -> SigmasThetas<F> {
) -> Result<SigmasThetas<F>, Error> {
let mut sigmas: Vec<Vec<F>> = Vec::new();
for z_lcccs_i in z_lcccs {
// sigmas
let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime);
let mut Mzs: Vec<DenseMultilinearExtension<F>> = vec![];
for M_j in ccs.M.iter() {
Mzs.push(dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_lcccs_i)?));
}
let sigma_i = Mzs
.iter()
.map(|Mz| Mz.evaluate(r_x_prime).ok_or(Error::EvaluationFail))
.collect::<Result<_, Error>>()?;
sigmas.push(sigma_i);
}
let mut thetas: Vec<Vec<F>> = Vec::new();
for z_cccs_i in z_cccs {
// thetas
let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime);
let mut Mzs: Vec<DenseMultilinearExtension<F>> = vec![];
for M_j in ccs.M.iter() {
Mzs.push(dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, z_cccs_i)?));
}
let theta_i = Mzs
.iter()
.map(|Mz| Mz.evaluate(r_x_prime).ok_or(Error::EvaluationFail))
.collect::<Result<_, Error>>()?;
thetas.push(theta_i);
}
SigmasThetas(sigmas, thetas)
Ok(SigmasThetas(sigmas, thetas))
}
/// computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS
@ -99,13 +63,13 @@ pub fn compute_c(
beta: &[F],
vec_r_x: &Vec<Vec<F>>,
r_x_prime: &[F],
) -> F {
) -> Result<F, Error> {
let (vec_sigmas, vec_thetas) = (st.0.clone(), st.1.clone());
let mut c = F::zero();
let mut e_lcccs = Vec::new();
for r_x in vec_r_x {
e_lcccs.push(eq_eval(r_x, r_x_prime).unwrap());
e_lcccs.push(eq_eval(r_x, r_x_prime)?);
}
for (i, sigmas) in vec_sigmas.iter().enumerate() {
// (sum gamma^j * e_i * sigma_j)
@ -116,7 +80,7 @@ pub fn compute_c(
}
let mu = vec_sigmas.len();
let e2 = eq_eval(beta, r_x_prime).unwrap();
let e2 = eq_eval(beta, r_x_prime)?;
for (k, thetas) in vec_thetas.iter().enumerate() {
// + gamma^{t+1} * e2 * sum c_i * prod theta_j
let mut lhs = F::zero();
@ -130,7 +94,7 @@ pub fn compute_c(
let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]);
c += gamma_t1 * e2 * lhs;
}
c
Ok(c)
}
/// Compute g(x) polynomial for the given inputs.
@ -141,75 +105,76 @@ pub fn compute_g(
z_cccs: &[Vec<C::ScalarField>],
gamma: C::ScalarField,
beta: &[C::ScalarField],
) -> VirtualPolynomial<C::ScalarField> {
let mu = running_instances.len();
let mut vec_Ls: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
for (i, running_instance) in running_instances.iter().enumerate() {
let mut Ls = running_instance.compute_Ls(ccs, &z_lcccs[i]);
vec_Ls.append(&mut Ls);
}
let mut vec_Q: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
for z_cccs_i in z_cccs.iter() {
let Q = ccs.compute_Q(z_cccs_i, beta);
vec_Q.push(Q);
}
let mut g = vec_Ls[0].clone();
// note: the following two loops can be integrated in the previous two loops, but left
// separated for clarity in the PoC implementation.
for (j, L_j) in vec_Ls.iter_mut().enumerate().skip(1) {
let gamma_j = gamma.pow([j as u64]);
L_j.scalar_mul(&gamma_j);
g = g.add(L_j);
) -> Result<VirtualPolynomial<C::ScalarField>, Error>
where
C::ScalarField: PrimeField,
{
assert_eq!(running_instances.len(), z_lcccs.len());
let mut g = VirtualPolynomial::<C::ScalarField>::new(ccs.s);
let mu = z_lcccs.len();
let nu = z_cccs.len();
let mut gamma_pow = C::ScalarField::one();
for i in 0..mu {
// L_j
let eq_rx = build_eq_x_r_vec(&running_instances[i].r_x)?;
let eq_rx_mle = dense_vec_to_dense_mle(ccs.s, &eq_rx);
for M_j in ccs.M.iter() {
let mut L_i_j = vec![dense_vec_to_dense_mle(
ccs.s,
&mat_vec_mul(M_j, &z_lcccs[i])?,
)];
L_i_j.push(eq_rx_mle.clone());
g.add_mle_list(L_i_j.iter().map(|v| Arc::new(v.clone())), gamma_pow)?;
gamma_pow *= gamma;
}
}
for (i, Q_i) in vec_Q.iter_mut().enumerate() {
let gamma_mut_i = gamma.pow([(mu * ccs.t + i) as u64]);
Q_i.scalar_mul(&gamma_mut_i);
g = g.add(Q_i);
let eq_beta = build_eq_x_r_vec(beta)?;
let eq_beta_mle = dense_vec_to_dense_mle(ccs.s, &eq_beta);
#[allow(clippy::needless_range_loop)]
for k in 0..nu {
// Q_k
for i in 0..ccs.q {
let mut Q_k = vec![];
for &j in ccs.S[i].iter() {
Q_k.push(dense_vec_to_dense_mle(
ccs.s,
&mat_vec_mul(&ccs.M[j], &z_cccs[k])?,
));
}
Q_k.push(eq_beta_mle.clone());
g.add_mle_list(
Q_k.iter().map(|v| Arc::new(v.clone())),
ccs.c[i] * gamma_pow,
)?;
}
gamma_pow *= gamma;
}
g
Ok(g)
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_ff::Field;
use ark_pallas::{Fr, Projective};
use ark_std::test_rng;
use ark_std::One;
use ark_std::UniformRand;
use ark_std::Zero;
use super::*;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use crate::commitment::{pedersen::Pedersen, CommitmentScheme};
use crate::folding::hypernova::lcccs::tests::compute_Ls;
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::matrix_to_dense_mle;
use crate::utils::multilinear_polynomial::tests::fix_last_variables;
use crate::utils::virtual_polynomial::eq_eval;
#[test]
fn test_compute_sum_Mz_over_boolean_hypercube() {
let ccs = get_test_ccs::<Fr>();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
let z_mle = dense_vec_to_mle(ccs.s_prime, &z);
// check that evaluating over all the values x over the boolean hypercube, the result of
// the next for loop is equal to 0
for x in BooleanHypercube::new(ccs.s) {
let mut r = Fr::zero();
for i in 0..ccs.q {
let mut Sj_prod = Fr::one();
for j in ccs.S[i].clone() {
let M_j = matrix_to_mle(ccs.M[j].clone());
let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
let sum_Mz_x = sum_Mz.evaluate(&x).unwrap();
Sj_prod *= sum_Mz_x;
}
r += Sj_prod * ccs.c[i];
}
assert_eq!(r, Fr::zero());
}
}
/// Given M(x,y) matrix and a random field element `r`, test that ~M(r,y) is an s'-variable polynomial which
/// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements
/// of the column and the values eq_i(r) where i is the row of that element
@ -238,7 +203,7 @@ pub mod tests {
let ccs = get_test_ccs::<Fr>();
let M = ccs.M[0].clone().to_dense();
let M_mle = matrix_to_mle(ccs.M[0].clone());
let M_mle = matrix_to_dense_mle(ccs.M[0].clone());
// Fix the polynomial ~M(r,y)
let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
@ -259,7 +224,7 @@ pub mod tests {
}
#[test]
fn test_compute_sigmas_and_thetas() {
fn test_compute_sigmas_thetas() {
let ccs = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
@ -277,7 +242,7 @@ pub mod tests {
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime).unwrap();
let g = compute_g(
&ccs,
@ -286,7 +251,8 @@ pub mod tests {
&[z2.clone()],
gamma,
&beta,
);
)
.unwrap();
// we expect g(r_x_prime) to be equal to:
// c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j
@ -299,19 +265,22 @@ pub mod tests {
&beta,
&vec![lcccs_instance.r_x],
&r_x_prime,
);
)
.unwrap();
assert_eq!(c, expected_c);
}
#[test]
fn test_compute_g() {
let ccs = get_test_ccs();
let mut rng = test_rng();
// generate test CCS & z vectors
let ccs: CCS<Fr> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let mut rng = test_rng(); // TMP
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
@ -320,12 +289,6 @@ pub mod tests {
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let mut sum_v_j_gamma = Fr::zero();
for j in 0..lcccs_instance.v.len() {
let gamma_j = gamma.pow([j as u64]);
sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
}
// Compute g(x) with that r_x
let g = compute_g::<Projective>(
&ccs,
@ -334,7 +297,8 @@ pub mod tests {
&[z2.clone()],
gamma,
&beta,
);
)
.unwrap();
// evaluate g(x) over x \in {0,1}^s
let mut g_on_bhc = Fr::zero();
@ -342,9 +306,22 @@ pub mod tests {
g_on_bhc += g.evaluate(&x).unwrap();
}
// Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q'
assert_ne!(g_on_bhc, Fr::zero());
let mut sum_v_j_gamma = Fr::zero();
for j in 0..lcccs_instance.v.len() {
let gamma_j = gamma.pow([j as u64]);
sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
}
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
// sum of gamma^j * v_j over j \in [t]
assert_eq!(g_on_bhc, sum_v_j_gamma);
// evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s
let mut sum_Lj_on_bhc = Fr::zero();
let vec_L = lcccs_instance.compute_Ls(&ccs, &z1);
let vec_L = compute_Ls(&ccs, &lcccs_instance, &z1);
for x in BooleanHypercube::new(ccs.s) {
for (j, Lj) in vec_L.iter().enumerate() {
let gamma_j = gamma.pow([j as u64]);
@ -352,15 +329,8 @@ pub mod tests {
}
}
// Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q'
assert_ne!(g_on_bhc, Fr::zero());
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
// sum of gamma^j * Lj(x) over the boolean hypercube
assert_eq!(g_on_bhc, sum_Lj_on_bhc);
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
// sum of gamma^j * v_j over j \in [t]
assert_eq!(g_on_bhc, sum_v_j_gamma);
}
}
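Reading off the two loops in the rewritten `compute_g` above (zero-based indices, as in the code), the polynomial it assembles directly from the M_j * z MLEs is

g(x) = \sum_{i=0}^{mu-1} \sum_{j=0}^{t-1} gamma^{i*t + j} * eq(r_x^{(i)}, x) * \tilde{M_j z^{(i)}}(x)
     + \sum_{k=0}^{nu-1} gamma^{mu*t + k} * eq(beta, x) * \sum_l c_l * \prod_{j \in S_l} \tilde{M_j z^{(k)}}(x)

where \tilde{M_j z}(x) denotes the dense MLE of the vector M_j * z, mu is the number of LCCCS instances and nu the number of CCCS instances; the gamma exponents match the `gamma.pow([(mu * ccs.t + k) as u64])` factor used in `compute_c`.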

folding-schemes/src/folding/nova/nifs.rs (+7 -7)

@ -7,7 +7,7 @@ use super::{CommittedInstance, Witness};
use crate::ccs::r1cs::R1CS;
use crate::commitment::CommitmentScheme;
use crate::transcript::Transcript;
use crate::utils::vec::*;
use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, vec_sub};
use crate::Error;
/// Implements the Non-Interactive Folding Scheme described in section 4 of
@ -32,12 +32,12 @@ where
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
// this is parallelizable (for the future)
let Az1 = mat_vec_mul_sparse(&A, z1)?;
let Bz1 = mat_vec_mul_sparse(&B, z1)?;
let Cz1 = mat_vec_mul_sparse(&C, z1)?;
let Az2 = mat_vec_mul_sparse(&A, z2)?;
let Bz2 = mat_vec_mul_sparse(&B, z2)?;
let Cz2 = mat_vec_mul_sparse(&C, z2)?;
let Az1 = mat_vec_mul(&A, z1)?;
let Bz1 = mat_vec_mul(&B, z1)?;
let Cz1 = mat_vec_mul(&C, z1)?;
let Az2 = mat_vec_mul(&A, z2)?;
let Bz2 = mat_vec_mul(&B, z2)?;
let Cz2 = mat_vec_mul(&C, z2)?;
let Az1_Bz2 = hadamard(&Az1, &Bz2)?;
let Az2_Bz1 = hadamard(&Az2, &Bz1)?;

folding-schemes/src/folding/protogalaxy/folding.rs (+3 -3)

@ -370,9 +370,9 @@ fn lagrange_polys(domain_n: GeneralEvaluationDomain) -> Vec
// f(w) in R1CS context. For the moment we use R1CS, in the future we will abstract this with a
// trait
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Result<Vec<F>, Error> {
let Az = mat_vec_mul_sparse(&r1cs.A, w)?;
let Bz = mat_vec_mul_sparse(&r1cs.B, w)?;
let Cz = mat_vec_mul_sparse(&r1cs.C, w)?;
let Az = mat_vec_mul(&r1cs.A, w)?;
let Bz = mat_vec_mul(&r1cs.B, w)?;
let Cz = mat_vec_mul(&r1cs.C, w)?;
let AzBz = hadamard(&Az, &Bz)?;
vec_sub(&AzBz, &Cz)
}

folding-schemes/src/lib.rs (+2 -0)

@ -68,6 +68,8 @@ pub enum Error {
NewDomainFail,
#[error("The number of folded steps must be greater than 1")]
NotEnoughSteps,
#[error("Evaluation failed")]
EvaluationFail,
// Commitment errors
#[error("Pedersen parameters length is not sufficient (generators.len={0} < vector.len={1} unsatisfied)")]

folding-schemes/src/utils/espresso/virtual_polynomial.rs (+5 -2)

@ -325,12 +325,15 @@ pub fn eq_eval(x: &[F], y: &[F]) -> Result {
/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r<F: PrimeField>(r: &[F]) -> Result<Arc<DenseMultilinearExtension<F>>, ArithErrors> {
pub fn build_eq_x_r<F: PrimeField>(
r: &[F],
) -> Result<Arc<DenseMultilinearExtension<F>>, ArithErrors> {
let evals = build_eq_x_r_vec(r)?;
let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals);
Ok(Arc::new(mle))
}
/// This function build the eq(x, r) polynomial for any given r, and output the
/// evaluation of eq(x, r) in its vector form.
///
@ -338,7 +341,7 @@ fn build_eq_x_r(r: &[F]) -> Result
/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r_vec<F: PrimeField>(r: &[F]) -> Result<Vec<F>, ArithErrors> {
pub fn build_eq_x_r_vec<F: PrimeField>(r: &[F]) -> Result<Vec<F>, ArithErrors> {
// we build eq(x,r) from its evaluations
// we want to evaluate eq(x,r) over x \in {0, 1}^num_vars
// for example, with num_vars = 4, x is a binary vector of 4, then

folding-schemes/src/utils/mle.rs (+48 -8)

@ -1,6 +1,6 @@
/// Some basic MLE utilities
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_poly::{DenseMultilinearExtension, SparseMultilinearExtension};
use ark_std::log2;
use super::vec::SparseMatrix;
@ -15,7 +15,7 @@ pub fn pad_matrix(m: &SparseMatrix) -> SparseMatrix {
/// Returns the dense multilinear extension from the given matrix, without modifying the original
/// matrix.
pub fn matrix_to_mle<F: PrimeField>(matrix: SparseMatrix<F>) -> DenseMultilinearExtension<F> {
pub fn matrix_to_dense_mle<F: PrimeField>(matrix: SparseMatrix<F>) -> DenseMultilinearExtension<F> {
let n_vars: usize = (log2(matrix.n_rows) + log2(matrix.n_cols)) as usize; // n_vars = s + s'
// Matrices might need to get padded before turned into an MLE
@ -30,11 +30,11 @@ pub fn matrix_to_mle(matrix: SparseMatrix) -> DenseMultilinear
}
// convert the dense vector into a mle
vec_to_mle(n_vars, &v)
vec_to_dense_mle(n_vars, &v)
}
/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> DenseMultilinearExtension<F> {
pub fn vec_to_dense_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> DenseMultilinearExtension<F> {
let v_padded: Vec<F> = if v.len() != (1 << n_vars) {
// pad to 2^n_vars
[
@ -50,7 +50,35 @@ pub fn vec_to_mle(n_vars: usize, v: &[F]) -> DenseMultilinearExte
DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> DenseMultilinearExtension<F> {
/// Returns the sparse multilinear extension from the given matrix, without modifying the original
/// matrix.
pub fn matrix_to_mle<F: PrimeField>(m: SparseMatrix<F>) -> SparseMultilinearExtension<F> {
let n_rows = m.n_rows.next_power_of_two();
let n_cols = m.n_cols.next_power_of_two();
let n_vars: usize = (log2(n_rows) + log2(n_cols)) as usize; // n_vars = s + s'
// build the sparse vec representing the sparse matrix
let mut v: Vec<(usize, F)> = Vec::new();
for (i, row) in m.coeffs.iter().enumerate() {
for (val, j) in row.iter() {
v.push((i * n_cols + j, *val));
}
}
// convert the dense vector into a mle
vec_to_mle(n_vars, &v)
}
/// Takes the n_vars and a sparse vector and returns its sparse MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &[(usize, F)]) -> SparseMultilinearExtension<F> {
SparseMultilinearExtension::<F>::from_evaluations(n_vars, v)
}
/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn dense_vec_to_dense_mle<F: PrimeField>(
n_vars: usize,
v: &[F],
) -> DenseMultilinearExtension<F> {
// Pad to 2^n_vars
let v_padded: Vec<F> = [
v.to_owned(),
@ -62,6 +90,16 @@ pub fn dense_vec_to_mle(n_vars: usize, v: &[F]) -> DenseMultiline
DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
/// Takes the n_vars and a dense vector and returns its sparse MLE.
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> SparseMultilinearExtension<F> {
let v_sparse = v
.iter()
.enumerate()
.map(|(i, v_i)| (i, *v_i))
.collect::<Vec<(usize, F)>>();
SparseMultilinearExtension::<F>::from_evaluations(n_vars, &v_sparse)
}
#[cfg(test)]
mod tests {
use super::*;
@ -86,7 +124,8 @@ mod tests {
]);
let A_mle = matrix_to_mle(A);
assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals
assert_eq!(A_mle.evaluations.len(), 15); // 15 non-zero elements
assert_eq!(A_mle.num_vars, 4); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals
let A = to_F_matrix::<Fr>(vec![
vec![2, 3, 4, 4, 1],
@ -96,7 +135,8 @@ mod tests {
vec![420, 4, 2, 0, 5],
]);
let A_mle = matrix_to_mle(A.clone());
assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals
assert_eq!(A_mle.evaluations.len(), 23); // 23 non-zero elements
assert_eq!(A_mle.num_vars, 6); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals
// check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values
let bhc = BooleanHypercube::new(A_mle.num_vars);
@ -138,7 +178,7 @@ mod tests {
vec![420, 4, 2, 0],
]);
let A_mle = matrix_to_mle(A.clone());
let A_mle = matrix_to_dense_mle(A.clone());
let A = A.to_dense();
let bhc = BooleanHypercube::new(2);
for (i, y) in bhc.enumerate() {
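A hedged usage sketch of the helpers touched in this file: `matrix_to_dense_mle` keeps the old dense behaviour, while the new `matrix_to_mle` returns a `SparseMultilinearExtension` built only from non-zero entries. Crate paths and the concrete field values are assumptions for illustration:

```rust
use ark_pallas::Fr;
use folding_schemes::utils::mle::{dense_vec_to_dense_mle, matrix_to_dense_mle, matrix_to_mle};
use folding_schemes::utils::vec::dense_matrix_to_sparse;

fn mle_helpers_example() {
    // 2x2 matrix with one zero entry
    let m = dense_matrix_to_sparse(vec![
        vec![Fr::from(1u64), Fr::from(2u64)],
        vec![Fr::from(0u64), Fr::from(3u64)],
    ]);
    // old behaviour, renamed: dense MLE with 2^(1+1) = 4 evaluations
    let dense = matrix_to_dense_mle(m.clone());
    // new `matrix_to_mle`: sparse MLE storing only the 3 non-zero entries
    let sparse = matrix_to_mle(m);
    assert_eq!(dense.num_vars, sparse.num_vars); // both are 2-variable MLEs
    // dense vector -> dense MLE (used for the M_j * z vectors)
    let v_mle = dense_vec_to_dense_mle(1, &[Fr::from(5u64), Fr::from(7u64)]);
    assert_eq!(v_mle.evaluations.len(), 2);
}
```

Only non-zero entries are stored in the sparse form, so memory scales with the matrix's sparsity rather than with 2^(s+s').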

folding-schemes/src/utils/vec.rs (+27 -9)

@ -4,6 +4,7 @@ use ark_poly::{
};
pub use ark_relations::r1cs::Matrix as R1CSMatrix;
use ark_std::cfg_iter;
use ark_std::rand::Rng;
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
use crate::Error;
@ -18,6 +19,23 @@ pub struct SparseMatrix {
}
impl<F: PrimeField> SparseMatrix<F> {
pub fn rand<R: Rng>(rng: &mut R, n_rows: usize, n_cols: usize) -> Self {
const ZERO_VAL_PROBABILITY: f64 = 0.8f64;
let dense = (0..n_rows)
.map(|_| {
(0..n_cols)
.map(|_| {
if !rng.gen_bool(ZERO_VAL_PROBABILITY) {
return F::rand(rng);
}
F::zero()
})
.collect::<Vec<F>>()
})
.collect::<Vec<Vec<F>>>();
dense_matrix_to_sparse(dense)
}
pub fn to_dense(&self) -> Vec<Vec<F>> {
let mut r: Vec<Vec<F>> = vec![vec![F::zero(); self.n_cols]; self.n_rows];
for (row_i, row) in self.coeffs.iter().enumerate() {
@ -79,7 +97,7 @@ pub fn is_zero_vec(vec: &[F]) -> bool {
vec.iter().all(|a| a.is_zero())
}
pub fn mat_vec_mul<F: PrimeField>(M: &[Vec<F>], z: &[F]) -> Result<Vec<F>, Error> {
pub fn mat_vec_mul_dense<F: PrimeField>(M: &[Vec<F>], z: &[F]) -> Result<Vec<F>, Error> {
if M.is_empty() {
return Err(Error::Empty);
}
@ -101,7 +119,7 @@ pub fn mat_vec_mul(M: &[Vec], z: &[F]) -> Result, Error
Ok(r)
}
pub fn mat_vec_mul_sparse<F: PrimeField>(M: &SparseMatrix<F>, z: &[F]) -> Result<Vec<F>, Error> {
pub fn mat_vec_mul<F: PrimeField>(M: &SparseMatrix<F>, z: &[F]) -> Result<Vec<F>, Error> {
if M.n_cols != z.len() {
return Err(Error::NotSameLength(
"M.n_cols".to_string(),
@ -191,9 +209,12 @@ pub mod tests {
])
.to_dense();
let z = to_F_vec(vec![1, 3, 35, 9, 27, 30]);
assert_eq!(mat_vec_mul(&A, &z).unwrap(), to_F_vec(vec![3, 9, 30, 35]));
assert_eq!(
mat_vec_mul_sparse(&dense_matrix_to_sparse(A), &z).unwrap(),
mat_vec_mul_dense(&A, &z).unwrap(),
to_F_vec(vec![3, 9, 30, 35])
);
assert_eq!(
mat_vec_mul(&dense_matrix_to_sparse(A), &z).unwrap(),
to_F_vec(vec![3, 9, 30, 35])
);
@ -201,13 +222,10 @@ pub mod tests {
let v = to_F_vec(vec![19, 55, 50, 3]);
assert_eq!(
mat_vec_mul(&A.to_dense(), &v).unwrap(),
to_F_vec(vec![418, 1158, 979])
);
assert_eq!(
mat_vec_mul_sparse(&A, &v).unwrap(),
mat_vec_mul_dense(&A.to_dense(), &v).unwrap(),
to_F_vec(vec![418, 1158, 979])
);
assert_eq!(mat_vec_mul(&A, &v).unwrap(), to_F_vec(vec![418, 1158, 979]));
}
#[test]
