* Port HyperNova's multifolding from https://github.com/privacy-scaling-explorations/multifolding-poc, adapting and refactoring some of its methods and structs. Note: adapted the mle.rs methods from dense to sparse representation. Co-authored-by: George Kadianakis <desnacked@riseup.net>
* HyperNova nimfs: move the CCS struct outside of LCCCS & CCCS, to avoid carrying around the whole CCS and duplicating data when it is not needed. Also add feature flags for the folding schemes. Co-authored-by: George Kadianakis <desnacked@riseup.net>
| @@ -0,0 +1,226 @@
| use ark_ec::CurveGroup;
 | |||
| use ark_ff::PrimeField;
 | |||
| use ark_std::One;
 | |||
| use ark_std::Zero;
 | |||
| use std::ops::Add;
 | |||
| use std::sync::Arc;
 | |||
| 
 | |||
| use ark_std::{rand::Rng, UniformRand};
 | |||
| 
 | |||
| use super::utils::compute_sum_Mz;
 | |||
| use crate::ccs::CCS;
 | |||
| use crate::pedersen::{Params as PedersenParams, Pedersen};
 | |||
| use crate::utils::hypercube::BooleanHypercube;
 | |||
| use crate::utils::mle::matrix_to_mle;
 | |||
| use crate::utils::mle::vec_to_mle;
 | |||
| use crate::utils::virtual_polynomial::VirtualPolynomial;
 | |||
| use crate::Error;
 | |||
| 
 | |||
| /// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment.
 | |||
| #[derive(Debug, Clone)]
 | |||
| pub struct Witness<F: PrimeField> {
 | |||
|     pub w: Vec<F>,
 | |||
|     pub r_w: F, // randomness used in the Pedersen commitment of w
 | |||
| }
 | |||
| 
 | |||
| /// Committed CCS instance
 | |||
| #[derive(Debug, Clone)]
 | |||
| pub struct CCCS<C: CurveGroup> {
 | |||
|     // Commitment to witness
 | |||
|     pub C: C,
 | |||
|     // Public input/output
 | |||
|     pub x: Vec<C::ScalarField>,
 | |||
| }
 | |||
| 
 | |||
| impl<C: CurveGroup> CCS<C> {
 | |||
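|     /// Computes the CCCS instance and its witness from a CCS z vector z = (1, x, w):
|     /// w is committed with a Pedersen commitment and x is kept as the public input/output.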
|     pub fn to_cccs<R: Rng>(
 | |||
|         &self,
 | |||
|         rng: &mut R,
 | |||
|         pedersen_params: &PedersenParams<C>,
 | |||
|         z: &[C::ScalarField],
 | |||
|     ) -> (CCCS<C>, Witness<C::ScalarField>) {
 | |||
|         let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
 | |||
|         let r_w = C::ScalarField::rand(rng);
 | |||
|         let C = Pedersen::<C>::commit(pedersen_params, &w, &r_w);
 | |||
| 
 | |||
|         (
 | |||
|             CCCS::<C> {
 | |||
|                 C,
 | |||
|                 x: z[1..(1 + self.l)].to_vec(),
 | |||
|             },
 | |||
|             Witness::<C::ScalarField> { w, r_w },
 | |||
|         )
 | |||
|     }
 | |||
| 
 | |||
|     /// Computes q(x) = \sum_{i=1}^{q} c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y) )
|     /// as a multivariate polynomial in x
 | |||
|     pub fn compute_q(&self, z: &Vec<C::ScalarField>) -> VirtualPolynomial<C::ScalarField> {
 | |||
|         let z_mle = vec_to_mle(self.s_prime, z);
 | |||
|         let mut q = VirtualPolynomial::<C::ScalarField>::new(self.s);
 | |||
| 
 | |||
|         for i in 0..self.q {
 | |||
|             let mut prod: VirtualPolynomial<C::ScalarField> =
 | |||
|                 VirtualPolynomial::<C::ScalarField>::new(self.s);
 | |||
|             for j in self.S[i].clone() {
 | |||
|                 let M_j = matrix_to_mle(self.M[j].clone());
 | |||
| 
 | |||
|                 let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.s_prime);
 | |||
| 
 | |||
|                 // Fold this sum into the running product
 | |||
|                 if prod.products.is_empty() {
 | |||
|                     // If this is the first time we are adding something to this virtual polynomial, we need to
 | |||
|                     // explicitly add the products using add_mle_list()
 | |||
|                     // XXX is this true? improve API
 | |||
|                     prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one())
 | |||
|                         .unwrap();
 | |||
|                 } else {
 | |||
|                     prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one())
 | |||
|                         .unwrap();
 | |||
|                 }
 | |||
|             }
 | |||
|             // Multiply the product by the coefficient c_i
 | |||
|             prod.scalar_mul(&self.c[i]);
 | |||
|             // Add it to the running sum
 | |||
|             q = q.add(&prod);
 | |||
|         }
 | |||
|         q
 | |||
|     }
 | |||
| 
 | |||
|     /// Computes Q(x) = eq(beta, x) * q(x)
 | |||
|     ///               = eq(beta, x) * \sum_{i=1}^{q} c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y) )
|     /// as a multivariate polynomial in x
 | |||
|     pub fn compute_Q(
 | |||
|         &self,
 | |||
|         z: &Vec<C::ScalarField>,
 | |||
|         beta: &[C::ScalarField],
 | |||
|     ) -> VirtualPolynomial<C::ScalarField> {
 | |||
|         let q = self.compute_q(z);
 | |||
|         q.build_f_hat(beta).unwrap()
 | |||
|     }
 | |||
| }
 | |||
| 
 | |||
| impl<C: CurveGroup> CCCS<C> {
 | |||
|     /// Perform the check of the CCCS instance described in section 4.1 of the HyperNova paper
 | |||
|     pub fn check_relation(
 | |||
|         &self,
 | |||
|         pedersen_params: &PedersenParams<C>,
 | |||
|         ccs: &CCS<C>,
 | |||
|         w: &Witness<C::ScalarField>,
 | |||
|     ) -> Result<(), Error> {
 | |||
|         // check that C is the commitment of w. Notice that this is not verifying a Pedersen
 | |||
|         // opening, but checking that the commitment comes from committing to the witness.
 | |||
|         assert_eq!(self.C, Pedersen::commit(pedersen_params, &w.w, &w.r_w));
 | |||
| 
 | |||
|         // check CCCS relation
 | |||
|         let z: Vec<C::ScalarField> =
 | |||
|             [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();
 | |||
| 
 | |||
|         // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube
 | |||
|         let q_x = ccs.compute_q(&z);
 | |||
|         for x in BooleanHypercube::new(ccs.s) {
 | |||
|             if !q_x.evaluate(&x).unwrap().is_zero() {
 | |||
|                 return Err(Error::NotSatisfied);
 | |||
|             }
 | |||
|         }
 | |||
| 
 | |||
|         Ok(())
 | |||
|     }
 | |||
| }
 | |||
| 
 | |||
| #[cfg(test)]
 | |||
| pub mod tests {
 | |||
|     use super::*;
 | |||
|     use crate::ccs::tests::{get_test_ccs, get_test_z};
 | |||
|     use ark_std::test_rng;
 | |||
|     use ark_std::UniformRand;
 | |||
| 
 | |||
|     use ark_pallas::{Fr, Projective};
 | |||
| 
 | |||
|     /// Do some sanity checks on q(x). It's a multivariate polynomial and it should evaluate to zero inside the
|     /// hypercube, but to a nonzero value outside the hypercube.
 | |||
|     #[test]
 | |||
|     fn test_compute_q() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs = get_test_ccs::<Projective>();
 | |||
|         let z = get_test_z(3);
 | |||
| 
 | |||
|         let q = ccs.compute_q(&z);
 | |||
| 
 | |||
|         // Evaluate inside the hypercube
 | |||
|         for x in BooleanHypercube::new(ccs.s) {
 | |||
|             assert_eq!(Fr::zero(), q.evaluate(&x).unwrap());
 | |||
|         }
 | |||
| 
 | |||
|         // Evaluate outside the hypercube
 | |||
|         let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
 | |||
|         assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap());
 | |||
|     }
 | |||
| 
 | |||
|     /// Perform some sanity checks on Q(x).
 | |||
|     #[test]
 | |||
|     fn test_compute_Q() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs: CCS<Projective> = get_test_ccs();
 | |||
|         let z = get_test_z(3);
 | |||
|         ccs.check_relation(&z).unwrap();
 | |||
| 
 | |||
|         let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
 | |||
| 
 | |||
|         // Compute Q(x) = eq(beta, x) * q(x).
 | |||
|         let Q = ccs.compute_Q(&z, &beta);
 | |||
| 
 | |||
|         // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y)
 | |||
|         // which interpolates the multivariate polynomial q(x) inside the hypercube.
 | |||
|         //
 | |||
|         // Observe that summing Q(x) over the hypercube directly computes G(\beta).
 | |||
|         //
 | |||
|         // Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the
 | |||
|         // hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes
 | |||
|         // inside the hypercube, this makes it the zero polynomial.
 | |||
|         //
 | |||
|         // Hence, evaluating G(x) at a random beta should give zero.
 | |||
| 
 | |||
|         // Now sum Q(x) evaluations in the hypercube and expect it to be 0
 | |||
|         let r = BooleanHypercube::new(ccs.s)
 | |||
|             .map(|x| Q.evaluate(&x).unwrap())
 | |||
|             .fold(Fr::zero(), |acc, result| acc + result);
 | |||
|         assert_eq!(r, Fr::zero());
 | |||
|     }
 | |||
| 
 | |||
|     /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube.
 | |||
|     /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point.
 | |||
|     /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside it.
 | |||
|     #[test]
 | |||
|     fn test_Q_against_q() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs: CCS<Projective> = get_test_ccs();
 | |||
|         let z = get_test_z(3);
 | |||
|         ccs.check_relation(&z).unwrap();
 | |||
| 
 | |||
|         // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
 | |||
|         // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
 | |||
|         let q = ccs.compute_q(&z);
 | |||
|         for d in BooleanHypercube::new(ccs.s) {
 | |||
|             let Q_at_d = ccs.compute_Q(&z, &d);
 | |||
| 
 | |||
|             // Get G(d) by summing Q_d(x) over the hypercube
 | |||
|             let G_at_d = BooleanHypercube::new(ccs.s)
 | |||
|                 .map(|x| Q_at_d.evaluate(&x).unwrap())
 | |||
|                 .fold(Fr::zero(), |acc, result| acc + result);
 | |||
|             assert_eq!(G_at_d, q.evaluate(&d).unwrap());
 | |||
|         }
 | |||
| 
 | |||
|         // Now test that they should disagree outside of the hypercube
 | |||
|         let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
 | |||
|         let Q_at_r = ccs.compute_Q(&z, &r);
 | |||
| 
 | |||
|         // Get G(r) by summing Q_r(x) over the hypercube
 | |||
|         let G_at_r = BooleanHypercube::new(ccs.s)
 | |||
|             .map(|x| Q_at_r.evaluate(&x).unwrap())
 | |||
|             .fold(Fr::zero(), |acc, result| acc + result);
 | |||
|         assert_ne!(G_at_r, q.evaluate(&r).unwrap());
 | |||
|     }
 | |||
| }
 | |||
| @@ -0,0 +1,181 @@
| use ark_ec::CurveGroup;
 | |||
| use ark_poly::DenseMultilinearExtension;
 | |||
| use ark_std::One;
 | |||
| use std::sync::Arc;
 | |||
| 
 | |||
| use ark_std::{rand::Rng, UniformRand};
 | |||
| 
 | |||
| use super::cccs::Witness;
 | |||
| use super::utils::{compute_all_sum_Mz_evals, compute_sum_Mz};
 | |||
| use crate::ccs::CCS;
 | |||
| use crate::pedersen::{Params as PedersenParams, Pedersen};
 | |||
| use crate::utils::mle::{matrix_to_mle, vec_to_mle};
 | |||
| use crate::utils::virtual_polynomial::VirtualPolynomial;
 | |||
| use crate::Error;
 | |||
| 
 | |||
| /// Linearized Committed CCS instance
 | |||
| #[derive(Debug, Clone, Eq, PartialEq)]
 | |||
| pub struct LCCCS<C: CurveGroup> {
 | |||
|     // Commitment to witness
 | |||
|     pub C: C,
 | |||
|     // Relaxation factor of z for folded LCCCS
 | |||
|     pub u: C::ScalarField,
 | |||
|     // Public input/output
 | |||
|     pub x: Vec<C::ScalarField>,
 | |||
|     // Random evaluation point for the v_i
 | |||
|     pub r_x: Vec<C::ScalarField>,
 | |||
|     // Vector of v_i
 | |||
|     pub v: Vec<C::ScalarField>,
 | |||
| }
 | |||
| 
 | |||
| impl<C: CurveGroup> CCS<C> {
 | |||
|     /// Compute v_j values of the linearized committed CCS form
 | |||
|     /// Given `r`, compute:  \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
 | |||
|     fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec<C::ScalarField> {
 | |||
|         compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime)
 | |||
|     }
 | |||
| 
 | |||
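|     /// Computes the LCCCS instance and its witness from a CCS z vector z = (1, x, w):
|     /// w is committed with a Pedersen commitment, a random evaluation point r_x is sampled,
|     /// and the v_j = \sum_{y \in {0,1}^{s'}} M_j(r_x, y) * z(y) are computed at that point.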
|     pub fn to_lcccs<R: Rng>(
 | |||
|         &self,
 | |||
|         rng: &mut R,
 | |||
|         pedersen_params: &PedersenParams<C>,
 | |||
|         z: &[C::ScalarField],
 | |||
|     ) -> (LCCCS<C>, Witness<C::ScalarField>) {
 | |||
|         let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
 | |||
|         let r_w = C::ScalarField::rand(rng);
 | |||
|         let C = Pedersen::commit(pedersen_params, &w, &r_w);
 | |||
| 
 | |||
|         let r_x: Vec<C::ScalarField> = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
 | |||
|         let v = self.compute_v_j(z, &r_x);
 | |||
| 
 | |||
|         (
 | |||
|             LCCCS::<C> {
 | |||
|                 C,
 | |||
|                 u: C::ScalarField::one(),
 | |||
|                 x: z[1..(1 + self.l)].to_vec(),
 | |||
|                 r_x,
 | |||
|                 v,
 | |||
|             },
 | |||
|             Witness::<C::ScalarField> { w, r_w },
 | |||
|         )
 | |||
|     }
 | |||
| }
 | |||
| 
 | |||
| impl<C: CurveGroup> LCCCS<C> {
 | |||
|     /// Compute all L_j(x) polynomials
 | |||
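|     /// where L_j(x) = eq(r_x, x) * \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y)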
|     pub fn compute_Ls(
 | |||
|         &self,
 | |||
|         ccs: &CCS<C>,
 | |||
|         z: &Vec<C::ScalarField>,
 | |||
|     ) -> Vec<VirtualPolynomial<C::ScalarField>> {
 | |||
|         let z_mle = vec_to_mle(ccs.s_prime, z);
 | |||
|         // Convert all matrices to MLE
 | |||
|         let M_x_y_mle: Vec<DenseMultilinearExtension<C::ScalarField>> =
 | |||
|             ccs.M.clone().into_iter().map(matrix_to_mle).collect();
 | |||
| 
 | |||
|         let mut vec_L_j_x = Vec::with_capacity(ccs.t);
 | |||
|         for M_j in M_x_y_mle {
 | |||
|             let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
 | |||
|             let sum_Mz_virtual =
 | |||
|                 VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one());
 | |||
|             let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap();
 | |||
|             vec_L_j_x.push(L_j_x);
 | |||
|         }
 | |||
| 
 | |||
|         vec_L_j_x
 | |||
|     }
 | |||
| 
 | |||
|     /// Perform the check of the LCCCS instance described in section 4.2 of the HyperNova paper
 | |||
|     pub fn check_relation(
 | |||
|         &self,
 | |||
|         pedersen_params: &PedersenParams<C>,
 | |||
|         ccs: &CCS<C>,
 | |||
|         w: &Witness<C::ScalarField>,
 | |||
|     ) -> Result<(), Error> {
 | |||
|         // check that C is the commitment of w. Notice that this is not verifying a Pedersen
 | |||
|         // opening, but checking that the commitment comes from committing to the witness.
 | |||
|         assert_eq!(self.C, Pedersen::commit(pedersen_params, &w.w, &w.r_w));
 | |||
| 
 | |||
|         // check CCS relation
 | |||
|         let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
 | |||
|         let computed_v = compute_all_sum_Mz_evals(&ccs.M, &z, &self.r_x, ccs.s_prime);
 | |||
|         assert_eq!(computed_v, self.v);
 | |||
|         Ok(())
 | |||
|     }
 | |||
| }
 | |||
| 
 | |||
| #[cfg(test)]
 | |||
| pub mod tests {
 | |||
|     use super::*;
 | |||
|     use ark_std::Zero;
 | |||
| 
 | |||
|     use crate::ccs::tests::{get_test_ccs, get_test_z};
 | |||
|     use crate::utils::hypercube::BooleanHypercube;
 | |||
|     use ark_std::test_rng;
 | |||
| 
 | |||
|     use ark_pallas::{Fr, Projective};
 | |||
| 
 | |||
|     #[test]
 | |||
|     /// Test linearized CCCS v_j against the L_j(x)
 | |||
|     fn test_lcccs_v_j() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs = get_test_ccs();
 | |||
|         let z = get_test_z(3);
 | |||
|         ccs.check_relation(&z.clone()).unwrap();
 | |||
| 
 | |||
|         let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
|         let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z);
 | |||
|         // with our test vector coming from R1CS, v should have length 3
 | |||
|         assert_eq!(lcccs.v.len(), 3);
 | |||
| 
 | |||
|         let vec_L_j_x = lcccs.compute_Ls(&ccs, &z);
 | |||
|         assert_eq!(vec_L_j_x.len(), lcccs.v.len());
 | |||
| 
 | |||
|         for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
 | |||
|             let sum_L_j_x = BooleanHypercube::new(ccs.s)
 | |||
|                 .map(|y| L_j_x.evaluate(&y).unwrap())
 | |||
|                 .fold(Fr::zero(), |acc, result| acc + result);
 | |||
|             assert_eq!(v_i, sum_L_j_x);
 | |||
|         }
 | |||
|     }
 | |||
| 
 | |||
|     /// Given a bad z, check that the v_j should not match with the L_j(x)
 | |||
|     #[test]
 | |||
|     fn test_bad_v_j() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs = get_test_ccs();
 | |||
|         let z = get_test_z(3);
 | |||
|         ccs.check_relation(&z.clone()).unwrap();
 | |||
| 
 | |||
|         // Mutate z so that the relation does not hold
 | |||
|         let mut bad_z = z.clone();
 | |||
|         bad_z[3] = Fr::zero();
 | |||
|         assert!(ccs.check_relation(&bad_z.clone()).is_err());
 | |||
| 
 | |||
|         let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
|         // Compute v_j with the right z
 | |||
|         let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z);
 | |||
|         // with our test vector coming from R1CS, v should have length 3
 | |||
|         assert_eq!(lcccs.v.len(), 3);
 | |||
| 
 | |||
|         // Compute L_j(x) with the bad z
 | |||
|         let vec_L_j_x = lcccs.compute_Ls(&ccs, &bad_z);
 | |||
|         assert_eq!(vec_L_j_x.len(), lcccs.v.len());
 | |||
| 
 | |||
|         // Make sure that the LCCCS is not satisfied given these L_j(x)
 | |||
|         // i.e. summing L_j(x) over the hypercube should not give v_j for all j
 | |||
|         let mut satisfied = true;
 | |||
|         for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
 | |||
|             let sum_L_j_x = BooleanHypercube::new(ccs.s)
 | |||
|                 .map(|y| L_j_x.evaluate(&y).unwrap())
 | |||
|                 .fold(Fr::zero(), |acc, result| acc + result);
 | |||
|             if v_i != sum_L_j_x {
 | |||
|                 satisfied = false;
 | |||
|             }
 | |||
|         }
 | |||
| 
 | |||
|         assert!(!satisfied);
 | |||
|     }
 | |||
| }
 | |||
| @@ -0,0 +1,5 @@
| //! Implements the scheme described in [HyperNova](https://eprint.iacr.org/2023/573.pdf).
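| //!
| //! A minimal sketch of one multifolding step, mirroring the tests in `nimfs.rs` (it assumes a
| //! CCS `ccs`, Pedersen `params`, prover/verifier transcripts `tr_p`/`tr_v` and satisfying
| //! vectors `z1`, `z2` are already set up):
| //!
| //! ```ignore
| //! let (running, w1) = ccs.to_lcccs(&mut rng, &params, &z1);
| //! let (incoming, w2) = ccs.to_cccs(&mut rng, &params, &z2);
| //! let (proof, folded, folded_w) =
| //!     NIMFS::<Projective>::prove(&mut tr_p, &ccs, &[running.clone()], &[incoming.clone()], &[w1], &[w2]);
| //! let folded_v = NIMFS::<Projective>::verify(&mut tr_v, &ccs, &[running], &[incoming], proof);
| //! assert_eq!(folded, folded_v);
| //! folded.check_relation(&params, &ccs, &folded_w).unwrap();
| //! ```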
 | |||
| pub mod cccs;
 | |||
| pub mod lcccs;
 | |||
| pub mod nimfs;
 | |||
| pub mod utils;
 | |||
| @@ -0,0 +1,657 @@
| use ark_ec::CurveGroup;
 | |||
| use ark_ff::{Field, PrimeField};
 | |||
| use ark_std::{One, Zero};
 | |||
| 
 | |||
| use espresso_subroutines::PolyIOP;
 | |||
| use espresso_transcript::IOPTranscript;
 | |||
| 
 | |||
| use super::cccs::{Witness, CCCS};
 | |||
| use super::lcccs::LCCCS;
 | |||
| use super::utils::{compute_c_from_sigmas_and_thetas, compute_g, compute_sigmas_and_thetas};
 | |||
| use crate::ccs::CCS;
 | |||
| use crate::utils::hypercube::BooleanHypercube;
 | |||
| use crate::utils::sum_check::structs::IOPProof as SumCheckProof;
 | |||
| use crate::utils::sum_check::{verifier::interpolate_uni_poly, SumCheck};
 | |||
| use crate::utils::virtual_polynomial::VPAuxInfo;
 | |||
| 
 | |||
| use std::marker::PhantomData;
 | |||
| 
 | |||
| /// Proof defines a multifolding proof
 | |||
| #[derive(Debug)]
 | |||
| pub struct Proof<C: CurveGroup> {
 | |||
|     pub sc_proof: SumCheckProof<C::ScalarField>,
 | |||
|     pub sigmas_thetas: SigmasThetas<C::ScalarField>,
 | |||
| }
 | |||
| 
 | |||
| #[derive(Debug)]
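| /// Pairs the two evaluation vectors produced in step 4 of the protocol: `.0` holds the sigma_i
| /// vectors (one per LCCCS instance) and `.1` the theta_i vectors (one per CCCS instance), as
| /// returned by `compute_sigmas_and_thetas`.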
 | |||
| pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>);
 | |||
| 
 | |||
| #[derive(Debug)]
 | |||
| /// Implements the Non-Interactive Multi Folding Scheme described in section 5 of
 | |||
| /// [HyperNova](https://eprint.iacr.org/2023/573.pdf)
 | |||
| pub struct NIMFS<C: CurveGroup> {
 | |||
|     pub _c: PhantomData<C>,
 | |||
| }
 | |||
| 
 | |||
| impl<C: CurveGroup> NIMFS<C> {
 | |||
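|     /// Folds a set of LCCCS and CCCS instances into a single LCCCS instance. Every component is
|     /// combined as a random linear combination weighted by powers of rho, e.g.
|     /// C' = \sum_i rho^i * C_i and v' = \sum_i rho^i * v_i, where v_i is taken from the sigmas
|     /// for the LCCCS instances and from the thetas for the CCCS instances (for which u_i = 1).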
|     pub fn fold(
 | |||
|         lcccs: &[LCCCS<C>],
 | |||
|         cccs: &[CCCS<C>],
 | |||
|         sigmas_thetas: &SigmasThetas<C::ScalarField>,
 | |||
|         r_x_prime: Vec<C::ScalarField>,
 | |||
|         rho: C::ScalarField,
 | |||
|     ) -> LCCCS<C> {
 | |||
|         let (sigmas, thetas) = (sigmas_thetas.0.clone(), sigmas_thetas.1.clone());
 | |||
|         let mut C_folded = C::zero();
 | |||
|         let mut u_folded = C::ScalarField::zero();
 | |||
|         let mut x_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); lcccs[0].x.len()];
 | |||
|         let mut v_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); sigmas[0].len()];
 | |||
| 
 | |||
|         for i in 0..(lcccs.len() + cccs.len()) {
 | |||
|             let rho_i = rho.pow([i as u64]);
 | |||
| 
 | |||
|             let c: C;
 | |||
|             let u: C::ScalarField;
 | |||
|             let x: Vec<C::ScalarField>;
 | |||
|             let v: Vec<C::ScalarField>;
 | |||
|             if i < lcccs.len() {
 | |||
|                 c = lcccs[i].C;
 | |||
|                 u = lcccs[i].u;
 | |||
|                 x = lcccs[i].x.clone();
 | |||
|                 v = sigmas[i].clone();
 | |||
|             } else {
 | |||
|                 c = cccs[i - lcccs.len()].C;
 | |||
|                 u = C::ScalarField::one();
 | |||
|                 x = cccs[i - lcccs.len()].x.clone();
 | |||
|                 v = thetas[i - lcccs.len()].clone();
 | |||
|             }
 | |||
| 
 | |||
|             C_folded += c.mul(rho_i);
 | |||
|             u_folded += rho_i * u;
 | |||
|             x_folded = x_folded
 | |||
|                 .iter()
 | |||
|                 .zip(
 | |||
|                     x.iter()
 | |||
|                         .map(|x_i| *x_i * rho_i)
 | |||
|                         .collect::<Vec<C::ScalarField>>(),
 | |||
|                 )
 | |||
|                 .map(|(a_i, b_i)| *a_i + b_i)
 | |||
|                 .collect();
 | |||
| 
 | |||
|             v_folded = v_folded
 | |||
|                 .iter()
 | |||
|                 .zip(
 | |||
|                     v.iter()
 | |||
|                         .map(|x_i| *x_i * rho_i)
 | |||
|                         .collect::<Vec<C::ScalarField>>(),
 | |||
|                 )
 | |||
|                 .map(|(a_i, b_i)| *a_i + b_i)
 | |||
|                 .collect();
 | |||
|         }
 | |||
| 
 | |||
|         LCCCS::<C> {
 | |||
|             C: C_folded,
 | |||
|             u: u_folded,
 | |||
|             x: x_folded,
 | |||
|             r_x: r_x_prime,
 | |||
|             v: v_folded,
 | |||
|         }
 | |||
|     }
 | |||
| 
 | |||
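|     /// Folds the witnesses with the same powers of rho used in `fold`; since the Pedersen
|     /// commitment is homomorphic, the folded witness opens the folded commitment C'.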
|     pub fn fold_witness(
 | |||
|         w_lcccs: &[Witness<C::ScalarField>],
 | |||
|         w_cccs: &[Witness<C::ScalarField>],
 | |||
|         rho: C::ScalarField,
 | |||
|     ) -> Witness<C::ScalarField> {
 | |||
|         let mut w_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w_lcccs[0].w.len()];
 | |||
|         let mut r_w_folded = C::ScalarField::zero();
 | |||
| 
 | |||
|         for i in 0..(w_lcccs.len() + w_cccs.len()) {
 | |||
|             let rho_i = rho.pow([i as u64]);
 | |||
|             let w: Vec<C::ScalarField>;
 | |||
|             let r_w: C::ScalarField;
 | |||
| 
 | |||
|             if i < w_lcccs.len() {
 | |||
|                 w = w_lcccs[i].w.clone();
 | |||
|                 r_w = w_lcccs[i].r_w;
 | |||
|             } else {
 | |||
|                 w = w_cccs[i - w_lcccs.len()].w.clone();
 | |||
|                 r_w = w_cccs[i - w_lcccs.len()].r_w;
 | |||
|             }
 | |||
| 
 | |||
|             w_folded = w_folded
 | |||
|                 .iter()
 | |||
|                 .zip(
 | |||
|                     w.iter()
 | |||
|                         .map(|x_i| *x_i * rho_i)
 | |||
|                         .collect::<Vec<C::ScalarField>>(),
 | |||
|                 )
 | |||
|                 .map(|(a_i, b_i)| *a_i + b_i)
 | |||
|                 .collect();
 | |||
| 
 | |||
|             r_w_folded += rho_i * r_w;
 | |||
|         }
 | |||
|         Witness {
 | |||
|             w: w_folded,
 | |||
|             r_w: r_w_folded,
 | |||
|         }
 | |||
|     }
 | |||
| 
 | |||
|     /// Performs the multifolding prover. Given μ LCCCS instances and ν CCCS instances, fold them
 | |||
|     /// into a single LCCCS instance. Since this is the prover, also fold their witness.
 | |||
|     /// Returns the final folded LCCCS, the folded witness, the sumcheck proof, and the helper
 | |||
|     /// sumcheck claim sigmas and thetas.
 | |||
|     pub fn prove(
 | |||
|         transcript: &mut IOPTranscript<C::ScalarField>,
 | |||
|         ccs: &CCS<C>,
 | |||
|         running_instances: &[LCCCS<C>],
 | |||
|         new_instances: &[CCCS<C>],
 | |||
|         w_lcccs: &[Witness<C::ScalarField>],
 | |||
|         w_cccs: &[Witness<C::ScalarField>],
 | |||
|     ) -> (Proof<C>, LCCCS<C>, Witness<C::ScalarField>) {
 | |||
|         // TODO appends to transcript
 | |||
| 
 | |||
|         assert!(!running_instances.is_empty());
 | |||
|         assert!(!new_instances.is_empty());
 | |||
| 
 | |||
|         // construct the LCCCS z vector from the relaxation factor, public IO and witness
 | |||
|         // XXX this deserves its own function in LCCCS
 | |||
|         let mut z_lcccs = Vec::new();
 | |||
|         for (i, running_instance) in running_instances.iter().enumerate() {
 | |||
|             let z_1: Vec<C::ScalarField> = [
 | |||
|                 vec![running_instance.u],
 | |||
|                 running_instance.x.clone(),
 | |||
|                 w_lcccs[i].w.to_vec(),
 | |||
|             ]
 | |||
|             .concat();
 | |||
|             z_lcccs.push(z_1);
 | |||
|         }
 | |||
|         // construct the CCCS z vector from the public IO and witness
 | |||
|         let mut z_cccs = Vec::new();
 | |||
|         for (i, new_instance) in new_instances.iter().enumerate() {
 | |||
|             let z_2: Vec<C::ScalarField> = [
 | |||
|                 vec![C::ScalarField::one()],
 | |||
|                 new_instance.x.clone(),
 | |||
|                 w_cccs[i].w.to_vec(),
 | |||
|             ]
 | |||
|             .concat();
 | |||
|             z_cccs.push(z_2);
 | |||
|         }
 | |||
| 
 | |||
|         // Step 1: Get some challenges
 | |||
|         let gamma: C::ScalarField = transcript.get_and_append_challenge(b"gamma").unwrap();
 | |||
|         let beta: Vec<C::ScalarField> = transcript
 | |||
|             .get_and_append_challenge_vectors(b"beta", ccs.s)
 | |||
|             .unwrap();
 | |||
| 
 | |||
|         // Compute g(x)
 | |||
|         let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta);
 | |||
| 
 | |||
|         // Step 3: Run the sumcheck prover
 | |||
|         let sumcheck_proof =
 | |||
|             <PolyIOP<C::ScalarField> as SumCheck<C::ScalarField>>::prove(&g, transcript).unwrap(); // XXX unwrap
 | |||
| 
 | |||
|         // Note: The following two "sanity checks" are done for this prototype, in a final version
 | |||
|         // they should be removed.
 | |||
|         //
 | |||
|         // Sanity check 1: evaluate g(x) over x \in {0,1} (the boolean hypercube), and check that
 | |||
|         // its sum is equal to the extracted_sum from the SumCheck.
 | |||
|         //////////////////////////////////////////////////////////////////////
 | |||
|         let mut g_over_bhc = C::ScalarField::zero();
 | |||
|         for x in BooleanHypercube::new(ccs.s) {
 | |||
|             g_over_bhc += g.evaluate(&x).unwrap();
 | |||
|         }
 | |||
| 
 | |||
|         // note: this is the sum of g(x) over the whole boolean hypercube
 | |||
|         let extracted_sum =
 | |||
|             <PolyIOP<C::ScalarField> as SumCheck<C::ScalarField>>::extract_sum(&sumcheck_proof);
 | |||
|         assert_eq!(extracted_sum, g_over_bhc);
 | |||
|         // Sanity check 2: expect \sum v_j * gamma^j to be equal to the sum of g(x) over the
 | |||
|         // boolean hypercube (and also equal to the extracted_sum from the SumCheck).
 | |||
|         let mut sum_v_j_gamma = C::ScalarField::zero();
 | |||
|         for (i, running_instance) in running_instances.iter().enumerate() {
 | |||
|             for j in 0..running_instance.v.len() {
 | |||
|                 let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
 | |||
|                 sum_v_j_gamma += running_instance.v[j] * gamma_j;
 | |||
|             }
 | |||
|         }
 | |||
|         assert_eq!(g_over_bhc, sum_v_j_gamma);
 | |||
|         assert_eq!(extracted_sum, sum_v_j_gamma);
 | |||
|         //////////////////////////////////////////////////////////////////////
 | |||
| 
 | |||
|         // Step 2: dig into the sumcheck and extract r_x_prime
 | |||
|         let r_x_prime = sumcheck_proof.point.clone();
 | |||
| 
 | |||
|         // Step 4: compute sigmas and thetas
 | |||
|         let sigmas_thetas = compute_sigmas_and_thetas(ccs, &z_lcccs, &z_cccs, &r_x_prime);
 | |||
| 
 | |||
|         // Step 6: Get the folding challenge
 | |||
|         let rho: C::ScalarField = transcript.get_and_append_challenge(b"rho").unwrap();
 | |||
| 
 | |||
|         // Step 7: Create the folded instance
 | |||
|         let folded_lcccs = Self::fold(
 | |||
|             running_instances,
 | |||
|             new_instances,
 | |||
|             &sigmas_thetas,
 | |||
|             r_x_prime,
 | |||
|             rho,
 | |||
|         );
 | |||
| 
 | |||
|         // Step 8: Fold the witnesses
 | |||
|         let folded_witness = Self::fold_witness(w_lcccs, w_cccs, rho);
 | |||
| 
 | |||
|         (
 | |||
|             Proof::<C> {
 | |||
|                 sc_proof: sumcheck_proof,
 | |||
|                 sigmas_thetas,
 | |||
|             },
 | |||
|             folded_lcccs,
 | |||
|             folded_witness,
 | |||
|         )
 | |||
|     }
 | |||
| 
 | |||
|     /// Performs the multifolding verifier. Given μ LCCCS instances and ν CCCS instances, fold them
 | |||
|     /// into a single LCCCS instance.
 | |||
|     /// Returns the folded LCCCS instance.
 | |||
|     pub fn verify(
 | |||
|         transcript: &mut IOPTranscript<C::ScalarField>,
 | |||
|         ccs: &CCS<C>,
 | |||
|         running_instances: &[LCCCS<C>],
 | |||
|         new_instances: &[CCCS<C>],
 | |||
|         proof: Proof<C>,
 | |||
|     ) -> LCCCS<C> {
 | |||
|         // TODO appends to transcript
 | |||
| 
 | |||
|         assert!(!running_instances.is_empty());
 | |||
|         assert!(!new_instances.is_empty());
 | |||
| 
 | |||
|         // Step 1: Get some challenges
 | |||
|         let gamma: C::ScalarField = transcript.get_and_append_challenge(b"gamma").unwrap();
 | |||
|         let beta: Vec<C::ScalarField> = transcript
 | |||
|             .get_and_append_challenge_vectors(b"beta", ccs.s)
 | |||
|             .unwrap();
 | |||
| 
 | |||
|         let vp_aux_info = VPAuxInfo::<C::ScalarField> {
 | |||
|             max_degree: ccs.d + 1,
 | |||
|             num_variables: ccs.s,
 | |||
|             phantom: PhantomData::<C::ScalarField>,
 | |||
|         };
 | |||
| 
 | |||
|         // Step 3: Start verifying the sumcheck
 | |||
|         // First, compute the expected sumcheck sum: \sum gamma^j v_j
 | |||
|         let mut sum_v_j_gamma = C::ScalarField::zero();
 | |||
|         for (i, running_instance) in running_instances.iter().enumerate() {
 | |||
|             for j in 0..running_instance.v.len() {
 | |||
|                 let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
 | |||
|                 sum_v_j_gamma += running_instance.v[j] * gamma_j;
 | |||
|             }
 | |||
|         }
 | |||
| 
 | |||
|         // Verify the interactive part of the sumcheck
 | |||
|         let sumcheck_subclaim = <PolyIOP<C::ScalarField> as SumCheck<C::ScalarField>>::verify(
 | |||
|             sum_v_j_gamma,
 | |||
|             &proof.sc_proof,
 | |||
|             &vp_aux_info,
 | |||
|             transcript,
 | |||
|         )
 | |||
|         .unwrap();
 | |||
| 
 | |||
|         // Step 2: Dig into the sumcheck claim and extract the randomness used
 | |||
|         let r_x_prime = sumcheck_subclaim.point.clone();
 | |||
| 
 | |||
|         // Step 5: Finish verifying sumcheck (verify the claim c)
 | |||
|         let c = compute_c_from_sigmas_and_thetas(
 | |||
|             ccs,
 | |||
|             &proof.sigmas_thetas,
 | |||
|             gamma,
 | |||
|             &beta,
 | |||
|             &running_instances
 | |||
|                 .iter()
 | |||
|                 .map(|lcccs| lcccs.r_x.clone())
 | |||
|                 .collect(),
 | |||
|             &r_x_prime,
 | |||
|         );
 | |||
|         // check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas
 | |||
|         assert_eq!(c, sumcheck_subclaim.expected_evaluation);
 | |||
| 
 | |||
|         // Sanity check: we can also compute g(r_x') from the proof's last evaluation value, and it
 | |||
|         // should be equal to the previously obtained values.
 | |||
|         let g_on_rxprime_from_sumcheck_last_eval = interpolate_uni_poly::<C::ScalarField>(
 | |||
|             &proof.sc_proof.proofs.last().unwrap().evaluations,
 | |||
|             *r_x_prime.last().unwrap(),
 | |||
|         )
 | |||
|         .unwrap();
 | |||
|         assert_eq!(g_on_rxprime_from_sumcheck_last_eval, c);
 | |||
|         assert_eq!(
 | |||
|             g_on_rxprime_from_sumcheck_last_eval,
 | |||
|             sumcheck_subclaim.expected_evaluation
 | |||
|         );
 | |||
| 
 | |||
|         // Step 6: Get the folding challenge
 | |||
|         let rho: C::ScalarField = transcript.get_and_append_challenge(b"rho").unwrap();
 | |||
| 
 | |||
|         // Step 7: Compute the folded instance
 | |||
|         Self::fold(
 | |||
|             running_instances,
 | |||
|             new_instances,
 | |||
|             &proof.sigmas_thetas,
 | |||
|             r_x_prime,
 | |||
|             rho,
 | |||
|         )
 | |||
|     }
 | |||
| }
 | |||
| 
 | |||
| #[cfg(test)]
 | |||
| pub mod tests {
 | |||
|     use super::*;
 | |||
|     use crate::ccs::tests::{get_test_ccs, get_test_z};
 | |||
|     use ark_std::test_rng;
 | |||
|     use ark_std::UniformRand;
 | |||
| 
 | |||
|     use crate::pedersen::Pedersen;
 | |||
|     use ark_pallas::{Fr, Projective};
 | |||
| 
 | |||
|     #[test]
 | |||
|     fn test_fold() {
 | |||
|         let ccs = get_test_ccs();
 | |||
|         let z1 = get_test_z(3);
 | |||
|         let z2 = get_test_z(4);
 | |||
|         ccs.check_relation(&z1).unwrap();
 | |||
|         ccs.check_relation(&z2).unwrap();
 | |||
| 
 | |||
|         let mut rng = test_rng();
 | |||
|         let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
 | |||
| 
 | |||
|         let sigmas_thetas =
 | |||
|             compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
 | |||
| 
 | |||
|         let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
| 
 | |||
|         let (lcccs, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);
 | |||
|         let (cccs, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z2);
 | |||
| 
 | |||
|         lcccs.check_relation(&pedersen_params, &ccs, &w1).unwrap();
 | |||
|         cccs.check_relation(&pedersen_params, &ccs, &w2).unwrap();
 | |||
| 
 | |||
|         let mut rng = test_rng();
 | |||
|         let rho = Fr::rand(&mut rng);
 | |||
| 
 | |||
|         let folded = NIMFS::<Projective>::fold(&[lcccs], &[cccs], &sigmas_thetas, r_x_prime, rho);
 | |||
| 
 | |||
|         let w_folded = NIMFS::<Projective>::fold_witness(&[w1], &[w2], rho);
 | |||
| 
 | |||
|         // check lcccs relation
 | |||
|         folded
 | |||
|             .check_relation(&pedersen_params, &ccs, &w_folded)
 | |||
|             .unwrap();
 | |||
|     }
 | |||
| 
 | |||
|     /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper)
 | |||
|     #[test]
 | |||
|     pub fn test_basic_multifolding() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         // Create a basic CCS circuit
 | |||
|         let ccs = get_test_ccs::<Projective>();
 | |||
|         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
| 
 | |||
|         // Generate a satisfying witness
 | |||
|         let z_1 = get_test_z(3);
 | |||
|         // Generate another satisfying witness
 | |||
|         let z_2 = get_test_z(4);
 | |||
| 
 | |||
|         // Create the LCCCS instance out of z_1
 | |||
|         let (running_instance, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1);
 | |||
|         // Create the CCCS instance out of z_2
 | |||
|         let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2);
 | |||
| 
 | |||
|         // Prover's transcript
 | |||
|         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_p.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         // Run the prover side of the multifolding
 | |||
|         let (proof, folded_lcccs, folded_witness) = NIMFS::<Projective>::prove(
 | |||
|             &mut transcript_p,
 | |||
|             &ccs,
 | |||
|             &[running_instance.clone()],
 | |||
|             &[new_instance.clone()],
 | |||
|             &[w1],
 | |||
|             &[w2],
 | |||
|         );
 | |||
| 
 | |||
|         // Verifier's transcript
 | |||
|         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_v.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         // Run the verifier side of the multifolding
 | |||
|         let folded_lcccs_v = NIMFS::<Projective>::verify(
 | |||
|             &mut transcript_v,
 | |||
|             &ccs,
 | |||
|             &[running_instance.clone()],
 | |||
|             &[new_instance.clone()],
 | |||
|             proof,
 | |||
|         );
 | |||
|         assert_eq!(folded_lcccs, folded_lcccs_v);
 | |||
| 
 | |||
|         // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
 | |||
|         folded_lcccs
 | |||
|             .check_relation(&pedersen_params, &ccs, &folded_witness)
 | |||
|             .unwrap();
 | |||
|     }
 | |||
| 
 | |||
|     /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance
 | |||
|     #[test]
 | |||
|     pub fn test_multifolding_two_instances_multiple_steps() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         let ccs = get_test_ccs::<Projective>();
 | |||
| 
 | |||
|         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
| 
 | |||
|         // LCCCS witness
 | |||
|         let z_1 = get_test_z(2);
 | |||
|         let (mut running_instance, mut w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1);
 | |||
| 
 | |||
|         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_p.append_message(b"init", b"init").unwrap();
 | |||
|         transcript_v.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         let n: usize = 10;
 | |||
|         for i in 3..n {
 | |||
|             println!("\niteration: i {}", i); // DBG
 | |||
| 
 | |||
|             // CCS witness
 | |||
|             let z_2 = get_test_z(i);
 | |||
|             println!("z_2 {:?}", z_2); // DBG
 | |||
| 
 | |||
|             let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2);
 | |||
| 
 | |||
|             // run the prover side of the multifolding
 | |||
|             let (proof, folded_lcccs, folded_witness) = NIMFS::<Projective>::prove(
 | |||
|                 &mut transcript_p,
 | |||
|                 &ccs,
 | |||
|                 &[running_instance.clone()],
 | |||
|                 &[new_instance.clone()],
 | |||
|                 &[w1],
 | |||
|                 &[w2],
 | |||
|             );
 | |||
| 
 | |||
|             // run the verifier side of the multifolding
 | |||
|             let folded_lcccs_v = NIMFS::<Projective>::verify(
 | |||
|                 &mut transcript_v,
 | |||
|                 &ccs,
 | |||
|                 &[running_instance.clone()],
 | |||
|                 &[new_instance.clone()],
 | |||
|                 proof,
 | |||
|             );
 | |||
| 
 | |||
|             assert_eq!(folded_lcccs, folded_lcccs_v);
 | |||
| 
 | |||
|             // check that the folded instance with the folded witness holds the LCCCS relation
 | |||
|             println!("check_relation {}", i);
 | |||
|             folded_lcccs
 | |||
|                 .check_relation(&pedersen_params, &ccs, &folded_witness)
 | |||
|                 .unwrap();
 | |||
| 
 | |||
|             running_instance = folded_lcccs;
 | |||
|             w1 = folded_witness;
 | |||
|         }
 | |||
|     }
 | |||
| 
 | |||
|     /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step.
 | |||
|     #[test]
 | |||
|     pub fn test_multifolding_mu_nu_instances() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         // Create a basic CCS circuit
 | |||
|         let ccs = get_test_ccs::<Projective>();
 | |||
|         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
| 
 | |||
|         let mu = 10;
 | |||
|         let nu = 15;
 | |||
| 
 | |||
|         // Generate mu LCCCS & nu CCCS satisfying witnesses
 | |||
|         let mut z_lcccs = Vec::new();
 | |||
|         for i in 0..mu {
 | |||
|             let z = get_test_z(i + 3);
 | |||
|             z_lcccs.push(z);
 | |||
|         }
 | |||
|         let mut z_cccs = Vec::new();
 | |||
|         for i in 0..nu {
 | |||
|             let z = get_test_z(nu + i + 3);
 | |||
|             z_cccs.push(z);
 | |||
|         }
 | |||
| 
 | |||
|         // Create the LCCCS instances out of z_lcccs
 | |||
|         let mut lcccs_instances = Vec::new();
 | |||
|         let mut w_lcccs = Vec::new();
 | |||
|         for z_i in z_lcccs.iter() {
 | |||
|             let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i);
 | |||
|             lcccs_instances.push(running_instance);
 | |||
|             w_lcccs.push(w);
 | |||
|         }
 | |||
|         // Create the CCCS instance out of z_cccs
 | |||
|         let mut cccs_instances = Vec::new();
 | |||
|         let mut w_cccs = Vec::new();
 | |||
|         for z_i in z_cccs.iter() {
 | |||
|             let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i);
 | |||
|             cccs_instances.push(new_instance);
 | |||
|             w_cccs.push(w);
 | |||
|         }
 | |||
| 
 | |||
|         // Prover's transcript
 | |||
|         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_p.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         // Run the prover side of the multifolding
 | |||
|         let (proof, folded_lcccs, folded_witness) = NIMFS::<Projective>::prove(
 | |||
|             &mut transcript_p,
 | |||
|             &ccs,
 | |||
|             &lcccs_instances,
 | |||
|             &cccs_instances,
 | |||
|             &w_lcccs,
 | |||
|             &w_cccs,
 | |||
|         );
 | |||
| 
 | |||
|         // Verifier's transcript
 | |||
|         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_v.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         // Run the verifier side of the multifolding
 | |||
|         let folded_lcccs_v = NIMFS::<Projective>::verify(
 | |||
|             &mut transcript_v,
 | |||
|             &ccs,
 | |||
|             &lcccs_instances,
 | |||
|             &cccs_instances,
 | |||
|             proof,
 | |||
|         );
 | |||
|         assert_eq!(folded_lcccs, folded_lcccs_v);
 | |||
| 
 | |||
|         // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
 | |||
|         folded_lcccs
 | |||
|             .check_relation(&pedersen_params, &ccs, &folded_witness)
 | |||
|             .unwrap();
 | |||
|     }
 | |||
| 
 | |||
|     /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step
 | |||
|     /// and repeats the process doing multiple steps.
 | |||
|     #[test]
 | |||
|     pub fn test_multifolding_mu_nu_instances_multiple_steps() {
 | |||
|         let mut rng = test_rng();
 | |||
| 
 | |||
|         // Create a basic CCS circuit
 | |||
|         let ccs = get_test_ccs::<Projective>();
 | |||
|         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
 | |||
| 
 | |||
|         // Prover's transcript
 | |||
|         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_p.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         // Verifier's transcript
 | |||
|         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
 | |||
|         transcript_v.append_message(b"init", b"init").unwrap();
 | |||
| 
 | |||
|         let n_steps = 3;
 | |||
| 
 | |||
|         // number of LCCCS & CCCS instances in each multifolding step
 | |||
|         let mu = 10;
 | |||
|         let nu = 15;
 | |||
| 
 | |||
|         // Generate mu LCCCS & nu CCCS satisfying witnesses for each step
 | |||
|         for step in 0..n_steps {
 | |||
|             let mut z_lcccs = Vec::new();
 | |||
|             for i in 0..mu {
 | |||
|                 let z = get_test_z(step + i + 3);
 | |||
|                 z_lcccs.push(z);
 | |||
|             }
 | |||
|             let mut z_cccs = Vec::new();
 | |||
|             for i in 0..nu {
 | |||
|                 let z = get_test_z(nu + i + 3);
 | |||
|                 z_cccs.push(z);
 | |||
|             }
 | |||
| 
 | |||
|             // Create the LCCCS instances out of z_lcccs
 | |||
|             let mut lcccs_instances = Vec::new();
 | |||
|             let mut w_lcccs = Vec::new();
 | |||
|             for z_i in z_lcccs.iter() {
 | |||
|                 let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i);
 | |||
|                 lcccs_instances.push(running_instance);
 | |||
|                 w_lcccs.push(w);
 | |||
|             }
 | |||
|             // Create the CCCS instance out of z_cccs
 | |||
|             let mut cccs_instances = Vec::new();
 | |||
|             let mut w_cccs = Vec::new();
 | |||
|             for z_i in z_cccs.iter() {
 | |||
|                 let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i);
 | |||
|                 cccs_instances.push(new_instance);
 | |||
|                 w_cccs.push(w);
 | |||
|             }
 | |||
| 
 | |||
|             // Run the prover side of the multifolding
 | |||
|             let (proof, folded_lcccs, folded_witness) = NIMFS::<Projective>::prove(
 | |||
|                 &mut transcript_p,
 | |||
|                 &ccs,
 | |||
|                 &lcccs_instances,
 | |||
|                 &cccs_instances,
 | |||
|                 &w_lcccs,
 | |||
|                 &w_cccs,
 | |||
|             );
 | |||
| 
 | |||
|             // Run the verifier side of the multifolding
 | |||
|             let folded_lcccs_v = NIMFS::<Projective>::verify(
 | |||
|                 &mut transcript_v,
 | |||
|                 &ccs,
 | |||
|                 &lcccs_instances,
 | |||
|                 &cccs_instances,
 | |||
|                 proof,
 | |||
|             );
 | |||
|             assert_eq!(folded_lcccs, folded_lcccs_v);
 | |||
| 
 | |||
|             // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
 | |||
|             folded_lcccs
 | |||
|                 .check_relation(&pedersen_params, &ccs, &folded_witness)
 | |||
|                 .unwrap();
 | |||
|         }
 | |||
|     }
 | |||
| }
 | |||
| @@ -0,0 +1,360 @@
| use ark_ec::CurveGroup;
 | |||
| use ark_ff::{Field, PrimeField};
 | |||
| use ark_poly::DenseMultilinearExtension;
 | |||
| use ark_poly::MultilinearExtension;
 | |||
| use ark_std::{One, Zero};
 | |||
| use std::ops::Add;
 | |||
| 
 | |||
| use crate::utils::multilinear_polynomial::fix_variables;
 | |||
| use crate::utils::multilinear_polynomial::scalar_mul;
 | |||
| 
 | |||
| use super::lcccs::LCCCS;
 | |||
| use super::nimfs::SigmasThetas;
 | |||
| use crate::ccs::CCS;
 | |||
| use crate::utils::hypercube::BooleanHypercube;
 | |||
| use crate::utils::mle::dense_vec_to_mle;
 | |||
| use crate::utils::mle::matrix_to_mle;
 | |||
| use crate::utils::vec::SparseMatrix;
 | |||
| use crate::utils::virtual_polynomial::{eq_eval, VirtualPolynomial};
 | |||
| 
 | |||
| /// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) for all j values
 | |||
| /// in 0..t, where t is the number of matrices in vec_M
 | |||
| pub fn compute_all_sum_Mz_evals<F: PrimeField>(
 | |||
|     vec_M: &[SparseMatrix<F>],
 | |||
|     z: &Vec<F>,
 | |||
|     r: &[F],
 | |||
|     s_prime: usize,
 | |||
| ) -> Vec<F> {
 | |||
|     // Convert z to MLE
 | |||
|     let z_y_mle = dense_vec_to_mle(s_prime, z);
 | |||
|     // Convert all matrices to MLE
 | |||
|     let M_x_y_mle: Vec<DenseMultilinearExtension<F>> =
 | |||
|         vec_M.iter().cloned().map(matrix_to_mle).collect();
 | |||
| 
 | |||
|     let mut v = Vec::with_capacity(M_x_y_mle.len());
 | |||
|     for M_i in M_x_y_mle {
 | |||
|         let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime);
 | |||
|         let v_i = sum_Mz.evaluate(r).unwrap();
 | |||
|         v.push(v_i);
 | |||
|     }
 | |||
|     v
 | |||
| }
 | |||
| 
 | |||
| /// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y)
 | |||
| pub fn compute_sum_Mz<F: PrimeField>(
 | |||
|     M_j: DenseMultilinearExtension<F>,
 | |||
|     z: &DenseMultilinearExtension<F>,
 | |||
|     s_prime: usize,
 | |||
| ) -> DenseMultilinearExtension<F> {
 | |||
|     let mut sum_Mz = DenseMultilinearExtension {
 | |||
|         evaluations: vec![F::zero(); M_j.evaluations.len()],
 | |||
|         num_vars: M_j.num_vars - s_prime,
 | |||
|     };
 | |||
| 
 | |||
|     let bhc = BooleanHypercube::new(s_prime);
 | |||
|     for y in bhc.into_iter() {
 | |||
|         // In a slightly counter-intuitive fashion fix_variables() fixes the right-most variables of the polynomial. So
 | |||
|         // for a polynomial M(x,y) and a random field element r, if we do fix_variables(M,r) we will get M(x,r).
 | |||
|         let M_j_y = fix_variables(&M_j, &y);
 | |||
|         let z_y = z.evaluate(&y).unwrap();
 | |||
|         let M_j_z = scalar_mul(&M_j_y, &z_y);
 | |||
|         sum_Mz = sum_Mz.add(M_j_z);
 | |||
|     }
 | |||
|     sum_Mz
 | |||
| }
 | |||
| 
 | |||
| /// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS
 | |||
| /// instances
 | |||
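| /// i.e. (sigma_i)_j = \sum_{y \in {0,1}^{s'}} M_j(r_x', y) * z_lcccs_i(y) and
| ///      (theta_k)_j = \sum_{y \in {0,1}^{s'}} M_j(r_x', y) * z_cccs_k(y)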
| pub fn compute_sigmas_and_thetas<C: CurveGroup>(
 | |||
|     ccs: &CCS<C>,
 | |||
|     z_lcccs: &[Vec<C::ScalarField>],
 | |||
|     z_cccs: &[Vec<C::ScalarField>],
 | |||
|     r_x_prime: &[C::ScalarField],
 | |||
| ) -> SigmasThetas<C::ScalarField> {
 | |||
|     let mut sigmas: Vec<Vec<C::ScalarField>> = Vec::new();
 | |||
|     for z_lcccs_i in z_lcccs {
 | |||
|         // sigmas
 | |||
|         let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime);
 | |||
|         sigmas.push(sigma_i);
 | |||
|     }
 | |||
|     let mut thetas: Vec<Vec<C::ScalarField>> = Vec::new();
 | |||
|     for z_cccs_i in z_cccs {
 | |||
|         // thetas
 | |||
|         let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime);
 | |||
|         thetas.push(theta_i);
 | |||
|     }
 | |||
|     SigmasThetas(sigmas, thetas)
 | |||
| }
 | |||
| 
 | |||
| /// Compute the right-hand-side of step 5 of the multifolding scheme
 | |||
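| /// i.e. c = \sum_i \sum_j gamma^{i*t+j} * eq(r_x_i, r_x') * sigma_{i,j}
| ///        + \sum_k gamma^{mu*t+k} * eq(beta, r_x') * ( \sum_i c_i * \prod_{j \in S_i} theta_{k,j} )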
| pub fn compute_c_from_sigmas_and_thetas<C: CurveGroup>(
 | |||
|     ccs: &CCS<C>,
 | |||
|     st: &SigmasThetas<C::ScalarField>,
 | |||
|     gamma: C::ScalarField,
 | |||
|     beta: &[C::ScalarField],
 | |||
|     vec_r_x: &Vec<Vec<C::ScalarField>>,
 | |||
|     r_x_prime: &[C::ScalarField],
 | |||
| ) -> C::ScalarField {
 | |||
|     let (vec_sigmas, vec_thetas) = (st.0.clone(), st.1.clone());
 | |||
|     let mut c = C::ScalarField::zero();
 | |||
| 
 | |||
|     let mut e_lcccs = Vec::new();
 | |||
|     for r_x in vec_r_x {
 | |||
|         e_lcccs.push(eq_eval(r_x, r_x_prime).unwrap());
 | |||
|     }
 | |||
|     for (i, sigmas) in vec_sigmas.iter().enumerate() {
 | |||
|         // (sum gamma^j * e_i * sigma_j)
 | |||
|         for (j, sigma_j) in sigmas.iter().enumerate() {
 | |||
|             let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
 | |||
|             c += gamma_j * e_lcccs[i] * sigma_j;
 | |||
|         }
 | |||
|     }
 | |||
| 
 | |||
|     let mu = vec_sigmas.len();
 | |||
|     let e2 = eq_eval(beta, r_x_prime).unwrap();
 | |||
|     for (k, thetas) in vec_thetas.iter().enumerate() {
 | |||
|         // + gamma^{mu*t+k} * e2 * sum c_i * prod theta_j
 | |||
|         let mut lhs = C::ScalarField::zero();
 | |||
|         for i in 0..ccs.q {
 | |||
|             let mut prod = C::ScalarField::one();
 | |||
|             for j in ccs.S[i].clone() {
 | |||
|                 prod *= thetas[j];
 | |||
|             }
 | |||
|             lhs += ccs.c[i] * prod;
 | |||
|         }
 | |||
|         let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]);
 | |||
|         c += gamma_t1 * e2 * lhs;
 | |||
|     }
 | |||
|     c
 | |||
| }
 | |||
| 
 | |||
| /// Compute g(x) polynomial for the given inputs.
 | |||
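| /// g(x) = \sum_j gamma^j * L_j(x) + \sum_i gamma^{mu*t+i} * Q_i(x), where the L_j(x) come from
| /// the LCCCS instances and the Q_i(x) from the CCCS instances.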
| pub fn compute_g<C: CurveGroup>(
 | |||
|     ccs: &CCS<C>,
 | |||
|     running_instances: &[LCCCS<C>],
 | |||
|     z_lcccs: &[Vec<C::ScalarField>],
 | |||
|     z_cccs: &[Vec<C::ScalarField>],
 | |||
|     gamma: C::ScalarField,
 | |||
|     beta: &[C::ScalarField],
 | |||
| ) -> VirtualPolynomial<C::ScalarField> {
 | |||
|     let mu = running_instances.len();
 | |||
|     let mut vec_Ls: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
 | |||
|     for (i, running_instance) in running_instances.iter().enumerate() {
 | |||
|         let mut Ls = running_instance.compute_Ls(ccs, &z_lcccs[i]);
 | |||
|         vec_Ls.append(&mut Ls);
 | |||
|     }
 | |||
|     let mut vec_Q: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
 | |||
 | |||
|     for z_cccs_i in z_cccs.iter() {
 | |||
|         let Q = ccs.compute_Q(z_cccs_i, beta);
 | |||
|         vec_Q.push(Q);
 | |||
|     }
 | |||
|     let mut g = vec_Ls[0].clone();
 | |||
| 
 | |||
|     // note: the following two loops can be integrated in the previous two loops, but left
 | |||
|     // separated for clarity in the PoC implementation.
 | |||
|     for (j, L_j) in vec_Ls.iter_mut().enumerate().skip(1) {
 | |||
|         let gamma_j = gamma.pow([j as u64]);
 | |||
|         L_j.scalar_mul(&gamma_j);
 | |||
|         g = g.add(L_j);
 | |||
|     }
 | |||
|     for (i, Q_i) in vec_Q.iter_mut().enumerate() {
 | |||
|         let gamma_mut_i = gamma.pow([(mu * ccs.t + i) as u64]);
 | |||
|         Q_i.scalar_mul(&gamma_mut_i);
 | |||
|         g = g.add(Q_i);
 | |||
|     }
 | |||
|     g
 | |||
| }
 | |||
| 
 | |||
| #[cfg(test)]
 | |||
| pub mod tests {
 | |||
|     use super::*;
 | |||
| 
 | |||
|     use ark_pallas::{Fr, Projective};
 | |||
|     use ark_std::test_rng;
 | |||
|     use ark_std::One;
 | |||
|     use ark_std::UniformRand;
 | |||
|     use ark_std::Zero;
 | |||
| 
 | |||
|     use crate::ccs::tests::{get_test_ccs, get_test_z};
 | |||
|     use crate::pedersen::Pedersen;
 | |||
|     use crate::utils::multilinear_polynomial::tests::fix_last_variables;
 | |||
|     use crate::utils::virtual_polynomial::eq_eval;
 | |||
| 
 | |||
    #[test]
    fn test_compute_sum_Mz_over_boolean_hypercube() {
        let ccs = get_test_ccs::<Projective>();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();
        let z_mle = dense_vec_to_mle(ccs.s_prime, &z);

        // check that, for every x on the boolean hypercube, the CCS sum
        // sum_i c_i * prod_{j in S_i} (sum_y M_j(x,y) * z(y)) evaluates to 0
        for x in BooleanHypercube::new(ccs.s) {
            let mut r = Fr::zero();
            for i in 0..ccs.q {
                let mut Sj_prod = Fr::one();
                for j in ccs.S[i].clone() {
                    let M_j = matrix_to_mle(ccs.M[j].clone());
                    let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
                    let sum_Mz_x = sum_Mz.evaluate(&x).unwrap();
                    Sj_prod *= sum_Mz_x;
                }
                r += Sj_prod * ccs.c[i];
            }
            assert_eq!(r, Fr::zero());
        }
    }

    /// Given an M(x,y) matrix and a random field element `r`, test that ~M(r,y) is an s'-variable polynomial which
    /// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements
    /// of the column and the values eq_i(r), where i is the row of that element.
    ///
    /// For example, for matrix M:
    ///
    /// [2, 3, 4, 4
    ///  4, 4, 3, 2
    ///  2, 8, 9, 2
    ///  9, 4, 2, 0]
    ///
    /// The polynomial ~M(r,y) is a polynomial in F^2 which evaluates to the following values in the hypercube:
    /// - M(00) = 2*eq_00(r) + 4*eq_10(r) + 2*eq_01(r) + 9*eq_11(r)
    /// - M(10) = 3*eq_00(r) + 4*eq_10(r) + 8*eq_01(r) + 4*eq_11(r)
    /// - M(01) = 4*eq_00(r) + 3*eq_10(r) + 9*eq_01(r) + 2*eq_11(r)
    /// - M(11) = 4*eq_00(r) + 2*eq_10(r) + 2*eq_01(r) + 0*eq_11(r)
    ///
    /// This is used by HyperNova in LCCCS to perform a verifier-chosen random linear combination between the columns
    /// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for
    /// Universal and Updatable SNARKs".
    #[test]
    fn test_compute_M_r_y_compression() {
        let mut rng = test_rng();

        // s = 2, s' = 3
        let ccs = get_test_ccs::<Projective>();

        let M = ccs.M[0].clone().to_dense();
        let M_mle = matrix_to_mle(ccs.M[0].clone());

        // Fix the polynomial ~M(r,y)
        let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let M_r_y = fix_last_variables(&M_mle, &r);

        // compute M_r_y the other way around
        for j in 0..M[0].len() {
            // Go over every column of M
            let column_j: Vec<Fr> = M.clone().iter().map(|x| x[j]).collect();
            // and perform the random lincomb between the elements of the column and eq_i(r)
            let rlc = BooleanHypercube::new(ccs.s)
                .enumerate()
                .map(|(i, x)| column_j[i] * eq_eval(&x, &r).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);

            assert_eq!(M_r_y.evaluations[j], rlc);
        }
    }

    #[test]
    fn test_compute_sigmas_and_thetas() {
        let ccs = get_test_ccs();
        let z1 = get_test_z(3);
        let z2 = get_test_z(4);
        ccs.check_relation(&z1).unwrap();
        ccs.check_relation(&z2).unwrap();

        let mut rng = test_rng();
        let gamma: Fr = Fr::rand(&mut rng);
        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Set up the Pedersen parameters and build the running LCCCS instance from z1
        let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);

        let sigmas_thetas =
            compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);

        let g = compute_g(
            &ccs,
            &[lcccs_instance.clone()],
            &[z1.clone()],
            &[z2.clone()],
            gamma,
            &beta,
        );

        // we expect g(r_x_prime) to equal
        //   c = (sum_j gamma^j * e1 * sigma_j) + (sum_k gamma^{mu*t+k} * e2 * sum_i c_i * prod_j theta_j)
        // as computed by compute_c_from_sigmas_and_thetas
        let expected_c = g.evaluate(&r_x_prime).unwrap();
        let c = compute_c_from_sigmas_and_thetas::<Projective>(
            &ccs,
            &sigmas_thetas,
            gamma,
            &beta,
            &vec![lcccs_instance.r_x],
            &r_x_prime,
        );
        assert_eq!(c, expected_c);
    }

    #[test]
    fn test_compute_g() {
        let ccs = get_test_ccs();
        let z1 = get_test_z(3);
        let z2 = get_test_z(4);
        ccs.check_relation(&z1).unwrap();
        ccs.check_relation(&z2).unwrap();

        let mut rng = test_rng();
        let gamma: Fr = Fr::rand(&mut rng);
        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Set up the Pedersen parameters and build the running LCCCS instance from z1
        let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);

        // sum_{j in [t]} gamma^j * v_j, the value g should sum to over the boolean hypercube
        let mut sum_v_j_gamma = Fr::zero();
        for j in 0..lcccs_instance.v.len() {
            let gamma_j = gamma.pow([j as u64]);
            sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
        }

        // Compute g(x) from the running LCCCS instance and the incoming CCCS witness
        let g = compute_g::<Projective>(
            &ccs,
            &[lcccs_instance.clone()],
            &[z1.clone()],
            &[z2.clone()],
            gamma,
            &beta,
        );

        // evaluate g(x) over x \in {0,1}^s
        let mut g_on_bhc = Fr::zero();
        for x in BooleanHypercube::new(ccs.s) {
            g_on_bhc += g.evaluate(&x).unwrap();
        }

        // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s
        let mut sum_Lj_on_bhc = Fr::zero();
        let vec_L = lcccs_instance.compute_Ls(&ccs, &z1);
        for x in BooleanHypercube::new(ccs.s) {
            for (j, Lj) in vec_L.iter().enumerate() {
                let gamma_j = gamma.pow([j as u64]);
                sum_Lj_on_bhc += Lj.evaluate(&x).unwrap() * gamma_j;
            }
        }

        // The Q(x) part of g sums to zero over the boolean hypercube (as checked in the test
        // 'test_compute_Q'), while the Lj(x) part contributes the v_j values, so the sum of g
        // over the hypercube is expected to be non-zero.
        assert_ne!(g_on_bhc, Fr::zero());

        // evaluating g(x) over the boolean hypercube should give the same result as evaluating the
        // sum of gamma^j * Lj(x) over the boolean hypercube
        assert_eq!(g_on_bhc, sum_Lj_on_bhc);

        // evaluating g(x) over the boolean hypercube should give the same result as evaluating the
        // sum of gamma^j * v_j over j \in [t]
        assert_eq!(g_on_bhc, sum_v_j_gamma);
    }
}
@ -1,2 +1,5 @@
pub mod circuits;
#[cfg(feature = "hypernova")]
pub mod hypernova;
#[cfg(feature = "nova")]
pub mod nova;
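// Note: the `hypernova` and `nova` feature gates above assume matching `[features]` entries
// in the crate's Cargo.toml (that file is not shown in this diff).
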
@ -0,0 +1,77 @@
/// A boolean hypercube structure to create an ergonomic evaluation domain
use crate::utils::virtual_polynomial::bit_decompose;
use ark_ff::PrimeField;

use std::marker::PhantomData;

/// A boolean hypercube that returns its points as an iterator.
/// If you iterate over it for 3 variables you will get the points in little-endian order:
/// 000 -> 100 -> 010 -> 110 -> 001 -> 101 -> 011 -> 111
#[derive(Debug, Clone)]
pub struct BooleanHypercube<F: PrimeField> {
    _f: PhantomData<F>,
    n_vars: usize,
    current: u64,
    max: u64,
}

impl<F: PrimeField> BooleanHypercube<F> {
    pub fn new(n_vars: usize) -> Self {
        BooleanHypercube::<F> {
            _f: PhantomData::<F>,
            n_vars,
            current: 0,
            max: 2_u32.pow(n_vars as u32) as u64,
        }
    }

    /// Returns the entry at the given index i (i.e. the little-endian bit decomposition of i).
    pub fn at_i(&self, i: usize) -> Vec<F> {
        assert!(i < self.max as usize);
        let bits = bit_decompose(i as u64, self.n_vars);
        bits.iter().map(|&x| F::from(x)).collect()
    }
}
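
// Example of `at_i`: with n_vars = 3, `at_i(5)` bit-decomposes 5 = 0b101 in little-endian
// order and returns the point [1, 0, 1], the same entry the iterator yields in position 5
// (see the ordering in the struct-level docs and the test below).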

impl<F: PrimeField> Iterator for BooleanHypercube<F> {
    type Item = Vec<F>;

    fn next(&mut self) -> Option<Self::Item> {
        let bits = bit_decompose(self.current, self.n_vars);
        let result: Vec<F> = bits.iter().map(|&x| F::from(x)).collect();
        self.current += 1;

        if self.current > self.max {
            return None;
        }

        Some(result)
    }
}
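
// Example: with 2 variables the iterator yields [0, 0], [1, 0], [0, 1], [1, 1], mirroring the
// 3-variable ordering shown in the struct-level docs above.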

#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::vec::tests::to_F_dense_matrix;
    use ark_pallas::Fr;

    #[test]
    fn test_hypercube() {
        let expected_results = to_F_dense_matrix(vec![
            vec![0, 0, 0],
            vec![1, 0, 0],
            vec![0, 1, 0],
            vec![1, 1, 0],
            vec![0, 0, 1],
            vec![1, 0, 1],
            vec![0, 1, 1],
            vec![1, 1, 1],
        ]);

        let bhc = BooleanHypercube::<Fr>::new(3);
        for (i, point) in bhc.clone().enumerate() {
            assert_eq!(point, expected_results[i]);
            assert_eq!(point, bhc.at_i(i));
        }
    }
}
@ -0,0 +1,167 @@
/// Some basic MLE utilities
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_std::log2;

use super::vec::SparseMatrix;

/// Pad matrix so that its columns and rows are powers of two
pub fn pad_matrix<F: PrimeField>(m: &SparseMatrix<F>) -> SparseMatrix<F> {
    let mut r = m.clone();
    r.n_rows = m.n_rows.next_power_of_two();
    r.n_cols = m.n_cols.next_power_of_two();
    r
}
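
// Note: padding only bumps the stated dimensions; since the matrix is kept in sparse
// representation, the extra rows/columns are implicitly zero and no entries need to be stored.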

/// Returns the dense multilinear extension of the given matrix, without modifying the original
/// matrix.
pub fn matrix_to_mle<F: PrimeField>(matrix: SparseMatrix<F>) -> DenseMultilinearExtension<F> {
    let n_vars: usize = (log2(matrix.n_rows) + log2(matrix.n_cols)) as usize; // n_vars = s + s'

    // Matrices might need to get padded before being turned into an MLE
    let padded_matrix = pad_matrix(&matrix);

    // build a dense vector representing the sparse padded matrix
    let mut v: Vec<F> = vec![F::zero(); padded_matrix.n_rows * padded_matrix.n_cols];
    for (row_i, row) in padded_matrix.coeffs.iter().enumerate() {
        for &(value, col_i) in row.iter() {
            v[(padded_matrix.n_cols * row_i) + col_i] = value;
        }
    }

    // convert the dense vector into an MLE
    vec_to_mle(n_vars, &v)
}
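
// Indexing convention used above: entry M[i][j] of the padded matrix lands at position
// i * n_cols + j of the evaluation vector, e.g. for a padded 4x4 matrix, M[1][2] sits at
// index 1 * 4 + 2 = 6, which corresponds to the boolean hypercube point at_i(6).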

/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
    let v_padded: Vec<F> = if v.len() != (1 << n_vars) {
        // pad to 2^n_vars
        [
            v.clone(),
            std::iter::repeat(F::zero())
                .take((1 << n_vars) - v.len())
                .collect(),
        ]
        .concat()
    } else {
        v.clone()
    };
    DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}

/// Takes the n_vars and a dense vector and returns its dense MLE, padding the vector with
/// zeros up to length 2^n_vars.
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
    // Pad to 2^n_vars
    let v_padded: Vec<F> = [
        v.clone(),
        std::iter::repeat(F::zero())
            .take((1 << n_vars) - v.len())
            .collect(),
    ]
    .concat();
    DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
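
// Example: with n_vars = 3 and a vector of length 2, six zeros are appended so the MLE is
// built over all 2^3 = 8 evaluations.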

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ccs::tests::get_test_z,
        utils::multilinear_polynomial::fix_variables,
        utils::multilinear_polynomial::tests::fix_last_variables,
        utils::{hypercube::BooleanHypercube, vec::tests::to_F_matrix},
    };
    use ark_poly::MultilinearExtension;
    use ark_std::Zero;

    use ark_pallas::Fr;

    #[test]
    fn test_matrix_to_mle() {
        let A = to_F_matrix::<Fr>(vec![
            vec![2, 3, 4, 4],
            vec![4, 11, 14, 14],
            vec![2, 8, 17, 17],
            vec![420, 4, 2, 0],
        ]);

        let A_mle = matrix_to_mle(A);
        assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals

        let A = to_F_matrix::<Fr>(vec![
            vec![2, 3, 4, 4, 1],
            vec![4, 11, 14, 14, 2],
            vec![2, 8, 17, 17, 3],
            vec![420, 4, 2, 0, 4],
            vec![420, 4, 2, 0, 5],
        ]);
        let A_mle = matrix_to_mle(A.clone());
        assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals

        // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values
        let bhc = BooleanHypercube::new(A_mle.num_vars);
        let A_padded = pad_matrix(&A);
        let A_padded_dense = A_padded.to_dense();
        for (i, A_row) in A_padded_dense.iter().enumerate() {
            for (j, _) in A_row.iter().enumerate() {
                let s_i_j = bhc.at_i(i * A_row.len() + j);
                assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded_dense[i][j]);
            }
        }
    }

    #[test]
    fn test_vec_to_mle() {
        let z = get_test_z::<Fr>(3);
        let n_vars = 3;
        let z_mle = dense_vec_to_mle(n_vars, &z);

        // check that the z_mle evaluated over the boolean hypercube equals the vec z_i values
        let bhc = BooleanHypercube::new(z_mle.num_vars);
        for (i, z_i) in z.iter().enumerate() {
            let s_i = bhc.at_i(i);
            assert_eq!(z_mle.evaluate(&s_i).unwrap(), z_i.clone());
        }
        // for the rest of the elements of the boolean hypercube, expect the MLE to evaluate to zero
        for i in (z.len())..(1 << z_mle.num_vars) {
            let s_i = bhc.at_i(i);
            assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero());
        }
    }

    #[test]
    fn test_fix_variables() {
        let A = to_F_matrix(vec![
            vec![2, 3, 4, 4],
            vec![4, 11, 14, 14],
            vec![2, 8, 17, 17],
            vec![420, 4, 2, 0],
        ]);

        let A_mle = matrix_to_mle(A.clone());
        let A = A.to_dense();
        let bhc = BooleanHypercube::new(2);
        for (i, y) in bhc.enumerate() {
            // First check that the arkworks and espresso funcs match
            let expected_fix_left = A_mle.fix_variables(&y); // try arkworks fix_variables
            let fix_left = fix_variables(&A_mle, &y); // try espresso fix_variables
            assert_eq!(fix_left, expected_fix_left);

            // Check that fixing the first variables pins down a column
            // i.e. fixing x to 0 will return the first column
            //      fixing x to 1 will return the second column etc.
            let column_i: Vec<Fr> = A.clone().iter().map(|x| x[i]).collect();
            assert_eq!(fix_left.evaluations, column_i);

            // Now check that fixing the last variables pins down a row
            // i.e. fixing y to 0 will return the first row
            //      fixing y to 1 will return the second row etc.
            let row_i: Vec<Fr> = A[i].clone();
            let fix_right = fix_last_variables(&A_mle, &y);
            assert_eq!(fix_right.evaluations, row_i);
        }
    }
}