
Circuit compute_c reduce constraints (#97)

* migrate from CurveGroup to PrimeField in hypernova & ccs where the curve was not needed, to simplify the code

* refactor the compute_c circuit test to use multiple LCCCS & CCCS instances

* refactor hypernova's compute_c circuit to reduce from `110635` to `553` constraints

* apply review nits

* fix clippy lints after rust-toolchain v1.76.0
update-nifs-interface
arnaucube 5 months ago
parent commit 48947e841c
15 changed files with 221 additions and 394 deletions
  1. folding-schemes/src/ccs/mod.rs (+19 -22)
  2. folding-schemes/src/commitment/ipa.rs (+1 -1)
  3. folding-schemes/src/folding/circuits/nonnative/affine.rs (+6 -5)
  4. folding-schemes/src/folding/circuits/sum_check.rs (+2 -5)
  5. folding-schemes/src/folding/circuits/utils.rs (+7 -8)
  6. folding-schemes/src/folding/hypernova/cccs.rs (+19 -22)
  7. folding-schemes/src/folding/hypernova/circuit.rs (+94 -246)
  8. folding-schemes/src/folding/hypernova/lcccs.rs (+14 -9)
  9. folding-schemes/src/folding/hypernova/nimfs.rs (+11 -10)
  10. folding-schemes/src/folding/hypernova/utils.rs (+40 -57)
  11. folding-schemes/src/folding/protogalaxy/folding.rs (+1 -1)
  12. folding-schemes/src/utils/espresso/sum_check/verifier.rs (+0 -1)
  13. folding-schemes/src/utils/mle.rs (+5 -5)
  14. folding-schemes/src/utils/vec.rs (+1 -1)
  15. rust-toolchain (+1 -1)

folding-schemes/src/ccs/mod.rs (+19 -22)

@ -1,7 +1,5 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_std::log2;
use ark_std::{One, Zero};
use std::ops::Neg;
use crate::utils::vec::*;
use crate::Error;
@ -12,7 +10,7 @@ use r1cs::R1CS;
/// CCS represents the Customizable Constraint Systems structure defined in
/// the [CCS paper](https://eprint.iacr.org/2023/552)
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CCS<C: CurveGroup> {
pub struct CCS<F: PrimeField> {
/// m: number of rows in M_i (such that M_i \in F^{m, n})
pub m: usize,
/// n = |z|, number of cols in M_i
@ -31,25 +29,24 @@ pub struct CCS {
pub s_prime: usize,
/// vector of matrices
pub M: Vec<SparseMatrix<C::ScalarField>>,
pub M: Vec<SparseMatrix<F>>,
/// vector of multisets
pub S: Vec<Vec<usize>>,
/// vector of coefficients
pub c: Vec<C::ScalarField>,
pub c: Vec<F>,
}
impl<C: CurveGroup> CCS<C> {
impl<F: PrimeField> CCS<F> {
/// check that a CCS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[C::ScalarField]) -> Result<(), Error> {
let mut result = vec![C::ScalarField::zero(); self.m];
pub fn check_relation(&self, z: &[F]) -> Result<(), Error> {
let mut result = vec![F::zero(); self.m];
for i in 0..self.q {
// extract the needed M_j matrices out of S_i
let vec_M_j: Vec<&SparseMatrix<C::ScalarField>> =
self.S[i].iter().map(|j| &self.M[*j]).collect();
let vec_M_j: Vec<&SparseMatrix<F>> = self.S[i].iter().map(|j| &self.M[*j]).collect();
// complete the hadamard chain
let mut hadamard_result = vec![C::ScalarField::one(); self.m];
let mut hadamard_result = vec![F::one(); self.m];
for M_j in vec_M_j.into_iter() {
hadamard_result = hadamard(&hadamard_result, &mat_vec_mul_sparse(M_j, z)?)?;
}
@ -72,8 +69,8 @@ impl CCS {
}
}
impl<C: CurveGroup> CCS<C> {
pub fn from_r1cs(r1cs: R1CS<C::ScalarField>) -> Self {
impl<F: PrimeField> CCS<F> {
pub fn from_r1cs(r1cs: R1CS<F>) -> Self {
let m = r1cs.A.n_rows;
let n = r1cs.A.n_cols;
CCS {
@ -87,13 +84,13 @@ impl CCS {
d: 2,
S: vec![vec![0, 1], vec![2]],
c: vec![C::ScalarField::one(), C::ScalarField::one().neg()],
c: vec![F::one(), F::one().neg()],
M: vec![r1cs.A, r1cs.B, r1cs.C],
}
}
pub fn to_r1cs(self) -> R1CS<C::ScalarField> {
R1CS::<C::ScalarField> {
pub fn to_r1cs(self) -> R1CS<F> {
R1CS::<F> {
l: self.l,
A: self.M[0].clone(),
B: self.M[1].clone(),
@ -107,11 +104,11 @@ pub mod tests {
use super::*;
use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z};
use ark_ff::PrimeField;
use ark_pallas::Projective;
use ark_pallas::Fr;
pub fn get_test_ccs<C: CurveGroup>() -> CCS<C> {
let r1cs = get_test_r1cs::<C::ScalarField>();
CCS::<C>::from_r1cs(r1cs)
pub fn get_test_ccs<F: PrimeField>() -> CCS<F> {
let r1cs = get_test_r1cs::<F>();
CCS::<F>::from_r1cs(r1cs)
}
pub fn get_test_z<F: PrimeField>(input: usize) -> Vec<F> {
r1cs_get_test_z(input)
@ -120,7 +117,7 @@ pub mod tests {
/// Test that a basic CCS relation can be satisfied
#[test]
fn test_ccs_relation() {
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
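
With this change `CCS` is generic over a `PrimeField` rather than a `CurveGroup`, and curve-dependent methods (`to_lcccs`, `to_cccs`) bind the curve per call through a `C: CurveGroup<ScalarField = F>` bound. A minimal, self-contained sketch of that generics pattern follows; the struct and method names are illustrative only, not the crate's actual API:

```rust
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;

// Illustrative stand-in for CCS<F>: the structure only carries field elements.
pub struct CcsSketch<F: PrimeField> {
    pub c: Vec<F>,
}

impl<F: PrimeField> CcsSketch<F> {
    // Purely field-arithmetic logic no longer needs any curve in scope.
    pub fn weighted_sum(&self, z: &[F]) -> F {
        self.c.iter().zip(z).map(|(ci, zi)| *ci * zi).sum()
    }

    // Curve-dependent logic picks the curve at the call site; the bound
    // `C::ScalarField = F` ties it back to the struct's field.
    pub fn commit_sketch<C: CurveGroup<ScalarField = F>>(&self, z: &[F]) -> C {
        // toy "commitment": g * <c, z>, for illustration only
        C::generator() * self.weighted_sum(z)
    }
}

fn main() {
    use ark_pallas::{Fr, Projective};
    let ccs = CcsSketch::<Fr> { c: vec![Fr::from(1u64), Fr::from(2u64)] };
    let z = vec![Fr::from(3u64), Fr::from(4u64)];
    let _s: Fr = ccs.weighted_sum(&z);
    let _cm: Projective = ccs.commit_sketch(&z); // curve chosen here, not on the struct
}
```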

folding-schemes/src/commitment/ipa.rs (+1 -1)

@ -493,7 +493,7 @@ where
/// there are some constraints saved.
#[allow(clippy::too_many_arguments)]
pub fn verify<const K: usize>(
g: &Vec<GC>, // params.generators
g: &[GC], // params.generators
h: &GC, // params.h
x: &NonNativeFieldVar<C::ScalarField, CF<C>>, // evaluation point, challenge
v: &NonNativeFieldVar<C::ScalarField, CF<C>>, // value at evaluation point

folding-schemes/src/folding/circuits/nonnative/affine.rs (+6 -5)

@ -1,4 +1,5 @@
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
@ -16,7 +17,7 @@ use super::uint::{nonnative_field_to_field_elements, NonNativeUintVar};
#[derive(Debug, Clone)]
pub struct NonNativeAffineVar<C: CurveGroup>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
{
pub x: NonNativeUintVar<C::ScalarField>,
pub y: NonNativeUintVar<C::ScalarField>,
@ -25,7 +26,7 @@ where
impl<C> AllocVar<C, C::ScalarField> for NonNativeAffineVar<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
{
fn new_variable<T: Borrow<C>>(
cs: impl Into<Namespace<C::ScalarField>>,
@ -49,7 +50,7 @@ where
impl<C: CurveGroup> ToConstraintFieldGadget<C::ScalarField> for NonNativeAffineVar<C>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
{
// Used for converting `NonNativeAffineVar` to a vector of `FpVar` with minimum length in
// the circuit.
@ -66,7 +67,7 @@ pub fn nonnative_affine_to_field_elements(
p: C,
) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
{
let affine = p.into_affine();
if affine.is_zero() {
@ -83,7 +84,7 @@ where
impl<C: CurveGroup> NonNativeAffineVar<C>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
{
// A wrapper of `point_to_nonnative_limbs_custom_opt` with constraints-focused optimization
// type (which is the default optimization type for arkworks' Groth16).

folding-schemes/src/folding/circuits/sum_check.rs (+2 -5)

@ -1,10 +1,7 @@
use crate::utils::espresso::sum_check::SumCheck;
use crate::utils::virtual_polynomial::VPAuxInfo;
use crate::{
transcript::{
poseidon::{PoseidonTranscript, PoseidonTranscriptVar},
TranscriptVar,
},
transcript::{poseidon::PoseidonTranscript, TranscriptVar},
utils::sum_check::{structs::IOPProof, IOPSumCheck},
};
use ark_crypto_primitives::sponge::Absorb;
@ -150,7 +147,7 @@ impl SumCheckVerifierGadget {
pub fn verify(
iop_proof_var: &IOPProofVar<C>,
poly_aux_info_var: &VPAuxInfoVar<C::ScalarField>,
transcript_var: &mut PoseidonTranscriptVar<C::ScalarField>,
transcript_var: &mut impl TranscriptVar<C::ScalarField>,
) -> Result<(Vec<FpVar<C::ScalarField>>, Vec<FpVar<C::ScalarField>>), SynthesisError> {
let mut e_vars = vec![iop_proof_var.claim.clone()];
let mut r_vars: Vec<FpVar<C::ScalarField>> = Vec::new();

folding-schemes/src/folding/circuits/utils.rs (+7 -8)

@ -12,7 +12,7 @@ pub struct EqEvalGadget {
impl<F: PrimeField> EqEvalGadget<F> {
/// Gadget to evaluate eq polynomial.
/// Follows the implementation of `eq_eval` found in this crate.
pub fn eq_eval(x: Vec<FpVar<F>>, y: Vec<FpVar<F>>) -> Result<FpVar<F>, SynthesisError> {
pub fn eq_eval(x: &[FpVar<F>], y: &[FpVar<F>]) -> Result<FpVar<F>, SynthesisError> {
if x.len() != y.len() {
return Err(SynthesisError::Unsatisfiable);
}
@ -30,16 +30,15 @@ impl EqEvalGadget {
#[cfg(test)]
mod tests {
use crate::utils::virtual_polynomial::eq_eval;
use super::EqEvalGadget;
use ark_ff::Field;
use ark_pallas::Fr;
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
use super::EqEvalGadget;
use crate::utils::virtual_polynomial::eq_eval;
#[test]
pub fn test_eq_eval_gadget() {
let mut rng = test_rng();
@ -57,19 +56,19 @@ mod tests {
.map(|y| FpVar::<Fr>::new_witness(cs.clone(), || Ok(y)).unwrap())
.collect();
let expected_eq_eval = eq_eval::<Fr>(&x_vec, &y_vec).unwrap();
let gadget_eq_eval: FpVar<Fr> = EqEvalGadget::<Fr>::eq_eval(x, y).unwrap();
let gadget_eq_eval: FpVar<Fr> = EqEvalGadget::<Fr>::eq_eval(&x, &y).unwrap();
assert_eq!(expected_eq_eval, gadget_eq_eval.value().unwrap());
}
let x: Vec<FpVar<Fr>> = vec![];
let y: Vec<FpVar<Fr>> = vec![];
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(&x, &y);
assert!(gadget_eq_eval.is_err());
let x: Vec<FpVar<Fr>> = vec![];
let y: Vec<FpVar<Fr>> =
vec![FpVar::<Fr>::new_witness(cs.clone(), || Ok(&Fr::ONE)).unwrap()];
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(&x, &y);
assert!(gadget_eq_eval.is_err());
}
}
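
For reference, the polynomial that both the native `eq_eval` and this gadget evaluate is the standard multilinear equality polynomial (definition added here for context, not quoted from the diff):

$$\tilde{eq}(x, y) = \prod_{i=1}^{\ell} \big( x_i y_i + (1 - x_i)(1 - y_i) \big),$$

which equals 1 when $x = y$ on the Boolean hypercube and 0 on any other pair of hypercube points; the gadget computes it with a constant number of constraints per coordinate.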

folding-schemes/src/folding/hypernova/cccs.rs (+19 -22)

@ -5,7 +5,7 @@ use ark_std::Zero;
use std::ops::Add;
use std::sync::Arc;
use ark_std::{rand::Rng, UniformRand};
use ark_std::rand::Rng;
use super::utils::compute_sum_Mz;
use crate::ccs::CCS;
@ -35,13 +35,17 @@ pub struct CCCS {
pub x: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CCS<C> {
pub fn to_cccs<R: Rng>(
impl<F: PrimeField> CCS<F> {
pub fn to_cccs<R: Rng, C: CurveGroup>(
&self,
rng: &mut R,
pedersen_params: &PedersenParams<C>,
z: &[C::ScalarField],
) -> Result<(CCCS<C>, Witness<C::ScalarField>), Error> {
) -> Result<(CCCS<C>, Witness<C::ScalarField>), Error>
where
// enforce that CCS's F is the C::ScalarField
C: CurveGroup<ScalarField = F>,
{
let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
let r_w = C::ScalarField::rand(rng);
let C = Pedersen::<C, true>::commit(pedersen_params, &w, &r_w)?;
@ -57,13 +61,12 @@ impl CCS {
/// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_q(&self, z: &Vec<C::ScalarField>) -> VirtualPolynomial<C::ScalarField> {
pub fn compute_q(&self, z: &[F]) -> VirtualPolynomial<F> {
let z_mle = vec_to_mle(self.s_prime, z);
let mut q = VirtualPolynomial::<C::ScalarField>::new(self.s);
let mut q = VirtualPolynomial::<F>::new(self.s);
for i in 0..self.q {
let mut prod: VirtualPolynomial<C::ScalarField> =
VirtualPolynomial::<C::ScalarField>::new(self.s);
let mut prod: VirtualPolynomial<F> = VirtualPolynomial::<F>::new(self.s);
for j in self.S[i].clone() {
let M_j = matrix_to_mle(self.M[j].clone());
@ -74,11 +77,9 @@ impl CCS {
// If this is the first time we are adding something to this virtual polynomial, we need to
// explicitly add the products using add_mle_list()
// XXX is this true? improve API
prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one())
.unwrap();
prod.add_mle_list([Arc::new(sum_Mz)], F::one()).unwrap();
} else {
prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one())
.unwrap();
prod.mul_by_mle(Arc::new(sum_Mz), F::one()).unwrap();
}
}
// Multiply by the product by the coefficient c_i
@ -92,11 +93,7 @@ impl CCS {
/// Computes Q(x) = eq(beta, x) * q(x)
/// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_Q(
&self,
z: &Vec<C::ScalarField>,
beta: &[C::ScalarField],
) -> VirtualPolynomial<C::ScalarField> {
pub fn compute_Q(&self, z: &[F], beta: &[F]) -> VirtualPolynomial<F> {
let q = self.compute_q(z);
q.build_f_hat(beta).unwrap()
}
@ -107,7 +104,7 @@ impl CCCS {
pub fn check_relation(
&self,
pedersen_params: &PedersenParams<C>,
ccs: &CCS<C>,
ccs: &CCS<C::ScalarField>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen
@ -139,7 +136,7 @@ pub mod tests {
use ark_std::test_rng;
use ark_std::UniformRand;
use ark_pallas::{Fr, Projective};
use ark_pallas::Fr;
/// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the
/// hypercube, but to not-zero outside the hypercube.
@ -147,7 +144,7 @@ pub mod tests {
fn test_compute_q() {
let mut rng = test_rng();
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let z = get_test_z(3);
let q = ccs.compute_q(&z);
@ -167,7 +164,7 @@ pub mod tests {
fn test_compute_Q() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let ccs: CCS<Fr> = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
@ -201,7 +198,7 @@ pub mod tests {
fn test_Q_against_q() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let ccs: CCS<Fr> = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();

folding-schemes/src/folding/hypernova/circuit.rs (+94 -246)

@ -1,286 +1,133 @@
// hypernova nimfs verifier circuit
// see section 5 in https://eprint.iacr.org/2023/573.pdf
use crate::{ccs::CCS, folding::circuits::utils::EqEvalGadget};
use ark_ec::CurveGroup;
/// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) NIMFS verifier circuit
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::AllocVar,
fields::{fp::FpVar, FieldVar},
ToBitsGadget,
};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::Zero;
use std::marker::PhantomData;
/// Gadget to compute $\sum_{j \in [t]} \gamma^{j} \cdot e_1 \cdot \sigma_j + \gamma^{t+1} \cdot e_2 \cdot \sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$.
/// This is the sum computed by the verifier and laid out in section 5, step 5 of "A multi-folding scheme for CCS".
pub struct ComputeCFromSigmasAndThetasGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> ComputeCFromSigmasAndThetasGadget<C> {
/// Computes the sum $\sum_{j}^{j + n} \gamma^{j} \cdot eq_eval \cdot \sigma_{j}$, where $n$ is the length of the `sigmas` vector
/// It corresponds to the first term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
///
/// # Arguments
/// - `sigmas`: vector of $\sigma_j$ values
/// - `eq_eval`: the value of $\tilde{eq}(x_j, x^{\prime})$
/// - `gamma`: value $\gamma$
/// - `j`: the power at which we start to compute $\gamma^{j}$. This is needed in the context of multifolding.
///
/// # Notes
/// In the context of multifolding, `j` corresponds to `ccs.t` in `compute_c_from_sigmas_and_thetas`
fn sum_muls_gamma_pows_eq_sigma(
gamma: FpVar<C::ScalarField>,
eq_eval: FpVar<C::ScalarField>,
sigmas: Vec<FpVar<C::ScalarField>>,
j: FpVar<C::ScalarField>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut result = FpVar::<C::ScalarField>::zero();
let mut gamma_pow = gamma.pow_le(&j.to_bits_le()?)?;
for sigma in sigmas {
result += gamma_pow.clone() * eq_eval.clone() * sigma;
gamma_pow *= gamma.clone();
}
Ok(result)
use crate::ccs::CCS;
use crate::folding::circuits::utils::EqEvalGadget;
/// computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS
/// instances:
/// $$
/// c = \sum_{i \in [\mu]} \left(\sum_{j \in [t]} \gamma^{i \cdot t + j} \cdot e_i \cdot \sigma_{i,j} \right)
/// + \sum_{k \in [\nu]} \gamma^{\mu \cdot t+k} \cdot e_k \cdot \left( \sum_{i=1}^q c_i \cdot \prod_{j \in S_i}
/// \theta_{k,j} \right)
/// $$
#[allow(dead_code)] // TMP while the other circuits are not ready
#[allow(clippy::too_many_arguments)]
fn compute_c_gadget<F: PrimeField>(
cs: ConstraintSystemRef<F>,
ccs: &CCS<F>,
vec_sigmas: Vec<Vec<FpVar<F>>>,
vec_thetas: Vec<Vec<FpVar<F>>>,
gamma: FpVar<F>,
beta: Vec<FpVar<F>>,
vec_r_x: Vec<Vec<FpVar<F>>>,
vec_r_x_prime: Vec<FpVar<F>>,
) -> Result<FpVar<F>, SynthesisError> {
let mut e_lcccs = Vec::new();
for r_x in vec_r_x.iter() {
e_lcccs.push(EqEvalGadget::eq_eval(r_x, &vec_r_x_prime)?);
}
/// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} theta_j$
///
/// # Arguments
/// - `c_i`: vector of $c_i$ values
/// - `thetas`: vector of pre-processed $\thetas[j]$ values corresponding to a particular `ccs.S[i]`
///
/// # Notes
/// This is a part of the second term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
/// The first term is computed by `SumMulsGammaPowsEqSigmaGadget::sum_muls_gamma_pows_eq_sigma`.
/// This is a dot product between a vector of c_i values and a vector of pre-processed $\theta_j$ values, where $j$ is a value from $S_i$.
/// Hence, this requires some pre-processing of the $\theta_j$ values, before running this gadget.
fn sum_ci_mul_prod_thetaj(
c_i: Vec<FpVar<C::ScalarField>>,
thetas: Vec<Vec<FpVar<C::ScalarField>>>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut result = FpVar::<C::ScalarField>::zero();
for (i, c_i) in c_i.iter().enumerate() {
let prod = &thetas[i].iter().fold(FpVar::one(), |acc, e| acc * e);
result += c_i * prod;
let mut c = FpVar::<F>::zero();
let mut current_gamma = FpVar::<F>::one();
for i in 0..vec_sigmas.len() {
for j in 0..ccs.t {
c += current_gamma.clone() * e_lcccs[i].clone() * vec_sigmas[i][j].clone();
current_gamma *= gamma.clone();
}
Ok(result)
}
/// Computes the sum that the verifier has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
///
/// # Arguments
/// - `cs`: constraint system
/// - `ccs`: the CCS instance
/// - `vec_sigmas`: vector of $\sigma_j$ values
/// - `vec_thetas`: vector of $\theta_j$ values
/// - `gamma`: value $\gamma$
/// - `beta`: vector of $\beta_j$ values
/// - `vec_r_x`: vector of $r_{x_j}$ values
/// - `vec_r_x_prime`: vector of $r_{x_j}^{\prime}$ values
///
/// # Notes
/// Arguments to this function are *almost* the same as the arguments to `compute_c_from_sigmas_and_thetas` in `utils.rs`.
#[allow(clippy::too_many_arguments)]
pub fn compute_c_from_sigmas_and_thetas(
cs: ConstraintSystemRef<C::ScalarField>,
ccs: &CCS<C>,
vec_sigmas: Vec<Vec<FpVar<C::ScalarField>>>,
vec_thetas: Vec<Vec<FpVar<C::ScalarField>>>,
gamma: FpVar<C::ScalarField>,
beta: Vec<FpVar<C::ScalarField>>,
vec_r_x: Vec<Vec<FpVar<C::ScalarField>>>,
vec_r_x_prime: Vec<FpVar<C::ScalarField>>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut c =
FpVar::<C::ScalarField>::new_witness(cs.clone(), || Ok(C::ScalarField::zero()))?;
let t = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(ccs.t as u64))
})?;
let mut e_lcccs = Vec::new();
for r_x in vec_r_x.iter() {
let e_1 = EqEvalGadget::eq_eval(r_x.to_vec(), vec_r_x_prime.to_vec())?;
e_lcccs.push(e_1);
}
for (i, sigmas) in vec_sigmas.iter().enumerate() {
let i_var = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(i as u64))
})?;
let pow = i_var * t.clone();
c += Self::sum_muls_gamma_pows_eq_sigma(
gamma.clone(),
e_lcccs[i].clone(),
sigmas.to_vec(),
pow,
)?;
}
let mu = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(vec_sigmas.len() as u64))
})?;
let e_2 = EqEvalGadget::eq_eval(beta, vec_r_x_prime)?;
for (k, thetas) in vec_thetas.iter().enumerate() {
// get prepared thetas. only step different from original `compute_c_from_sigmas_and_thetas`
let mut prepared_thetas = Vec::new();
for i in 0..ccs.q {
let prepared: Vec<FpVar<C::ScalarField>> =
ccs.S[i].iter().map(|j| thetas[*j].clone()).collect();
prepared_thetas.push(prepared.to_vec());
let ccs_c = Vec::<FpVar<F>>::new_constant(cs.clone(), ccs.c.clone())?;
let e_k = EqEvalGadget::eq_eval(&beta, &vec_r_x_prime)?;
#[allow(clippy::needless_range_loop)]
for k in 0..vec_thetas.len() {
let mut sum = FpVar::<F>::zero();
for i in 0..ccs.q {
let mut prod = FpVar::<F>::one();
for j in ccs.S[i].clone() {
prod *= vec_thetas[k][j].clone();
}
let c_i = Vec::<FpVar<C::ScalarField>>::new_witness(cs.clone(), || Ok(ccs.c.clone()))
.unwrap();
let lhs = Self::sum_ci_mul_prod_thetaj(c_i.clone(), prepared_thetas.clone())?;
// compute gamma^(t+1)
let pow = mu.clone() * t.clone()
+ FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(k as u64))
})?;
let gamma_t1 = gamma.pow_le(&pow.to_bits_le()?)?;
c += gamma_t1.clone() * e_2.clone() * lhs.clone();
sum += ccs_c[i].clone() * prod;
}
Ok(c)
c += current_gamma.clone() * e_k.clone() * sum;
current_gamma *= gamma.clone();
}
Ok(c)
}
#[cfg(test)]
mod tests {
use super::ComputeCFromSigmasAndThetasGadget;
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
use super::*;
use crate::{
ccs::{
tests::{get_test_ccs, get_test_z},
CCS,
},
commitment::{pedersen::Pedersen, CommitmentScheme},
folding::hypernova::utils::{
compute_c_from_sigmas_and_thetas, compute_sigmas_and_thetas, sum_ci_mul_prod_thetaj,
sum_muls_gamma_pows_eq_sigma,
},
utils::virtual_polynomial::eq_eval,
folding::hypernova::utils::{compute_c, compute_sigmas_and_thetas},
};
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
#[test]
pub fn test_sum_muls_gamma_pow_eq_sigma_gadget() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let gamma: Fr = Fr::rand(&mut rng);
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let mut e_lcccs = Vec::new();
for r_x in &vec![lcccs_instance.r_x] {
e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
pub fn test_compute_c_gadget() {
// number of LCCCS & CCCS instances to fold in a single step
let mu = 32;
let nu = 42;
let mut z_lcccs = Vec::new();
for i in 0..mu {
let z = get_test_z(i + 3);
z_lcccs.push(z);
}
// Initialize cs and gamma
let cs = ConstraintSystem::<Fr>::new_ref();
let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();
for (i, sigmas) in sigmas_thetas.0.iter().enumerate() {
let expected =
sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
let sigmas_var =
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap();
let eq_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(e_lcccs[i])).unwrap();
let pow =
FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from((i * ccs.t) as u64))).unwrap();
let computed =
ComputeCFromSigmasAndThetasGadget::<Projective>::sum_muls_gamma_pows_eq_sigma(
gamma_var.clone(),
eq_var,
sigmas_var,
pow,
)
.unwrap();
assert_eq!(expected, computed.value().unwrap());
let mut z_cccs = Vec::new();
for i in 0..nu {
let z = get_test_z(i + 3);
z_cccs.push(z);
}
}
#[test]
pub fn test_sum_ci_mul_prod_thetaj_gadget() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let ccs: CCS<Fr> = get_test_ccs();
let mut rng = test_rng();
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let mut e_lcccs = Vec::new();
for r_x in &vec![lcccs_instance.r_x] {
e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
// Create the LCCCS instances out of z_lcccs
let mut lcccs_instances = Vec::new();
for z_i in z_lcccs.iter() {
let (inst, _) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap();
lcccs_instances.push(inst);
}
// Initialize cs
let cs = ConstraintSystem::<Fr>::new_ref();
let vec_thetas = sigmas_thetas.1;
for thetas in vec_thetas.iter() {
// sum c_i * prod theta_j
let expected = sum_ci_mul_prod_thetaj(&ccs, thetas); // from `compute_c_from_sigmas_and_thetas`
let mut prepared_thetas = Vec::new();
for i in 0..ccs.q {
let prepared: Vec<Fr> = ccs.S[i].iter().map(|j| thetas[*j]).collect();
prepared_thetas
.push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(prepared)).unwrap());
}
let computed = ComputeCFromSigmasAndThetasGadget::<Projective>::sum_ci_mul_prod_thetaj(
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(ccs.c.clone())).unwrap(),
prepared_thetas,
)
.unwrap();
assert_eq!(expected, computed.value().unwrap());
// Create the CCCS instance out of z_cccs
let mut cccs_instances = Vec::new();
for z_i in z_cccs.iter() {
let (inst, _) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap();
cccs_instances.push(inst);
}
}
#[test]
pub fn test_compute_c_from_sigmas_and_thetas_gadget() {
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let mut rng = test_rng();
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let sigmas_thetas = compute_sigmas_and_thetas(&ccs, &z_lcccs, &z_cccs, &r_x_prime);
let expected_c = compute_c_from_sigmas_and_thetas(
let expected_c = compute_c(
&ccs,
&sigmas_thetas,
gamma,
&beta,
&vec![lcccs_instance.r_x.clone()],
&lcccs_instances
.iter()
.map(|lcccs| lcccs.r_x.clone())
.collect(),
&r_x_prime,
);
@ -295,17 +142,18 @@ mod tests {
vec_thetas
.push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(thetas.clone())).unwrap());
}
let vec_r_x =
vec![
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(lcccs_instance.r_x.clone()))
.unwrap(),
];
let vec_r_x: Vec<Vec<FpVar<Fr>>> = lcccs_instances
.iter()
.map(|lcccs| {
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(lcccs.r_x.clone())).unwrap()
})
.collect();
let vec_r_x_prime =
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(r_x_prime.clone())).unwrap();
let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();
let beta_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(beta.clone())).unwrap();
let computed_c = ComputeCFromSigmasAndThetasGadget::compute_c_from_sigmas_and_thetas(
cs,
let computed_c = compute_c_gadget(
cs.clone(),
&ccs,
vec_sigmas,
vec_thetas,
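
Most of the `110635` to `553` constraint reduction in this file comes from how powers of gamma are produced: the old gadget recomputed each `gamma^j` in-circuit with a bit decomposition plus `pow_le` (on the order of hundreds of constraints per term, since the exponent is decomposed into field-size bits), while the new gadget keeps a running `current_gamma` and multiplies it by `gamma` once per term; it also allocates `ccs.c` as constants instead of witnesses. Below is a self-contained sketch of that difference, illustrative only, assuming crate versions matching the repo's arkworks dependencies; the printed numbers will vary with the field and the number of terms:

```rust
use ark_pallas::Fr;
use ark_r1cs_std::{alloc::AllocVar, fields::{fp::FpVar, FieldVar}, ToBitsGadget};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};

fn main() {
    let mut rng = test_rng();
    let gamma = Fr::rand(&mut rng);
    let n_terms = 8u64;

    // Old style: gamma^j recomputed via bit decomposition + pow_le for every term.
    let cs = ConstraintSystem::<Fr>::new_ref();
    let gamma_var = FpVar::new_witness(cs.clone(), || Ok(gamma)).unwrap();
    for j in 0..n_terms {
        let j_var = FpVar::new_witness(cs.clone(), || Ok(Fr::from(j))).unwrap();
        let _gamma_pow = gamma_var.pow_le(&j_var.to_bits_le().unwrap()).unwrap();
    }
    println!("pow_le per term:     {} constraints", cs.num_constraints());

    // New style: running accumulator, one multiplication per term.
    let cs = ConstraintSystem::<Fr>::new_ref();
    let gamma_var = FpVar::new_witness(cs.clone(), || Ok(gamma)).unwrap();
    let mut current_gamma = FpVar::<Fr>::one();
    for _ in 0..n_terms {
        let _term = current_gamma.clone(); // would be multiplied into the sum here
        current_gamma *= gamma_var.clone();
    }
    println!("running accumulator: {} constraints", cs.num_constraints());
}
```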

folding-schemes/src/folding/hypernova/lcccs.rs (+14 -9)

@ -1,9 +1,10 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_std::One;
use std::sync::Arc;
use ark_std::{rand::Rng, UniformRand};
use ark_std::rand::Rng;
use super::cccs::Witness;
use super::utils::{compute_all_sum_Mz_evals, compute_sum_Mz};
@ -31,19 +32,23 @@ pub struct LCCCS {
pub v: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CCS<C> {
impl<F: PrimeField> CCS<F> {
/// Compute v_j values of the linearized committed CCS form
/// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec<C::ScalarField> {
compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime)
fn compute_v_j(&self, z: &[F], r: &[F]) -> Vec<F> {
compute_all_sum_Mz_evals(&self.M, z, r, self.s_prime)
}
pub fn to_lcccs<R: Rng>(
pub fn to_lcccs<R: Rng, C: CurveGroup>(
&self,
rng: &mut R,
pedersen_params: &PedersenParams<C>,
z: &[C::ScalarField],
) -> Result<(LCCCS<C>, Witness<C::ScalarField>), Error> {
) -> Result<(LCCCS<C>, Witness<C::ScalarField>), Error>
where
// enforce that CCS's F is the C::ScalarField
C: CurveGroup<ScalarField = F>,
{
let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
let r_w = C::ScalarField::rand(rng);
let C = Pedersen::<C, true>::commit(pedersen_params, &w, &r_w)?;
@ -68,8 +73,8 @@ impl LCCCS {
/// Compute all L_j(x) polynomials
pub fn compute_Ls(
&self,
ccs: &CCS<C>,
z: &Vec<C::ScalarField>,
ccs: &CCS<C::ScalarField>,
z: &[C::ScalarField],
) -> Vec<VirtualPolynomial<C::ScalarField>> {
let z_mle = vec_to_mle(ccs.s_prime, z);
// Convert all matrices to MLE
@ -92,7 +97,7 @@ impl LCCCS {
pub fn check_relation(
&self,
pedersen_params: &PedersenParams<C>,
ccs: &CCS<C>,
ccs: &CCS<C::ScalarField>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen

folding-schemes/src/folding/hypernova/nimfs.rs (+11 -10)

@ -7,7 +7,7 @@ use ark_std::{One, Zero};
use super::cccs::{Witness, CCCS};
use super::lcccs::LCCCS;
use super::utils::{compute_c_from_sigmas_and_thetas, compute_g, compute_sigmas_and_thetas};
use super::utils::{compute_c, compute_g, compute_sigmas_and_thetas};
use crate::ccs::CCS;
use crate::transcript::Transcript;
use crate::utils::hypercube::BooleanHypercube;
@ -20,13 +20,13 @@ use std::fmt::Debug;
use std::marker::PhantomData;
/// Proof defines a multifolding proof
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct Proof<C: CurveGroup> {
pub sc_proof: SumCheckProof<C::ScalarField>,
pub sigmas_thetas: SigmasThetas<C::ScalarField>,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>);
#[derive(Debug)]
@ -151,7 +151,7 @@ where
#[allow(clippy::type_complexity)]
pub fn prove(
transcript: &mut impl Transcript<C>,
ccs: &CCS<C>,
ccs: &CCS<C::ScalarField>,
running_instances: &[LCCCS<C>],
new_instances: &[CCCS<C>],
w_lcccs: &[Witness<C::ScalarField>],
@ -277,7 +277,7 @@ where
/// Returns the folded LCCCS instance.
pub fn verify(
transcript: &mut impl Transcript<C>,
ccs: &CCS<C>,
ccs: &CCS<C::ScalarField>,
running_instances: &[LCCCS<C>],
new_instances: &[CCCS<C>],
proof: Proof<C>,
@ -325,7 +325,7 @@ where
let r_x_prime = sumcheck_subclaim.point.clone();
// Step 5: Finish verifying sumcheck (verify the claim c)
let c = compute_c_from_sigmas_and_thetas(
let c = compute_c(
ccs,
&proof.sigmas_thetas,
gamma,
@ -336,6 +336,7 @@ where
.collect(),
&r_x_prime,
);
// check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas
if c != sumcheck_subclaim.expected_evaluation {
return Err(Error::NotEqual);
@ -430,7 +431,7 @@ pub mod tests {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
@ -489,7 +490,7 @@ pub mod tests {
pub fn test_multifolding_two_instances_multiple_steps() {
let mut rng = test_rng();
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
@ -559,7 +560,7 @@ pub mod tests {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
@ -642,7 +643,7 @@ pub mod tests {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();

folding-schemes/src/folding/hypernova/utils.rs (+40 -57)

@ -2,7 +2,6 @@ use ark_ec::CurveGroup;
use ark_ff::{Field, PrimeField};
use ark_poly::DenseMultilinearExtension;
use ark_poly::MultilinearExtension;
use ark_std::{One, Zero};
use std::ops::Add;
use crate::utils::multilinear_polynomial::fix_variables;
@ -21,7 +20,7 @@ use crate::utils::virtual_polynomial::{eq_eval, VirtualPolynomial};
/// in 0..self.t
pub fn compute_all_sum_Mz_evals<F: PrimeField>(
vec_M: &[SparseMatrix<F>],
z: &Vec<F>,
z: &[F],
r: &[F],
s_prime: usize,
) -> Vec<F> {
@ -65,19 +64,19 @@ pub fn compute_sum_Mz(
/// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS
/// instances
pub fn compute_sigmas_and_thetas<C: CurveGroup>(
ccs: &CCS<C>,
z_lcccs: &[Vec<C::ScalarField>],
z_cccs: &[Vec<C::ScalarField>],
r_x_prime: &[C::ScalarField],
) -> SigmasThetas<C::ScalarField> {
let mut sigmas: Vec<Vec<C::ScalarField>> = Vec::new();
pub fn compute_sigmas_and_thetas<F: PrimeField>(
ccs: &CCS<F>,
z_lcccs: &[Vec<F>],
z_cccs: &[Vec<F>],
r_x_prime: &[F],
) -> SigmasThetas<F> {
let mut sigmas: Vec<Vec<F>> = Vec::new();
for z_lcccs_i in z_lcccs {
// sigmas
let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime);
sigmas.push(sigma_i);
}
let mut thetas: Vec<Vec<C::ScalarField>> = Vec::new();
let mut thetas: Vec<Vec<F>> = Vec::new();
for z_cccs_i in z_cccs {
// thetas
let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime);
@ -86,49 +85,23 @@ pub fn compute_sigmas_and_thetas(
SigmasThetas(sigmas, thetas)
}
/// Computes the sum $\sum_{j = 0}^{n} \gamma^{\text{pow} + j} \cdot eq_eval \cdot \sigma_{j}$
/// `pow` corresponds to `i * ccs.t` in `compute_c_from_sigmas_and_thetas`
pub fn sum_muls_gamma_pows_eq_sigma<F: PrimeField>(
/// computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS
/// instances:
/// $$
/// c = \sum_{i \in [\mu]} \left(\sum_{j \in [t]} \gamma^{i \cdot t + j} \cdot e_i \cdot \sigma_{i,j} \right)
/// + \sum_{k \in [\nu]} \gamma^{\mu \cdot t+k} \cdot e_k \cdot \left( \sum_{i=1}^q c_i \cdot \prod_{j \in S_i}
/// \theta_{k,j} \right)
/// $$
pub fn compute_c<F: PrimeField>(
ccs: &CCS<F>,
st: &SigmasThetas<F>,
gamma: F,
eq_eval: F,
sigmas: &[F],
pow: u64,
beta: &[F],
vec_r_x: &Vec<Vec<F>>,
r_x_prime: &[F],
) -> F {
let mut result = F::zero();
for (j, sigma_j) in sigmas.iter().enumerate() {
let gamma_j = gamma.pow([(pow + (j as u64))]);
result += gamma_j * eq_eval * sigma_j;
}
result
}
/// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} theta_j$
pub fn sum_ci_mul_prod_thetaj<C: CurveGroup>(
ccs: &CCS<C>,
thetas: &[C::ScalarField],
) -> C::ScalarField {
let mut result = C::ScalarField::zero();
for i in 0..ccs.q {
let mut prod = C::ScalarField::one();
for j in ccs.S[i].clone() {
prod *= thetas[j];
}
result += ccs.c[i] * prod;
}
result
}
/// Compute the right-hand-side of step 5 of the multifolding scheme
pub fn compute_c_from_sigmas_and_thetas<C: CurveGroup>(
ccs: &CCS<C>,
st: &SigmasThetas<C::ScalarField>,
gamma: C::ScalarField,
beta: &[C::ScalarField],
vec_r_x: &Vec<Vec<C::ScalarField>>,
r_x_prime: &[C::ScalarField],
) -> C::ScalarField {
let (vec_sigmas, vec_thetas) = (st.0.clone(), st.1.clone());
let mut c = C::ScalarField::zero();
let mut c = F::zero();
let mut e_lcccs = Vec::new();
for r_x in vec_r_x {
@ -136,14 +109,24 @@ pub fn compute_c_from_sigmas_and_thetas(
}
for (i, sigmas) in vec_sigmas.iter().enumerate() {
// (sum gamma^j * e_i * sigma_j)
c += sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
for (j, sigma_j) in sigmas.iter().enumerate() {
let gamma_j = gamma.pow([((i * ccs.t + j) as u64)]);
c += gamma_j * e_lcccs[i] * sigma_j;
}
}
let mu = vec_sigmas.len();
let e2 = eq_eval(beta, r_x_prime).unwrap();
for (k, thetas) in vec_thetas.iter().enumerate() {
// + gamma^{t+1} * e2 * sum c_i * prod theta_j
let lhs = sum_ci_mul_prod_thetaj(ccs, thetas);
let mut lhs = F::zero();
for i in 0..ccs.q {
let mut prod = F::one();
for j in ccs.S[i].clone() {
prod *= thetas[j];
}
lhs += ccs.c[i] * prod;
}
let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]);
c += gamma_t1 * e2 * lhs;
}
@ -152,7 +135,7 @@ pub fn compute_c_from_sigmas_and_thetas(
/// Compute g(x) polynomial for the given inputs.
pub fn compute_g<C: CurveGroup>(
ccs: &CCS<C>,
ccs: &CCS<C::ScalarField>,
running_instances: &[LCCCS<C>],
z_lcccs: &[Vec<C::ScalarField>],
z_cccs: &[Vec<C::ScalarField>],
@ -205,7 +188,7 @@ pub mod tests {
#[test]
fn test_compute_sum_Mz_over_boolean_hypercube() {
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
let z_mle = dense_vec_to_mle(ccs.s_prime, &z);
@ -253,7 +236,7 @@ pub mod tests {
let mut rng = test_rng();
// s = 2, s' = 3
let ccs = get_test_ccs::<Projective>();
let ccs = get_test_ccs::<Fr>();
let M = ccs.M[0].clone().to_dense();
let M_mle = matrix_to_mle(ccs.M[0].clone());
@ -308,9 +291,9 @@ pub mod tests {
// we expect g(r_x_prime) to be equal to:
// c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j
// from compute_c_from_sigmas_and_thetas
// from compute_c
let expected_c = g.evaluate(&r_x_prime).unwrap();
let c = compute_c_from_sigmas_and_thetas::<Projective>(
let c = compute_c::<Fr>(
&ccs,
&sigmas_thetas,
gamma,
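
The native `compute_c` still uses `gamma.pow([i * ccs.t + j])` (and `gamma.pow([mu * ccs.t + k])`), while the gadget accumulates `current_gamma *= gamma`; since the exponents visited are the consecutive values 0, 1, ..., mu*t + nu - 1, both walk the same sequence of powers and so the circuit matches the native value. A tiny out-of-circuit sanity sketch of that equivalence, illustrative only and using ark-pallas' Fr:

```rust
use ark_ff::Field;
use ark_pallas::Fr;
use ark_std::{test_rng, UniformRand};

fn main() {
    let mut rng = test_rng();
    let gamma = Fr::rand(&mut rng);
    // running accumulator (circuit style) vs explicit exponentiation (native style)
    let mut current_gamma = Fr::from(1u64);
    for e in 0u64..10 {
        assert_eq!(current_gamma, gamma.pow([e]));
        current_gamma *= gamma;
    }
}
```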

folding-schemes/src/folding/protogalaxy/folding.rs (+1 -1)

@ -288,7 +288,7 @@ where
}
// naive impl of pow_i for betas, assuming that betas=(b, b^2, b^4, ..., b^{2^{t-1}})
fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
fn pow_i<F: PrimeField>(i: usize, betas: &[F]) -> F {
// WIP check if makes more sense to do it with ifs instead of arithmetic
let n = 2_u64.pow(betas.len() as u32);

folding-schemes/src/utils/espresso/sum_check/verifier.rs (+0 -1)

@ -140,7 +140,6 @@ impl SumCheckVerifier for IOPVerifierState {
let eval_at_zero: C::ScalarField = poly.coeffs[0];
let eval = eval_at_one + eval_at_zero;
println!("evaluations: {:?}, expected: {:?}", eval, expected);
// the deferred check during the interactive phase:
// 1. check if the received 'P(0) + P(1) = expected`.
if eval != expected {

folding-schemes/src/utils/mle.rs (+5 -5)

@ -34,26 +34,26 @@ pub fn matrix_to_mle(matrix: SparseMatrix) -> DenseMultilinear
}
/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> DenseMultilinearExtension<F> {
let v_padded: Vec<F> = if v.len() != (1 << n_vars) {
// pad to 2^n_vars
[
v.clone(),
v.to_owned(),
std::iter::repeat(F::zero())
.take((1 << n_vars) - v.len())
.collect(),
]
.concat()
} else {
v.clone()
v.to_owned()
};
DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &[F]) -> DenseMultilinearExtension<F> {
// Pad to 2^n_vars
let v_padded: Vec<F> = [
v.clone(),
v.to_owned(),
std::iter::repeat(F::zero())
.take((1 << n_vars) - v.len())
.collect(),
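
As the comments above note, both `vec_to_mle` and `dense_vec_to_mle` zero-pad a vector shorter than 2^n_vars before building the dense multilinear extension. A minimal sketch of that padding behavior, illustrative only, assuming ark-pallas and ark-poly:

```rust
use ark_ff::Zero;
use ark_pallas::Fr;
use ark_poly::DenseMultilinearExtension;

fn main() {
    let n_vars = 2; // hypercube {0,1}^2, so 4 evaluations expected
    let v = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(3u64)];
    // zero-pad to 2^n_vars, as the functions above do internally
    let mut evals = v.clone();
    evals.resize(1 << n_vars, Fr::zero());
    let mle = DenseMultilinearExtension::from_evaluations_vec(n_vars, evals);
    assert_eq!(mle.evaluations.len(), 1 << n_vars);
}
```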

folding-schemes/src/utils/vec.rs (+1 -1)

@ -79,7 +79,7 @@ pub fn is_zero_vec(vec: &[F]) -> bool {
vec.iter().all(|a| a.is_zero())
}
pub fn mat_vec_mul<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Result<Vec<F>, Error> {
pub fn mat_vec_mul<F: PrimeField>(M: &[Vec<F>], z: &[F]) -> Result<Vec<F>, Error> {
if M.is_empty() {
return Err(Error::Empty);
}

rust-toolchain (+1 -1)

@ -1 +1 @@
1.74.0
1.76.0
