mirror of https://github.com/arnaucube/sonobe.git (synced 2026-01-11 16:31:32 +01:00)
Add solidity groth16, kzg10 and final decider verifiers in a dedicated workspace (#70)
* change: Refactor structure into workspace
* chore: Add empty readme
* change: Transform repo into workspace
* add: Create folding-verifier-solidity crate
* add: Include askama.toml for `sol` extension escaper
* add: Jordi's old Groth16 verifier .sol template and adapt it
* tmp: create simple template struct to test
* Update FoldingSchemes trait, fit Nova+CycleFold
  - update lib.rs's `FoldingScheme` trait interface
  - fit Nova+CycleFold into the `FoldingScheme` trait
  - refactor `src/nova/*`
* chore: add serialization assets for testing
  Now we include an `assets` folder with a serialized proof & vk for tests
* Add `examples` dir, with Nova's `FoldingScheme` example
* polishing
* expose poseidon_test_config outside tests
* change: Refactor structure into workspace
* chore: Add empty readme
* change: Transform repo into workspace
* add: Create folding-verifier-solidity crate
* add: Include askama.toml for `sol` extension escaper
* add: Jordi's old Groth16 verifier .sol template and adapt it
* tmp: create simple template struct to test
* feat: templating kzg working
* chore: add emv and revm
* feat: start evm file
* chore: add ark-poly-commit
* chore: move `commitment` to `folding-schemes`
* chore: update `.gitignore` to ignore generated contracts
* chore: update template with bn254 lib on it (avoids import), update for loop to account for whitespaces
* refactor: update template with no lib
* feat: add evm deploy code, compile and create kzg verifier
* chore: update `Cargo.toml` to have `folding-schemes` available with verifiers
* feat: start kzg prove and verify with sol
* chore: compute crs from kzg prover
* feat: evm kzg verification passing
* tmp
* change: Swap order of G2 coordinates within the template
* Update way to serialize proof with correct order
* chore: update `Cargo.toml`
* chore: add revm
* chore: add `save_solidity`
* refactor: verifiers in dedicated mod
* refactor: have dedicated `utils` module
* chore: expose modules
* chore: update verifier for kzg
* chore: rename templates
* fix: look for binary using also name of contract
* refactor: generate groth16 proof for sha256 pre-image, generate groth16 template with verifying key
* chore: template renaming
* fix: switch circuit for circuit that simply adds
* feat: generates test data on the fly
* feat: update to latest groth16 verifier
* refactor: rename folder, update `.gitignore`
* chore: update `Cargo.toml`
* chore: update templates extension to indicate that they are templates
* chore: rename templates, both files and structs
* fix: template inheritance working
* feat: template spdx and pragma statements
* feat: decider verifier compiles, update test for kzg10 and groth16 templates
* feat: parameterize which size of the crs should be stored on the contract
* chore: add comment on how the groth16 and kzg10 proofs will be linked together
* chore: cargo clippy run
* chore: cargo clippy tests
* chore: cargo fmt
* refactor: remove unused lifetime parameter
* chore: end merge
* chore: move examples to `folding-schemes` workspace
* get latest main changes
* fix: temp fix clippy warnings, will remove lints once not used in tests only
* fix: cargo clippy lint added on `code_size`
* fix: update path to test circuit and add step for installing solc
* chore: remove `save_solidity` steps
* fix: the borrowed expression implements the required traits
* chore: update `Cargo.toml`
* chore: remove extra `[patch.crates-io]`
* fix: update to patch at the workspace level and add comment explaining this
* refactor: correct `staticcall` with valid input/output sizes and change return syntax for pairing
* refactor: expose modules and remove `dead_code` calls
* chore: update `README.md`, add additional comments on `kzg10` template and update `groth16` template comments
* chore: be clearer on attributions on `kzg10`

---------

Co-authored-by: CPerezz <c.perezbaro@gmail.com>
Co-authored-by: arnaucube <root@arnaucube.com>
+10 folding-schemes/src/folding/circuits/mod.rs (Normal file)
@@ -0,0 +1,10 @@
/// Circuits and gadgets shared across the different folding schemes.
use ark_ec::CurveGroup;
use ark_ff::Field;

pub mod nonnative;
pub mod sum_check;
pub mod utils;

// CF represents the constraints field
pub type CF<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
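As a quick illustration of how the `CF` alias resolves (a minimal sketch, not part of the diff, assuming the `ark-pallas` curve that the tests below already use and assuming the module path is exported as written): Pallas' base field `Fq` is itself a prime field, so `BasePrimeField` is `Fq` and the alias picks the base field of `C` viewed as a prime field, i.e. what the comment calls the constraints field.

// Hypothetical type-level check; compiles only if CF<Projective> resolves to ark_pallas::Fq.
use folding_schemes::folding::circuits::CF; // assumed public path
fn _cf_is_pallas_base_field(x: CF<ark_pallas::Projective>) -> ark_pallas::Fq {
    x
}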
+131 folding-schemes/src/folding/circuits/nonnative.rs (Normal file)
@@ -0,0 +1,131 @@
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::PrimeField;
use ark_r1cs_std::fields::nonnative::{params::OptimizationType, AllocatedNonNativeFieldVar};
use ark_r1cs_std::{
    alloc::{AllocVar, AllocationMode},
    fields::{fp::FpVar, nonnative::NonNativeFieldVar},
    ToConstraintFieldGadget,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{One, Zero};
use core::borrow::Borrow;

/// NonNativeAffineVar represents an elliptic curve point in Affine representation in the non-native
/// field, over the constraint field. It is not intended to perform operations, but just to contain
/// the affine coordinates in order to perform hash operations of the point.
#[derive(Debug, Clone)]
pub struct NonNativeAffineVar<F: PrimeField> {
    pub x: Vec<FpVar<F>>,
    pub y: Vec<FpVar<F>>,
}

impl<C> AllocVar<C, C::ScalarField> for NonNativeAffineVar<C::ScalarField>
where
    C: CurveGroup,
    <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
    fn new_variable<T: Borrow<C>>(
        cs: impl Into<Namespace<C::ScalarField>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|val| {
            let cs = cs.into();

            let affine = val.borrow().into_affine();
            let zero_point = (&C::BaseField::zero(), &C::BaseField::one());
            let xy = affine.xy().unwrap_or(zero_point);

            let x = NonNativeFieldVar::<C::BaseField, C::ScalarField>::new_variable(
                cs.clone(),
                || Ok(xy.0),
                mode,
            )?
            .to_constraint_field()?;
            let y = NonNativeFieldVar::<C::BaseField, C::ScalarField>::new_variable(
                cs.clone(),
                || Ok(xy.1),
                mode,
            )?
            .to_constraint_field()?;

            Ok(Self { x, y })
        })
    }
}

/// Wrapper on top of [`point_to_nonnative_limbs_custom_opt`] which always uses
/// [`OptimizationType::Weight`].
#[allow(clippy::type_complexity)]
pub fn point_to_nonnative_limbs<C: CurveGroup>(
    p: C,
) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError>
where
    <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
    point_to_nonnative_limbs_custom_opt(p, OptimizationType::Weight)
}

/// Used to compute (outside the circuit) the limbs representation of a point that matches the one
/// used in-circuit, and in particular this method allows to specify which [`OptimizationType`] to
/// use.
#[allow(clippy::type_complexity)]
pub fn point_to_nonnative_limbs_custom_opt<C: CurveGroup>(
    p: C,
    optimization_type: OptimizationType,
) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError>
where
    <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
    let affine = p.into_affine();
    if affine.is_zero() {
        let x =
            AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
                &C::BaseField::zero(),
                optimization_type,
            )?;
        let y =
            AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
                &C::BaseField::one(),
                optimization_type,
            )?;
        return Ok((x, y));
    }

    let (x, y) = affine.xy().unwrap();
    let x = AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
        x,
        optimization_type,
    )?;
    let y = AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
        y,
        optimization_type,
    )?;
    Ok((x, y))
}

#[cfg(test)]
mod tests {
    use super::*;
    use ark_pallas::{Fr, Projective};
    use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;
    use ark_std::{UniformRand, Zero};

    #[test]
    fn test_alloc_nonnativeaffinevar() {
        let cs = ConstraintSystem::<Fr>::new_ref();

        // dealing with the 'zero' point should not panic when doing the unwrap
        let p = Projective::zero();
        NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(p)).unwrap();

        // check that point_to_nonnative_limbs returns the expected values
        let mut rng = ark_std::test_rng();
        let p = Projective::rand(&mut rng);
        let pVar = NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(p)).unwrap();
        let (x, y) = point_to_nonnative_limbs(p).unwrap();
        assert_eq!(pVar.x.value().unwrap(), x);
        assert_eq!(pVar.y.value().unwrap(), y);
    }
}
+273 folding-schemes/src/folding/circuits/sum_check.rs (Normal file)
@@ -0,0 +1,273 @@
use crate::utils::espresso::sum_check::SumCheck;
use crate::utils::virtual_polynomial::VPAuxInfo;
use crate::{
    transcript::{
        poseidon::{PoseidonTranscript, PoseidonTranscriptVar},
        TranscriptVar,
    },
    utils::sum_check::{structs::IOPProof, IOPSumCheck},
};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
/// Heavily inspired by testudo: https://github.com/cryptonetlab/testudo/tree/master
/// Some changes:
/// - Typings to better stick to ark_poly's API
/// - Uses `folding-schemes`' own `TranscriptVar` trait and `PoseidonTranscriptVar` struct
/// - API made closer to gadgets found in `folding-schemes`
use ark_ff::PrimeField;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
use ark_r1cs_std::{
    alloc::{AllocVar, AllocationMode},
    eq::EqGadget,
    fields::fp::FpVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use std::{borrow::Borrow, marker::PhantomData};

#[derive(Clone, Debug)]
pub struct DensePolynomialVar<F: PrimeField> {
    pub coeffs: Vec<FpVar<F>>,
}

impl<F: PrimeField> AllocVar<DensePolynomial<F>, F> for DensePolynomialVar<F> {
    fn new_variable<T: Borrow<DensePolynomial<F>>>(
        cs: impl Into<Namespace<F>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|c| {
            let cs = cs.into();
            let cp: &DensePolynomial<F> = c.borrow();
            let mut coeffs_var = Vec::<FpVar<F>>::with_capacity(cp.coeffs.len());
            for coeff in cp.coeffs.iter() {
                let coeff_var = FpVar::<F>::new_variable(cs.clone(), || Ok(coeff), mode)?;
                coeffs_var.push(coeff_var);
            }
            Ok(Self { coeffs: coeffs_var })
        })
    }
}

impl<F: PrimeField> DensePolynomialVar<F> {
    pub fn eval_at_zero(&self) -> FpVar<F> {
        self.coeffs[0].clone()
    }

    pub fn eval_at_one(&self) -> FpVar<F> {
        let mut res = self.coeffs[0].clone();
        for i in 1..self.coeffs.len() {
            res = &res + &self.coeffs[i];
        }
        res
    }

    pub fn evaluate(&self, r: &FpVar<F>) -> FpVar<F> {
        let mut eval = self.coeffs[0].clone();
        let mut power = r.clone();

        for i in 1..self.coeffs.len() {
            eval += &power * &self.coeffs[i];
            power *= r;
        }
        eval
    }
}

#[derive(Clone, Debug)]
pub struct IOPProofVar<C: CurveGroup> {
    // We have to be generic over a CurveGroup because instantiating an IOPProofVar will call IOPSumCheck which requires a CurveGroup
    pub proofs: Vec<DensePolynomialVar<C::ScalarField>>,
    pub claim: FpVar<C::ScalarField>,
}

impl<C: CurveGroup> AllocVar<IOPProof<C::ScalarField>, C::ScalarField> for IOPProofVar<C>
where
    <C as Group>::ScalarField: Absorb,
{
    fn new_variable<T: Borrow<IOPProof<C::ScalarField>>>(
        cs: impl Into<Namespace<C::ScalarField>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|c| {
            let cs = cs.into();
            let cp: &IOPProof<C::ScalarField> = c.borrow();
            let claim = IOPSumCheck::<C, PoseidonTranscript<C>>::extract_sum(cp);
            let claim = FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(claim), mode)?;
            let mut proofs =
                Vec::<DensePolynomialVar<C::ScalarField>>::with_capacity(cp.proofs.len());
            for proof in cp.proofs.iter() {
                let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs);
                let proof = DensePolynomialVar::<C::ScalarField>::new_variable(
                    cs.clone(),
                    || Ok(poly),
                    mode,
                )?;
                proofs.push(proof);
            }
            Ok(Self { proofs, claim })
        })
    }
}

#[derive(Clone, Debug)]
pub struct VPAuxInfoVar<F: PrimeField> {
    pub num_variables: FpVar<F>,
    pub max_degree: FpVar<F>,
}

impl<F: PrimeField> AllocVar<VPAuxInfo<F>, F> for VPAuxInfoVar<F> {
    fn new_variable<T: Borrow<VPAuxInfo<F>>>(
        cs: impl Into<Namespace<F>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|c| {
            let cs = cs.into();
            let cp: &VPAuxInfo<F> = c.borrow();
            let num_variables = FpVar::<F>::new_variable(
                cs.clone(),
                || Ok(F::from(cp.num_variables as u64)),
                mode,
            )?;
            let max_degree =
                FpVar::<F>::new_variable(cs.clone(), || Ok(F::from(cp.max_degree as u64)), mode)?;
            Ok(Self {
                num_variables,
                max_degree,
            })
        })
    }
}

#[derive(Debug, Clone)]
pub struct SumCheckVerifierGadget<C: CurveGroup> {
    _f: PhantomData<C>,
}

impl<C: CurveGroup> SumCheckVerifierGadget<C> {
    #[allow(clippy::type_complexity)]
    pub fn verify(
        iop_proof_var: &IOPProofVar<C>,
        poly_aux_info_var: &VPAuxInfoVar<C::ScalarField>,
        transcript_var: &mut PoseidonTranscriptVar<C::ScalarField>,
    ) -> Result<(Vec<FpVar<C::ScalarField>>, Vec<FpVar<C::ScalarField>>), SynthesisError> {
        let mut e_vars = vec![iop_proof_var.claim.clone()];
        let mut r_vars: Vec<FpVar<C::ScalarField>> = Vec::new();
        transcript_var.absorb(poly_aux_info_var.num_variables.clone())?;
        transcript_var.absorb(poly_aux_info_var.max_degree.clone())?;

        for poly_var in iop_proof_var.proofs.iter() {
            let res = poly_var.eval_at_one() + poly_var.eval_at_zero();
            let e_var = e_vars.last().ok_or(SynthesisError::Unsatisfiable)?;
            res.enforce_equal(e_var)?;
            transcript_var.absorb_vec(&poly_var.coeffs)?;
            let r_i_var = transcript_var.get_challenge()?;
            e_vars.push(poly_var.evaluate(&r_i_var));
            r_vars.push(r_i_var);
        }

        Ok((e_vars, r_vars))
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        folding::circuits::sum_check::{IOPProofVar, VPAuxInfoVar},
        transcript::{
            poseidon::{poseidon_test_config, PoseidonTranscript, PoseidonTranscriptVar},
            Transcript, TranscriptVar,
        },
        utils::{
            sum_check::{structs::IOPProof, IOPSumCheck, SumCheck},
            virtual_polynomial::VirtualPolynomial,
        },
    };
    use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
    use ark_ec::CurveGroup;
    use ark_ff::Field;
    use ark_pallas::{Fr, Projective};
    use ark_poly::{
        univariate::DensePolynomial, DenseMultilinearExtension, DenseUVPolynomial,
        MultilinearExtension, Polynomial,
    };
    use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;
    use std::sync::Arc;

    use super::SumCheckVerifierGadget;

    pub type TestSumCheckProof<F> = (VirtualPolynomial<F>, PoseidonConfig<F>, IOPProof<F>);

    /// Primarily used for testing the sumcheck gadget
    /// Returns a random virtual polynomial, the poseidon config used and the associated sumcheck proof
    pub fn get_test_sumcheck_proof<C: CurveGroup>(
        num_vars: usize,
    ) -> TestSumCheckProof<C::ScalarField>
    where
        <C as ark_ec::Group>::ScalarField: Absorb,
    {
        let mut rng = ark_std::test_rng();
        let poseidon_config: PoseidonConfig<C::ScalarField> =
            poseidon_test_config::<C::ScalarField>();
        let mut poseidon_transcript_prove = PoseidonTranscript::<C>::new(&poseidon_config);
        let poly_mle = DenseMultilinearExtension::rand(num_vars, &mut rng);
        let virtual_poly =
            VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), C::ScalarField::ONE);
        let sum_check: IOPProof<C::ScalarField> = IOPSumCheck::<C, PoseidonTranscript<C>>::prove(
            &virtual_poly,
            &mut poseidon_transcript_prove,
        )
        .unwrap();
        (virtual_poly, poseidon_config, sum_check)
    }

    #[test]
    fn test_sum_check_circuit() {
        for num_vars in 1..15 {
            let cs = ConstraintSystem::<Fr>::new_ref();
            let (virtual_poly, poseidon_config, sum_check) =
                get_test_sumcheck_proof::<Projective>(num_vars);
            let mut poseidon_var: PoseidonTranscriptVar<Fr> =
                PoseidonTranscriptVar::new(cs.clone(), &poseidon_config);
            let iop_proof_var =
                IOPProofVar::<Projective>::new_witness(cs.clone(), || Ok(&sum_check)).unwrap();
            let poly_aux_info_var =
                VPAuxInfoVar::<Fr>::new_witness(cs.clone(), || Ok(virtual_poly.aux_info)).unwrap();
            let res = SumCheckVerifierGadget::<Projective>::verify(
                &iop_proof_var,
                &poly_aux_info_var,
                &mut poseidon_var,
            );

            assert!(res.is_ok());
            let (circuit_evals, r_challenges) = res.unwrap();

            // 1. assert claim from circuit is equal to the one from the sum-check
            let claim: Fr =
                IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::extract_sum(&sum_check);
            assert_eq!(circuit_evals[0].value().unwrap(), claim);

            // 2. assert that all in-circuit evaluations are equal to the ones from the sum-check
            for ((proof, point), circuit_eval) in sum_check
                .proofs
                .iter()
                .zip(sum_check.point.iter())
                .zip(circuit_evals.iter().skip(1))
            // we skip the first one since it's the above checked claim
            {
                let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs);
                let eval = poly.evaluate(point);
                assert_eq!(eval, circuit_eval.value().unwrap());
            }

            // 3. assert that all challenges are equal to the ones from the sum-check
            for (point, r_challenge) in sum_check.point.iter().zip(r_challenges.iter()) {
                assert_eq!(*point, r_challenge.value().unwrap());
            }

            assert!(cs.is_satisfied().unwrap());
        }
    }
}
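The loop in `SumCheckVerifierGadget::verify` enforces, in-circuit, the usual sum-check round invariants. In notation not taken from the source (with $p_i$ the $i$-th round polynomial and $e_0$ the claimed sum), each iteration checks and produces

$$p_i(0) + p_i(1) = e_{i-1}, \qquad e_i = p_i(r_i),$$

where each challenge $r_i$ is squeezed from the Poseidon transcript after absorbing the coefficients of $p_i$; the gadget returns all the $e_i$ and $r_i$ so the caller can perform the final evaluation check outside.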
+75 folding-schemes/src/folding/circuits/utils.rs (Normal file)
@@ -0,0 +1,75 @@
use ark_ff::PrimeField;
use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
use ark_relations::r1cs::SynthesisError;
use std::marker::PhantomData;

/// EqEval is a gadget for computing $\tilde{eq}(a, b) = \prod_{i=1}^{l}(a_i \cdot b_i + (1 - a_i)(1 - b_i))$
/// :warning: This is not the ark_r1cs_std::eq::EqGadget
pub struct EqEvalGadget<F: PrimeField> {
    _f: PhantomData<F>,
}

impl<F: PrimeField> EqEvalGadget<F> {
    /// Gadget to evaluate eq polynomial.
    /// Follows the implementation of `eq_eval` found in this crate.
    pub fn eq_eval(x: Vec<FpVar<F>>, y: Vec<FpVar<F>>) -> Result<FpVar<F>, SynthesisError> {
        if x.len() != y.len() {
            return Err(SynthesisError::Unsatisfiable);
        }
        if x.is_empty() || y.is_empty() {
            return Err(SynthesisError::AssignmentMissing);
        }
        let mut e = FpVar::<F>::one();
        for (xi, yi) in x.iter().zip(y.iter()) {
            let xi_yi = xi * yi;
            e *= xi_yi.clone() + xi_yi - xi - yi + F::one();
        }
        Ok(e)
    }
}

#[cfg(test)]
mod tests {

    use crate::utils::virtual_polynomial::eq_eval;

    use super::EqEvalGadget;
    use ark_ff::Field;
    use ark_pallas::Fr;
    use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;
    use ark_std::{test_rng, UniformRand};

    #[test]
    pub fn test_eq_eval_gadget() {
        let mut rng = test_rng();
        let cs = ConstraintSystem::<Fr>::new_ref();

        for i in 1..20 {
            let x_vec: Vec<Fr> = (0..i).map(|_| Fr::rand(&mut rng)).collect();
            let y_vec: Vec<Fr> = (0..i).map(|_| Fr::rand(&mut rng)).collect();
            let x: Vec<FpVar<Fr>> = x_vec
                .iter()
                .map(|x| FpVar::<Fr>::new_witness(cs.clone(), || Ok(x)).unwrap())
                .collect();
            let y: Vec<FpVar<Fr>> = y_vec
                .iter()
                .map(|y| FpVar::<Fr>::new_witness(cs.clone(), || Ok(y)).unwrap())
                .collect();
            let expected_eq_eval = eq_eval::<Fr>(&x_vec, &y_vec).unwrap();
            let gadget_eq_eval: FpVar<Fr> = EqEvalGadget::<Fr>::eq_eval(x, y).unwrap();
            assert_eq!(expected_eq_eval, gadget_eq_eval.value().unwrap());
        }

        let x: Vec<FpVar<Fr>> = vec![];
        let y: Vec<FpVar<Fr>> = vec![];
        let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
        assert!(gadget_eq_eval.is_err());

        let x: Vec<FpVar<Fr>> = vec![];
        let y: Vec<FpVar<Fr>> =
            vec![FpVar::<Fr>::new_witness(cs.clone(), || Ok(&Fr::ONE)).unwrap()];
        let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
        assert!(gadget_eq_eval.is_err());
    }
}
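On boolean inputs the formula in the `EqEvalGadget` doc comment acts as an equality indicator; a tiny worked instance (plain arithmetic, not taken from the source) with $l = 2$:

$$\tilde{eq}\big((1,0),(1,0)\big) = (1\cdot 1 + 0\cdot 0)\,(0\cdot 0 + 1\cdot 1) = 1, \qquad \tilde{eq}\big((1,0),(1,1)\big) = (1\cdot 1 + 0\cdot 0)\,(0\cdot 1 + 1\cdot 0) = 0.$$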
+231 folding-schemes/src/folding/hypernova/cccs.rs (Normal file)
@@ -0,0 +1,231 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_std::One;
use ark_std::Zero;
use std::ops::Add;
use std::sync::Arc;

use ark_std::{rand::Rng, UniformRand};

use super::utils::compute_sum_Mz;
use crate::ccs::CCS;
use crate::commitment::{
    pedersen::{Params as PedersenParams, Pedersen},
    CommitmentProver,
};
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::matrix_to_mle;
use crate::utils::mle::vec_to_mle;
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::Error;

/// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment.
#[derive(Debug, Clone)]
pub struct Witness<F: PrimeField> {
    pub w: Vec<F>,
    pub r_w: F, // randomness used in the Pedersen commitment of w
}

/// Committed CCS instance
#[derive(Debug, Clone)]
pub struct CCCS<C: CurveGroup> {
    // Commitment to witness
    pub C: C,
    // Public input/output
    pub x: Vec<C::ScalarField>,
}

impl<C: CurveGroup> CCS<C> {
    pub fn to_cccs<R: Rng>(
        &self,
        rng: &mut R,
        pedersen_params: &PedersenParams<C>,
        z: &[C::ScalarField],
    ) -> Result<(CCCS<C>, Witness<C::ScalarField>), Error> {
        let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
        let r_w = C::ScalarField::rand(rng);
        let C = Pedersen::<C>::commit(pedersen_params, &w, &r_w)?;

        Ok((
            CCCS::<C> {
                C,
                x: z[1..(1 + self.l)].to_vec(),
            },
            Witness::<C::ScalarField> { w, r_w },
        ))
    }

    /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
    /// polynomial over x
    pub fn compute_q(&self, z: &Vec<C::ScalarField>) -> VirtualPolynomial<C::ScalarField> {
        let z_mle = vec_to_mle(self.s_prime, z);
        let mut q = VirtualPolynomial::<C::ScalarField>::new(self.s);

        for i in 0..self.q {
            let mut prod: VirtualPolynomial<C::ScalarField> =
                VirtualPolynomial::<C::ScalarField>::new(self.s);
            for j in self.S[i].clone() {
                let M_j = matrix_to_mle(self.M[j].clone());

                let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.s_prime);

                // Fold this sum into the running product
                if prod.products.is_empty() {
                    // If this is the first time we are adding something to this virtual polynomial, we need to
                    // explicitly add the products using add_mle_list()
                    // XXX is this true? improve API
                    prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one())
                        .unwrap();
                } else {
                    prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one())
                        .unwrap();
                }
            }
            // Multiply the product by the coefficient c_i
            prod.scalar_mul(&self.c[i]);
            // Add it to the running sum
            q = q.add(&prod);
        }
        q
    }

    /// Computes Q(x) = eq(beta, x) * q(x)
    /// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
    /// polynomial over x
    pub fn compute_Q(
        &self,
        z: &Vec<C::ScalarField>,
        beta: &[C::ScalarField],
    ) -> VirtualPolynomial<C::ScalarField> {
        let q = self.compute_q(z);
        q.build_f_hat(beta).unwrap()
    }
}

impl<C: CurveGroup> CCCS<C> {
    /// Perform the check of the CCCS instance described at section 4.1
    pub fn check_relation(
        &self,
        pedersen_params: &PedersenParams<C>,
        ccs: &CCS<C>,
        w: &Witness<C::ScalarField>,
    ) -> Result<(), Error> {
        // check that C is the commitment of w. Notice that this is not verifying a Pedersen
        // opening, but checking that the Commitment comes from committing to the witness.
        if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
            return Err(Error::NotSatisfied);
        }

        // check CCCS relation
        let z: Vec<C::ScalarField> =
            [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();

        // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube
        let q_x = ccs.compute_q(&z);
        for x in BooleanHypercube::new(ccs.s) {
            if !q_x.evaluate(&x).unwrap().is_zero() {
                return Err(Error::NotSatisfied);
            }
        }

        Ok(())
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::ccs::tests::{get_test_ccs, get_test_z};
    use ark_std::test_rng;
    use ark_std::UniformRand;

    use ark_pallas::{Fr, Projective};

    /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the
    /// hypercube, but to not-zero outside the hypercube.
    #[test]
    fn test_compute_q() {
        let mut rng = test_rng();

        let ccs = get_test_ccs::<Projective>();
        let z = get_test_z(3);

        let q = ccs.compute_q(&z);

        // Evaluate inside the hypercube
        for x in BooleanHypercube::new(ccs.s) {
            assert_eq!(Fr::zero(), q.evaluate(&x).unwrap());
        }

        // Evaluate outside the hypercube
        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap());
    }

    /// Perform some sanity checks on Q(x).
    #[test]
    fn test_compute_Q() {
        let mut rng = test_rng();

        let ccs: CCS<Projective> = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Compute Q(x) = eq(beta, x) * q(x).
        let Q = ccs.compute_Q(&z, &beta);

        // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y)
        // which interpolates the multivariate polynomial q(x) inside the hypercube.
        //
        // Observe that summing Q(x) inside the hypercube, directly computes G(\beta).
        //
        // Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the
        // hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes
        // inside the hypercube, this makes it the zero polynomial.
        //
        // Hence, evaluating G(x) at a random beta should give zero.

        // Now sum Q(x) evaluations in the hypercube and expect it to be 0
        let r = BooleanHypercube::new(ccs.s)
            .map(|x| Q.evaluate(&x).unwrap())
            .fold(Fr::zero(), |acc, result| acc + result);
        assert_eq!(r, Fr::zero());
    }

    /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube.
    /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point.
    /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside
    #[test]
    fn test_Q_against_q() {
        let mut rng = test_rng();

        let ccs: CCS<Projective> = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
        // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
        let q = ccs.compute_q(&z);
        for d in BooleanHypercube::new(ccs.s) {
            let Q_at_d = ccs.compute_Q(&z, &d);

            // Get G(d) by summing Q_d(x) over the hypercube
            let G_at_d = BooleanHypercube::new(ccs.s)
                .map(|x| Q_at_d.evaluate(&x).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            assert_eq!(G_at_d, q.evaluate(&d).unwrap());
        }

        // Now test that they should disagree outside of the hypercube
        let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let Q_at_r = ccs.compute_Q(&z, &r);

        // Get G(r) by summing Q_r(x) over the hypercube
        let G_at_r = BooleanHypercube::new(ccs.s)
            .map(|x| Q_at_r.evaluate(&x).unwrap())
            .fold(Fr::zero(), |acc, result| acc + result);
        assert_ne!(G_at_r, q.evaluate(&r).unwrap());
    }
}
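To restate what `compute_q` and `check_relation` implement, in the same notation as the doc comments: with $z = (1, x, w)$, the committed CCS instance is satisfied iff

$$q(x) \;=\; \sum_{i=1}^{q} c_i \prod_{j \in S_i}\Big(\sum_{y \in \{0,1\}^{s'}} M_j(x,y)\, z(y)\Big) \;=\; 0 \quad \text{for all } x \in \{0,1\}^{s},$$

and $Q(x) = \tilde{eq}(\beta, x)\cdot q(x)$ is the randomized version whose sum over the hypercube the tests above check to be zero.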
+318 folding-schemes/src/folding/hypernova/circuit.rs (Normal file)
@@ -0,0 +1,318 @@
// hypernova nimfs verifier circuit
// see section 5 in https://eprint.iacr.org/2023/573.pdf

use crate::{ccs::CCS, folding::circuits::utils::EqEvalGadget};
use ark_ec::CurveGroup;
use ark_r1cs_std::{
    alloc::AllocVar,
    fields::{fp::FpVar, FieldVar},
    ToBitsGadget,
};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::Zero;
use std::marker::PhantomData;

/// Gadget to compute $\sum_{j \in [t]} \gamma^{j} \cdot e_1 \cdot \sigma_j + \gamma^{t+1} \cdot e_2 \cdot \sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$.
/// This is the sum computed by the verifier and laid out in section 5, step 5 of "A multi-folding scheme for CCS".
pub struct ComputeCFromSigmasAndThetasGadget<C: CurveGroup> {
    _c: PhantomData<C>,
}

impl<C: CurveGroup> ComputeCFromSigmasAndThetasGadget<C> {
    /// Computes the sum $\sum_{j}^{j + n} \gamma^{j} \cdot eq_eval \cdot \sigma_{j}$, where $n$ is the length of the `sigmas` vector
    /// It corresponds to the first term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
    ///
    /// # Arguments
    /// - `sigmas`: vector of $\sigma_j$ values
    /// - `eq_eval`: the value of $\tilde{eq}(x_j, x^{\prime})$
    /// - `gamma`: value $\gamma$
    /// - `j`: the power at which we start to compute $\gamma^{j}$. This is needed in the context of multifolding.
    ///
    /// # Notes
    /// In the context of multifolding, `j` corresponds to `ccs.t` in `compute_c_from_sigmas_and_thetas`
    fn sum_muls_gamma_pows_eq_sigma(
        gamma: FpVar<C::ScalarField>,
        eq_eval: FpVar<C::ScalarField>,
        sigmas: Vec<FpVar<C::ScalarField>>,
        j: FpVar<C::ScalarField>,
    ) -> Result<FpVar<C::ScalarField>, SynthesisError> {
        let mut result = FpVar::<C::ScalarField>::zero();
        let mut gamma_pow = gamma.pow_le(&j.to_bits_le()?)?;
        for sigma in sigmas {
            result += gamma_pow.clone() * eq_eval.clone() * sigma;
            gamma_pow *= gamma.clone();
        }
        Ok(result)
    }

    /// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$
    ///
    /// # Arguments
    /// - `c_i`: vector of $c_i$ values
    /// - `thetas`: vector of pre-processed $\theta_j$ values corresponding to a particular `ccs.S[i]`
    ///
    /// # Notes
    /// This is a part of the second term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
    /// The first term is computed by `SumMulsGammaPowsEqSigmaGadget::sum_muls_gamma_pows_eq_sigma`.
    /// This is a dot product between a vector of c_i values and a vector of pre-processed $\theta_j$ values, where $j$ is a value from $S_i$.
    /// Hence, this requires some pre-processing of the $\theta_j$ values, before running this gadget.
    fn sum_ci_mul_prod_thetaj(
        c_i: Vec<FpVar<C::ScalarField>>,
        thetas: Vec<Vec<FpVar<C::ScalarField>>>,
    ) -> Result<FpVar<C::ScalarField>, SynthesisError> {
        let mut result = FpVar::<C::ScalarField>::zero();
        for (i, c_i) in c_i.iter().enumerate() {
            let prod = &thetas[i].iter().fold(FpVar::one(), |acc, e| acc * e);
            result += c_i * prod;
        }
        Ok(result)
    }

    /// Computes the sum that the verifier has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
    ///
    /// # Arguments
    /// - `cs`: constraint system
    /// - `ccs`: the CCS instance
    /// - `vec_sigmas`: vector of $\sigma_j$ values
    /// - `vec_thetas`: vector of $\theta_j$ values
    /// - `gamma`: value $\gamma$
    /// - `beta`: vector of $\beta_j$ values
    /// - `vec_r_x`: vector of $r_{x_j}$ values
    /// - `vec_r_x_prime`: vector of $r_{x_j}^{\prime}$ values
    ///
    /// # Notes
    /// Arguments to this function are *almost* the same as the arguments to `compute_c_from_sigmas_and_thetas` in `utils.rs`.
    #[allow(clippy::too_many_arguments)]
    pub fn compute_c_from_sigmas_and_thetas(
        cs: ConstraintSystemRef<C::ScalarField>,
        ccs: &CCS<C>,
        vec_sigmas: Vec<Vec<FpVar<C::ScalarField>>>,
        vec_thetas: Vec<Vec<FpVar<C::ScalarField>>>,
        gamma: FpVar<C::ScalarField>,
        beta: Vec<FpVar<C::ScalarField>>,
        vec_r_x: Vec<Vec<FpVar<C::ScalarField>>>,
        vec_r_x_prime: Vec<FpVar<C::ScalarField>>,
    ) -> Result<FpVar<C::ScalarField>, SynthesisError> {
        let mut c =
            FpVar::<C::ScalarField>::new_witness(cs.clone(), || Ok(C::ScalarField::zero()))?;
        let t = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
            Ok(C::ScalarField::from(ccs.t as u64))
        })?;

        let mut e_lcccs = Vec::new();
        for r_x in vec_r_x.iter() {
            let e_1 = EqEvalGadget::eq_eval(r_x.to_vec(), vec_r_x_prime.to_vec())?;
            e_lcccs.push(e_1);
        }

        for (i, sigmas) in vec_sigmas.iter().enumerate() {
            let i_var = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
                Ok(C::ScalarField::from(i as u64))
            })?;
            let pow = i_var * t.clone();
            c += Self::sum_muls_gamma_pows_eq_sigma(
                gamma.clone(),
                e_lcccs[i].clone(),
                sigmas.to_vec(),
                pow,
            )?;
        }

        let mu = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
            Ok(C::ScalarField::from(vec_sigmas.len() as u64))
        })?;
        let e_2 = EqEvalGadget::eq_eval(beta, vec_r_x_prime)?;
        for (k, thetas) in vec_thetas.iter().enumerate() {
            // get prepared thetas. only step different from original `compute_c_from_sigmas_and_thetas`
            let mut prepared_thetas = Vec::new();
            for i in 0..ccs.q {
                let prepared: Vec<FpVar<C::ScalarField>> =
                    ccs.S[i].iter().map(|j| thetas[*j].clone()).collect();
                prepared_thetas.push(prepared.to_vec());
            }

            let c_i = Vec::<FpVar<C::ScalarField>>::new_witness(cs.clone(), || Ok(ccs.c.clone()))
                .unwrap();
            let lhs = Self::sum_ci_mul_prod_thetaj(c_i.clone(), prepared_thetas.clone())?;

            // compute gamma^(t+1)
            let pow = mu.clone() * t.clone()
                + FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
                    Ok(C::ScalarField::from(k as u64))
                })?;
            let gamma_t1 = gamma.pow_le(&pow.to_bits_le()?)?;

            c += gamma_t1.clone() * e_2.clone() * lhs.clone();
        }

        Ok(c)
    }
}

#[cfg(test)]
mod tests {
    use super::ComputeCFromSigmasAndThetasGadget;
    use crate::{
        ccs::{
            tests::{get_test_ccs, get_test_z},
            CCS,
        },
        commitment::pedersen::Pedersen,
        folding::hypernova::utils::{
            compute_c_from_sigmas_and_thetas, compute_sigmas_and_thetas, sum_ci_mul_prod_thetaj,
            sum_muls_gamma_pows_eq_sigma,
        },
        utils::virtual_polynomial::eq_eval,
    };
    use ark_pallas::{Fr, Projective};
    use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;
    use ark_std::{test_rng, UniformRand};

    #[test]
    pub fn test_sum_muls_gamma_pow_eq_sigma_gadget() {
        let mut rng = test_rng();
        let ccs: CCS<Projective> = get_test_ccs();
        let z1 = get_test_z(3);
        let z2 = get_test_z(4);

        let gamma: Fr = Fr::rand(&mut rng);
        let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Initialize a multifolding object
        let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
        let sigmas_thetas =
            compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);

        let mut e_lcccs = Vec::new();
        for r_x in &vec![lcccs_instance.r_x] {
            e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
        }

        // Initialize cs and gamma
        let cs = ConstraintSystem::<Fr>::new_ref();
        let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();

        for (i, sigmas) in sigmas_thetas.0.iter().enumerate() {
            let expected =
                sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
            let sigmas_var =
                Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap();
            let eq_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(e_lcccs[i])).unwrap();
            let pow =
                FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from((i * ccs.t) as u64))).unwrap();
            let computed =
                ComputeCFromSigmasAndThetasGadget::<Projective>::sum_muls_gamma_pows_eq_sigma(
                    gamma_var.clone(),
                    eq_var,
                    sigmas_var,
                    pow,
                )
                .unwrap();
            assert_eq!(expected, computed.value().unwrap());
        }
    }

    #[test]
    pub fn test_sum_ci_mul_prod_thetaj_gadget() {
        let mut rng = test_rng();
        let ccs: CCS<Projective> = get_test_ccs();
        let z1 = get_test_z(3);
        let z2 = get_test_z(4);

        let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Initialize a multifolding object
        let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
        let sigmas_thetas =
            compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);

        let mut e_lcccs = Vec::new();
        for r_x in &vec![lcccs_instance.r_x] {
            e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
        }

        // Initialize cs
        let cs = ConstraintSystem::<Fr>::new_ref();
        let vec_thetas = sigmas_thetas.1;
        for thetas in vec_thetas.iter() {
            // sum c_i * prod theta_j
            let expected = sum_ci_mul_prod_thetaj(&ccs, thetas); // from `compute_c_from_sigmas_and_thetas`
            let mut prepared_thetas = Vec::new();
            for i in 0..ccs.q {
                let prepared: Vec<Fr> = ccs.S[i].iter().map(|j| thetas[*j]).collect();
                prepared_thetas
                    .push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(prepared)).unwrap());
            }
            let computed = ComputeCFromSigmasAndThetasGadget::<Projective>::sum_ci_mul_prod_thetaj(
                Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(ccs.c.clone())).unwrap(),
                prepared_thetas,
            )
            .unwrap();
            assert_eq!(expected, computed.value().unwrap());
        }
    }

    #[test]
    pub fn test_compute_c_from_sigmas_and_thetas_gadget() {
        let ccs: CCS<Projective> = get_test_ccs();
        let z1 = get_test_z(3);
        let z2 = get_test_z(4);

        let mut rng = test_rng();
        let gamma: Fr = Fr::rand(&mut rng);
        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Initialize a multifolding object
        let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
        let sigmas_thetas =
            compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);

        let expected_c = compute_c_from_sigmas_and_thetas(
            &ccs,
            &sigmas_thetas,
            gamma,
            &beta,
            &vec![lcccs_instance.r_x.clone()],
            &r_x_prime,
        );

        let cs = ConstraintSystem::<Fr>::new_ref();
        let mut vec_sigmas = Vec::new();
        let mut vec_thetas = Vec::new();
        for sigmas in sigmas_thetas.0 {
            vec_sigmas
                .push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap());
        }
        for thetas in sigmas_thetas.1 {
            vec_thetas
                .push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(thetas.clone())).unwrap());
        }
        let vec_r_x =
            vec![
                Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(lcccs_instance.r_x.clone()))
                    .unwrap(),
            ];
        let vec_r_x_prime =
            Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(r_x_prime.clone())).unwrap();
        let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();
        let beta_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(beta.clone())).unwrap();
        let computed_c = ComputeCFromSigmasAndThetasGadget::compute_c_from_sigmas_and_thetas(
            cs,
            &ccs,
            vec_sigmas,
            vec_thetas,
            gamma_var,
            beta_var,
            vec_r_x,
            vec_r_x_prime,
        )
        .unwrap();

        assert_eq!(expected_c, computed_c.value().unwrap());
    }
}
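For the simplest case of one running LCCCS instance and one incoming CCCS instance (the loops above generalize this over all instances), the value the gadget assembles is the step-5 verifier sum quoted in its doc comment, with $e_1 = \tilde{eq}(r_x, r_x')$ and $e_2 = \tilde{eq}(\beta, r_x')$:

$$c \;=\; \sum_{j \in [t]} \gamma^{j}\, e_1\, \sigma_j \;+\; \gamma^{t+1}\, e_2 \sum_{i=1}^{q} c_i \prod_{j \in S_i} \theta_j.$$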
+188 folding-schemes/src/folding/hypernova/lcccs.rs (Normal file)
@@ -0,0 +1,188 @@
use ark_ec::CurveGroup;
use ark_poly::DenseMultilinearExtension;
use ark_std::One;
use std::sync::Arc;

use ark_std::{rand::Rng, UniformRand};

use super::cccs::Witness;
use super::utils::{compute_all_sum_Mz_evals, compute_sum_Mz};
use crate::ccs::CCS;
use crate::commitment::{
    pedersen::{Params as PedersenParams, Pedersen},
    CommitmentProver,
};
use crate::utils::mle::{matrix_to_mle, vec_to_mle};
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::Error;

/// Linearized Committed CCS instance
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LCCCS<C: CurveGroup> {
    // Commitment to witness
    pub C: C,
    // Relaxation factor of z for folded LCCCS
    pub u: C::ScalarField,
    // Public input/output
    pub x: Vec<C::ScalarField>,
    // Random evaluation point for the v_i
    pub r_x: Vec<C::ScalarField>,
    // Vector of v_i
    pub v: Vec<C::ScalarField>,
}

impl<C: CurveGroup> CCS<C> {
    /// Compute v_j values of the linearized committed CCS form
    /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
    fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec<C::ScalarField> {
        compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime)
    }

    pub fn to_lcccs<R: Rng>(
        &self,
        rng: &mut R,
        pedersen_params: &PedersenParams<C>,
        z: &[C::ScalarField],
    ) -> Result<(LCCCS<C>, Witness<C::ScalarField>), Error> {
        let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
        let r_w = C::ScalarField::rand(rng);
        let C = Pedersen::commit(pedersen_params, &w, &r_w)?;

        let r_x: Vec<C::ScalarField> = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
        let v = self.compute_v_j(z, &r_x);

        Ok((
            LCCCS::<C> {
                C,
                u: C::ScalarField::one(),
                x: z[1..(1 + self.l)].to_vec(),
                r_x,
                v,
            },
            Witness::<C::ScalarField> { w, r_w },
        ))
    }
}

impl<C: CurveGroup> LCCCS<C> {
    /// Compute all L_j(x) polynomials
    pub fn compute_Ls(
        &self,
        ccs: &CCS<C>,
        z: &Vec<C::ScalarField>,
    ) -> Vec<VirtualPolynomial<C::ScalarField>> {
        let z_mle = vec_to_mle(ccs.s_prime, z);
        // Convert all matrices to MLE
        let M_x_y_mle: Vec<DenseMultilinearExtension<C::ScalarField>> =
            ccs.M.clone().into_iter().map(matrix_to_mle).collect();

        let mut vec_L_j_x = Vec::with_capacity(ccs.t);
        for M_j in M_x_y_mle {
            let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
            let sum_Mz_virtual =
                VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one());
            let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap();
            vec_L_j_x.push(L_j_x);
        }

        vec_L_j_x
    }

    /// Perform the check of the LCCCS instance described at section 4.2
    pub fn check_relation(
        &self,
        pedersen_params: &PedersenParams<C>,
        ccs: &CCS<C>,
        w: &Witness<C::ScalarField>,
    ) -> Result<(), Error> {
        // check that C is the commitment of w. Notice that this is not verifying a Pedersen
        // opening, but checking that the Commitment comes from committing to the witness.
        if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
            return Err(Error::NotSatisfied);
        }

        // check CCS relation
        let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
        let computed_v = compute_all_sum_Mz_evals(&ccs.M, &z, &self.r_x, ccs.s_prime);
        if computed_v != self.v {
            return Err(Error::NotSatisfied);
        }
        Ok(())
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use ark_std::Zero;

    use crate::ccs::tests::{get_test_ccs, get_test_z};
    use crate::utils::hypercube::BooleanHypercube;
    use ark_std::test_rng;

    use ark_pallas::{Fr, Projective};

    #[test]
    /// Test linearized CCCS v_j against the L_j(x)
    fn test_lcccs_v_j() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z.clone()).unwrap();

        let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
        // with our test vector coming from R1CS, v should have length 3
        assert_eq!(lcccs.v.len(), 3);

        let vec_L_j_x = lcccs.compute_Ls(&ccs, &z);
        assert_eq!(vec_L_j_x.len(), lcccs.v.len());

        for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
            let sum_L_j_x = BooleanHypercube::new(ccs.s)
                .map(|y| L_j_x.evaluate(&y).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            assert_eq!(v_i, sum_L_j_x);
        }
    }

    /// Given a bad z, check that the v_j should not match with the L_j(x)
    #[test]
    fn test_bad_v_j() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z.clone()).unwrap();

        // Mutate z so that the relation does not hold
        let mut bad_z = z.clone();
        bad_z[3] = Fr::zero();
        assert!(ccs.check_relation(&bad_z.clone()).is_err());

        let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        // Compute v_j with the right z
        let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
        // with our test vector coming from R1CS, v should have length 3
        assert_eq!(lcccs.v.len(), 3);

        // Compute L_j(x) with the bad z
        let vec_L_j_x = lcccs.compute_Ls(&ccs, &bad_z);
        assert_eq!(vec_L_j_x.len(), lcccs.v.len());

        // Make sure that the LCCCS is not satisfied given these L_j(x)
        // i.e. summing L_j(x) over the hypercube should not give v_j for all j
        let mut satisfied = true;
        for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
            let sum_L_j_x = BooleanHypercube::new(ccs.s)
                .map(|y| L_j_x.evaluate(&y).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            if v_i != sum_L_j_x {
                satisfied = false;
            }
        }

        assert!(!satisfied);
    }
}
+6 folding-schemes/src/folding/hypernova/mod.rs (Normal file)
@@ -0,0 +1,6 @@
/// Implements the scheme described in [HyperNova](https://eprint.iacr.org/2023/573.pdf)
pub mod cccs;
pub mod circuit;
pub mod lcccs;
pub mod nimfs;
pub mod utils;
721
folding-schemes/src/folding/hypernova/nimfs.rs
Normal file
721
folding-schemes/src/folding/hypernova/nimfs.rs
Normal file
@@ -0,0 +1,721 @@
|
||||
use ark_crypto_primitives::sponge::Absorb;
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::{DenseUVPolynomial, Polynomial};
|
||||
use ark_std::{One, Zero};
|
||||
|
||||
use super::cccs::{Witness, CCCS};
|
||||
use super::lcccs::LCCCS;
|
||||
use super::utils::{compute_c_from_sigmas_and_thetas, compute_g, compute_sigmas_and_thetas};
|
||||
use crate::ccs::CCS;
|
||||
use crate::transcript::Transcript;
|
||||
use crate::utils::hypercube::BooleanHypercube;
|
||||
use crate::utils::sum_check::structs::IOPProof as SumCheckProof;
|
||||
use crate::utils::sum_check::{IOPSumCheck, SumCheck};
|
||||
use crate::utils::virtual_polynomial::VPAuxInfo;
|
||||
use crate::Error;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
/// Proof defines a multifolding proof
|
||||
#[derive(Debug)]
|
||||
pub struct Proof<C: CurveGroup> {
|
||||
pub sc_proof: SumCheckProof<C::ScalarField>,
|
||||
pub sigmas_thetas: SigmasThetas<C::ScalarField>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>);
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Implements the Non-Interactive Multi Folding Scheme described in section 5 of
|
||||
/// [HyperNova](https://eprint.iacr.org/2023/573.pdf)
|
||||
pub struct NIMFS<C: CurveGroup, T: Transcript<C>> {
|
||||
pub _c: PhantomData<C>,
|
||||
pub _t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup, T: Transcript<C>> NIMFS<C, T>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
{
|
||||
pub fn fold(
|
||||
lcccs: &[LCCCS<C>],
|
||||
cccs: &[CCCS<C>],
|
||||
sigmas_thetas: &SigmasThetas<C::ScalarField>,
|
||||
r_x_prime: Vec<C::ScalarField>,
|
||||
rho: C::ScalarField,
|
||||
) -> LCCCS<C> {
|
||||
let (sigmas, thetas) = (sigmas_thetas.0.clone(), sigmas_thetas.1.clone());
|
||||
let mut C_folded = C::zero();
|
||||
let mut u_folded = C::ScalarField::zero();
|
||||
let mut x_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); lcccs[0].x.len()];
|
||||
let mut v_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); sigmas[0].len()];
|
||||
|
||||
for i in 0..(lcccs.len() + cccs.len()) {
|
||||
let rho_i = rho.pow([i as u64]);
|
||||
|
||||
let c: C;
|
||||
let u: C::ScalarField;
|
||||
let x: Vec<C::ScalarField>;
|
||||
let v: Vec<C::ScalarField>;
|
||||
if i < lcccs.len() {
|
||||
c = lcccs[i].C;
|
||||
u = lcccs[i].u;
|
||||
x = lcccs[i].x.clone();
|
||||
v = sigmas[i].clone();
|
||||
} else {
|
||||
c = cccs[i - lcccs.len()].C;
|
||||
u = C::ScalarField::one();
|
||||
x = cccs[i - lcccs.len()].x.clone();
|
||||
v = thetas[i - lcccs.len()].clone();
|
||||
}
|
||||
|
||||
C_folded += c.mul(rho_i);
|
||||
u_folded += rho_i * u;
|
||||
x_folded = x_folded
|
||||
.iter()
|
||||
.zip(
|
||||
x.iter()
|
||||
.map(|x_i| *x_i * rho_i)
|
||||
.collect::<Vec<C::ScalarField>>(),
|
||||
)
|
||||
.map(|(a_i, b_i)| *a_i + b_i)
|
||||
.collect();
|
||||
|
||||
v_folded = v_folded
|
||||
.iter()
|
||||
.zip(
|
||||
v.iter()
|
||||
.map(|x_i| *x_i * rho_i)
|
||||
.collect::<Vec<C::ScalarField>>(),
|
||||
)
|
||||
.map(|(a_i, b_i)| *a_i + b_i)
|
||||
.collect();
|
||||
}
|
||||
|
||||
LCCCS::<C> {
|
||||
C: C_folded,
|
||||
u: u_folded,
|
||||
x: x_folded,
|
||||
r_x: r_x_prime,
|
||||
v: v_folded,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fold_witness(
|
||||
w_lcccs: &[Witness<C::ScalarField>],
|
||||
w_cccs: &[Witness<C::ScalarField>],
|
||||
rho: C::ScalarField,
|
||||
) -> Witness<C::ScalarField> {
|
||||
let mut w_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w_lcccs[0].w.len()];
|
||||
let mut r_w_folded = C::ScalarField::zero();
|
||||
|
||||
for i in 0..(w_lcccs.len() + w_cccs.len()) {
|
||||
let rho_i = rho.pow([i as u64]);
|
||||
let w: Vec<C::ScalarField>;
|
||||
let r_w: C::ScalarField;
|
||||
|
||||
if i < w_lcccs.len() {
|
||||
w = w_lcccs[i].w.clone();
|
||||
r_w = w_lcccs[i].r_w;
|
||||
} else {
|
||||
w = w_cccs[i - w_lcccs.len()].w.clone();
|
||||
r_w = w_cccs[i - w_lcccs.len()].r_w;
|
||||
}
|
||||
|
||||
w_folded = w_folded
|
||||
.iter()
|
||||
.zip(
|
||||
w.iter()
|
||||
.map(|x_i| *x_i * rho_i)
|
||||
.collect::<Vec<C::ScalarField>>(),
|
||||
)
|
||||
.map(|(a_i, b_i)| *a_i + b_i)
|
||||
.collect();
|
||||
|
||||
r_w_folded += rho_i * r_w;
|
||||
}
|
||||
Witness {
|
||||
w: w_folded,
|
||||
r_w: r_w_folded,
|
||||
}
|
||||
}
|
||||
|
||||
/// Performs the multifolding prover. Given μ LCCCS instances and ν CCS instances, fold them
|
||||
/// into a single LCCCS instance. Since this is the prover, also fold their witness.
|
||||
/// Returns the final folded LCCCS, the folded witness, and the multifolding proof, which
|
||||
/// contains the sumcheck proof and the helper sumcheck claim sigmas and thetas.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn prove(
|
||||
transcript: &mut impl Transcript<C>,
|
||||
ccs: &CCS<C>,
|
||||
running_instances: &[LCCCS<C>],
|
||||
new_instances: &[CCCS<C>],
|
||||
w_lcccs: &[Witness<C::ScalarField>],
|
||||
w_cccs: &[Witness<C::ScalarField>],
|
||||
) -> Result<(Proof<C>, LCCCS<C>, Witness<C::ScalarField>), Error> {
|
||||
// TODO: append to the transcript
|
||||
|
||||
if running_instances.is_empty() {
|
||||
return Err(Error::Empty);
|
||||
}
|
||||
if new_instances.is_empty() {
|
||||
return Err(Error::Empty);
|
||||
}
|
||||
|
||||
// construct the LCCCS z vector from the relaxation factor, public IO and witness
|
||||
// XXX this deserves its own function in LCCCS
|
||||
let mut z_lcccs = Vec::new();
|
||||
for (i, running_instance) in running_instances.iter().enumerate() {
|
||||
let z_1: Vec<C::ScalarField> = [
|
||||
vec![running_instance.u],
|
||||
running_instance.x.clone(),
|
||||
w_lcccs[i].w.to_vec(),
|
||||
]
|
||||
.concat();
|
||||
z_lcccs.push(z_1);
|
||||
}
|
||||
// construct the CCCS z vector from the public IO and witness
|
||||
let mut z_cccs = Vec::new();
|
||||
for (i, new_instance) in new_instances.iter().enumerate() {
|
||||
let z_2: Vec<C::ScalarField> = [
|
||||
vec![C::ScalarField::one()],
|
||||
new_instance.x.clone(),
|
||||
w_cccs[i].w.to_vec(),
|
||||
]
|
||||
.concat();
|
||||
z_cccs.push(z_2);
|
||||
}
|
||||
|
||||
// Step 1: Get some challenges
|
||||
let gamma_scalar = C::ScalarField::from_le_bytes_mod_order(b"gamma");
|
||||
let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta");
|
||||
transcript.absorb(&gamma_scalar);
|
||||
let gamma: C::ScalarField = transcript.get_challenge();
|
||||
transcript.absorb(&beta_scalar);
|
||||
let beta: Vec<C::ScalarField> = transcript.get_challenges(ccs.s);
|
||||
|
||||
// Compute g(x)
|
||||
let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta);
|
||||
|
||||
// Step 3: Run the sumcheck prover
|
||||
let sumcheck_proof = IOPSumCheck::<C, T>::prove(&g, transcript)
|
||||
.map_err(|err| Error::SumCheckProveError(err.to_string()))?;
|
||||
|
||||
// Note: The following two "sanity checks" are done for this prototype; in a final version
// they should be removed.
|
||||
//
|
||||
// Sanity check 1: evaluate g(x) over x \in {0,1} (the boolean hypercube), and check that
|
||||
// its sum is equal to the extracted_sum from the SumCheck.
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
let mut g_over_bhc = C::ScalarField::zero();
|
||||
for x in BooleanHypercube::new(ccs.s) {
|
||||
g_over_bhc += g.evaluate(&x)?;
|
||||
}
|
||||
|
||||
// note: this is the sum of g(x) over the whole boolean hypercube
|
||||
let extracted_sum = IOPSumCheck::<C, T>::extract_sum(&sumcheck_proof);
|
||||
|
||||
if extracted_sum != g_over_bhc {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
// Sanity check 2: expect \sum v_j * gamma^j to be equal to the sum of g(x) over the
|
||||
// boolean hypercube (and also equal to the extracted_sum from the SumCheck).
|
||||
let mut sum_v_j_gamma = C::ScalarField::zero();
|
||||
for (i, running_instance) in running_instances.iter().enumerate() {
|
||||
for j in 0..running_instance.v.len() {
|
||||
let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
|
||||
sum_v_j_gamma += running_instance.v[j] * gamma_j;
|
||||
}
|
||||
}
|
||||
if g_over_bhc != sum_v_j_gamma {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
if extracted_sum != sum_v_j_gamma {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Step 2: dig into the sumcheck and extract r_x_prime
|
||||
let r_x_prime = sumcheck_proof.point.clone();
|
||||
|
||||
// Step 4: compute sigmas and thetas
|
||||
let sigmas_thetas = compute_sigmas_and_thetas(ccs, &z_lcccs, &z_cccs, &r_x_prime);
|
||||
|
||||
// Step 6: Get the folding challenge
|
||||
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
|
||||
transcript.absorb(&rho_scalar);
|
||||
let rho: C::ScalarField = transcript.get_challenge();
|
||||
|
||||
// Step 7: Create the folded instance
|
||||
let folded_lcccs = Self::fold(
|
||||
running_instances,
|
||||
new_instances,
|
||||
&sigmas_thetas,
|
||||
r_x_prime,
|
||||
rho,
|
||||
);
|
||||
|
||||
// Step 8: Fold the witnesses
|
||||
let folded_witness = Self::fold_witness(w_lcccs, w_cccs, rho);
|
||||
|
||||
Ok((
|
||||
Proof::<C> {
|
||||
sc_proof: sumcheck_proof,
|
||||
sigmas_thetas,
|
||||
},
|
||||
folded_lcccs,
|
||||
folded_witness,
|
||||
))
|
||||
}
|
||||
|
||||
/// Performs the multifolding verifier. Given μ LCCCS instances and ν CCCS instances, folds them
/// into a single LCCCS instance.
/// Returns the folded LCCCS instance.
|
||||
pub fn verify(
|
||||
transcript: &mut impl Transcript<C>,
|
||||
ccs: &CCS<C>,
|
||||
running_instances: &[LCCCS<C>],
|
||||
new_instances: &[CCCS<C>],
|
||||
proof: Proof<C>,
|
||||
) -> Result<LCCCS<C>, Error> {
|
||||
// TODO: append to the transcript
|
||||
|
||||
if running_instances.is_empty() {
|
||||
return Err(Error::Empty);
|
||||
}
|
||||
if new_instances.is_empty() {
|
||||
return Err(Error::Empty);
|
||||
}
|
||||
|
||||
// Step 1: Get some challenges
|
||||
let gamma_scalar = C::ScalarField::from_le_bytes_mod_order(b"gamma");
|
||||
transcript.absorb(&gamma_scalar);
|
||||
let gamma: C::ScalarField = transcript.get_challenge();
|
||||
|
||||
let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta");
|
||||
transcript.absorb(&beta_scalar);
|
||||
let beta: Vec<C::ScalarField> = transcript.get_challenges(ccs.s);
|
||||
|
||||
let vp_aux_info = VPAuxInfo::<C::ScalarField> {
|
||||
max_degree: ccs.d + 1,
|
||||
num_variables: ccs.s,
|
||||
phantom: PhantomData::<C::ScalarField>,
|
||||
};
|
||||
|
||||
// Step 3: Start verifying the sumcheck
|
||||
// First, compute the expected sumcheck sum: \sum gamma^j v_j
|
||||
let mut sum_v_j_gamma = C::ScalarField::zero();
|
||||
for (i, running_instance) in running_instances.iter().enumerate() {
|
||||
for j in 0..running_instance.v.len() {
|
||||
let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
|
||||
sum_v_j_gamma += running_instance.v[j] * gamma_j;
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the interactive part of the sumcheck
|
||||
let sumcheck_subclaim =
|
||||
IOPSumCheck::<C, T>::verify(sum_v_j_gamma, &proof.sc_proof, &vp_aux_info, transcript)
|
||||
.map_err(|err| Error::SumCheckVerifyError(err.to_string()))?;
|
||||
|
||||
// Step 2: Dig into the sumcheck claim and extract the randomness used
|
||||
let r_x_prime = sumcheck_subclaim.point.clone();
|
||||
|
||||
// Step 5: Finish verifying sumcheck (verify the claim c)
|
||||
let c = compute_c_from_sigmas_and_thetas(
|
||||
ccs,
|
||||
&proof.sigmas_thetas,
|
||||
gamma,
|
||||
&beta,
|
||||
&running_instances
|
||||
.iter()
|
||||
.map(|lcccs| lcccs.r_x.clone())
|
||||
.collect(),
|
||||
&r_x_prime,
|
||||
);
|
||||
// check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas
|
||||
if c != sumcheck_subclaim.expected_evaluation {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
|
||||
// Sanity check: g(r_x') can also be computed from the last evaluation value of the sumcheck
// proof, and it should equal the previously obtained values.
|
||||
let g_on_rxprime_from_sumcheck_last_eval =
|
||||
DensePolynomial::from_coefficients_slice(&proof.sc_proof.proofs.last().unwrap().coeffs)
|
||||
.evaluate(r_x_prime.last().unwrap());
|
||||
if g_on_rxprime_from_sumcheck_last_eval != c {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
if g_on_rxprime_from_sumcheck_last_eval != sumcheck_subclaim.expected_evaluation {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
|
||||
// Step 6: Get the folding challenge
|
||||
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
|
||||
transcript.absorb(&rho_scalar);
|
||||
let rho: C::ScalarField = transcript.get_challenge();
|
||||
|
||||
// Step 7: Compute the folded instance
|
||||
Ok(Self::fold(
|
||||
running_instances,
|
||||
new_instances,
|
||||
&proof.sigmas_thetas,
|
||||
r_x_prime,
|
||||
rho,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use crate::ccs::tests::{get_test_ccs, get_test_z};
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
use crate::transcript::poseidon::PoseidonTranscript;
|
||||
use ark_std::test_rng;
|
||||
use ark_std::UniformRand;
|
||||
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use ark_pallas::{Fr, Projective};
|
||||
|
||||
#[test]
|
||||
fn test_fold() {
|
||||
let ccs = get_test_ccs();
|
||||
let z1 = get_test_z::<Fr>(3);
|
||||
let z2 = get_test_z::<Fr>(4);
|
||||
ccs.check_relation(&z1).unwrap();
|
||||
ccs.check_relation(&z2).unwrap();
|
||||
|
||||
let mut rng = test_rng();
|
||||
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
|
||||
|
||||
let sigmas_thetas =
|
||||
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
|
||||
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
|
||||
let (lcccs, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
|
||||
let (cccs, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z2).unwrap();
|
||||
|
||||
lcccs.check_relation(&pedersen_params, &ccs, &w1).unwrap();
|
||||
cccs.check_relation(&pedersen_params, &ccs, &w2).unwrap();
|
||||
|
||||
let mut rng = test_rng();
|
||||
let rho = Fr::rand(&mut rng);
|
||||
|
||||
let folded = NIMFS::<Projective, PoseidonTranscript<Projective>>::fold(
|
||||
&[lcccs],
|
||||
&[cccs],
|
||||
&sigmas_thetas,
|
||||
r_x_prime,
|
||||
rho,
|
||||
);
|
||||
|
||||
let w_folded =
|
||||
NIMFS::<Projective, PoseidonTranscript<Projective>>::fold_witness(&[w1], &[w2], rho);
|
||||
|
||||
// check lcccs relation
|
||||
folded
|
||||
.check_relation(&pedersen_params, &ccs, &w_folded)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper)
|
||||
#[test]
|
||||
pub fn test_basic_multifolding() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// Create a basic CCS circuit
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
|
||||
// Generate a satisfying witness
|
||||
let z_1 = get_test_z(3);
|
||||
// Generate another satisfying witness
|
||||
let z_2 = get_test_z(4);
|
||||
|
||||
// Create the LCCCS instance out of z_1
|
||||
let (running_instance, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap();
|
||||
// Create the CCCS instance out of z_2
|
||||
let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap();
|
||||
|
||||
// Prover's transcript
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
let mut transcript_p: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
// Run the prover side of the multifolding
|
||||
let (proof, folded_lcccs, folded_witness) =
|
||||
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
|
||||
&mut transcript_p,
|
||||
&ccs,
|
||||
&[running_instance.clone()],
|
||||
&[new_instance.clone()],
|
||||
&[w1],
|
||||
&[w2],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Verifier's transcript
|
||||
let mut transcript_v: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
// Run the verifier side of the multifolding
|
||||
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
|
||||
&mut transcript_v,
|
||||
&ccs,
|
||||
&[running_instance.clone()],
|
||||
&[new_instance.clone()],
|
||||
proof,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(folded_lcccs, folded_lcccs_v);
|
||||
|
||||
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
|
||||
folded_lcccs
|
||||
.check_relation(&pedersen_params, &ccs, &folded_witness)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance
|
||||
#[test]
|
||||
pub fn test_multifolding_two_instances_multiple_steps() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
|
||||
// LCCCS witness
|
||||
let z_1 = get_test_z(2);
|
||||
let (mut running_instance, mut w1) =
|
||||
ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap();
|
||||
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let mut transcript_p: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
let mut transcript_v: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
let n: usize = 10;
|
||||
for i in 3..n {
|
||||
println!("\niteration: i {}", i); // DBG
|
||||
|
||||
// CCS witness
|
||||
let z_2 = get_test_z(i);
|
||||
println!("z_2 {:?}", z_2); // DBG
|
||||
|
||||
let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap();
|
||||
|
||||
// run the prover side of the multifolding
|
||||
let (proof, folded_lcccs, folded_witness) =
|
||||
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
|
||||
&mut transcript_p,
|
||||
&ccs,
|
||||
&[running_instance.clone()],
|
||||
&[new_instance.clone()],
|
||||
&[w1],
|
||||
&[w2],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// run the verifier side of the multifolding
|
||||
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
|
||||
&mut transcript_v,
|
||||
&ccs,
|
||||
&[running_instance.clone()],
|
||||
&[new_instance.clone()],
|
||||
proof,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(folded_lcccs, folded_lcccs_v);
|
||||
|
||||
// check that the folded instance with the folded witness holds the LCCCS relation
|
||||
println!("check_relation {}", i);
|
||||
folded_lcccs
|
||||
.check_relation(&pedersen_params, &ccs, &folded_witness)
|
||||
.unwrap();
|
||||
|
||||
running_instance = folded_lcccs;
|
||||
w1 = folded_witness;
|
||||
}
|
||||
}
|
||||
|
||||
/// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step.
|
||||
#[test]
|
||||
pub fn test_multifolding_mu_nu_instances() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// Create a basic CCS circuit
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
|
||||
let mu = 10;
|
||||
let nu = 15;
|
||||
|
||||
// Generate a mu LCCCS & nu CCCS satisfying witness
|
||||
let mut z_lcccs = Vec::new();
|
||||
for i in 0..mu {
|
||||
let z = get_test_z(i + 3);
|
||||
z_lcccs.push(z);
|
||||
}
|
||||
let mut z_cccs = Vec::new();
|
||||
for i in 0..nu {
|
||||
let z = get_test_z(nu + i + 3);
|
||||
z_cccs.push(z);
|
||||
}
|
||||
|
||||
// Create the LCCCS instances out of z_lcccs
|
||||
let mut lcccs_instances = Vec::new();
|
||||
let mut w_lcccs = Vec::new();
|
||||
for z_i in z_lcccs.iter() {
|
||||
let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap();
|
||||
lcccs_instances.push(running_instance);
|
||||
w_lcccs.push(w);
|
||||
}
|
||||
// Create the CCCS instance out of z_cccs
|
||||
let mut cccs_instances = Vec::new();
|
||||
let mut w_cccs = Vec::new();
|
||||
for z_i in z_cccs.iter() {
|
||||
let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap();
|
||||
cccs_instances.push(new_instance);
|
||||
w_cccs.push(w);
|
||||
}
|
||||
|
||||
// Prover's transcript
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
let mut transcript_p: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
// Run the prover side of the multifolding
|
||||
let (proof, folded_lcccs, folded_witness) =
|
||||
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
|
||||
&mut transcript_p,
|
||||
&ccs,
|
||||
&lcccs_instances,
|
||||
&cccs_instances,
|
||||
&w_lcccs,
|
||||
&w_cccs,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Verifier's transcript
|
||||
let mut transcript_v: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
// Run the verifier side of the multifolding
|
||||
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
|
||||
&mut transcript_v,
|
||||
&ccs,
|
||||
&lcccs_instances,
|
||||
&cccs_instances,
|
||||
proof,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(folded_lcccs, folded_lcccs_v);
|
||||
|
||||
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
|
||||
folded_lcccs
|
||||
.check_relation(&pedersen_params, &ccs, &folded_witness)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step
|
||||
/// and repeats the process doing multiple steps.
|
||||
#[test]
|
||||
pub fn test_multifolding_mu_nu_instances_multiple_steps() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// Create a basic CCS circuit
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
// Prover's transcript
|
||||
let mut transcript_p: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
// Verifier's transcript
|
||||
let mut transcript_v: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
|
||||
|
||||
let n_steps = 3;
|
||||
|
||||
// number of LCCCS & CCCS instances in each multifolding step
|
||||
let mu = 10;
|
||||
let nu = 15;
|
||||
|
||||
// Generate a mu LCCCS & nu CCCS satisfying witness, for each step
|
||||
for step in 0..n_steps {
|
||||
let mut z_lcccs = Vec::new();
|
||||
for i in 0..mu {
|
||||
let z = get_test_z(step + i + 3);
|
||||
z_lcccs.push(z);
|
||||
}
|
||||
let mut z_cccs = Vec::new();
|
||||
for i in 0..nu {
|
||||
let z = get_test_z(nu + i + 3);
|
||||
z_cccs.push(z);
|
||||
}
|
||||
|
||||
// Create the LCCCS instances out of z_lcccs
|
||||
let mut lcccs_instances = Vec::new();
|
||||
let mut w_lcccs = Vec::new();
|
||||
for z_i in z_lcccs.iter() {
|
||||
let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap();
|
||||
lcccs_instances.push(running_instance);
|
||||
w_lcccs.push(w);
|
||||
}
|
||||
// Create the CCCS instance out of z_cccs
|
||||
let mut cccs_instances = Vec::new();
|
||||
let mut w_cccs = Vec::new();
|
||||
for z_i in z_cccs.iter() {
|
||||
let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap();
|
||||
cccs_instances.push(new_instance);
|
||||
w_cccs.push(w);
|
||||
}
|
||||
|
||||
// Run the prover side of the multifolding
|
||||
let (proof, folded_lcccs, folded_witness) =
|
||||
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
|
||||
&mut transcript_p,
|
||||
&ccs,
|
||||
&lcccs_instances,
|
||||
&cccs_instances,
|
||||
&w_lcccs,
|
||||
&w_cccs,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Run the verifier side of the multifolding
|
||||
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
|
||||
&mut transcript_v,
|
||||
&ccs,
|
||||
&lcccs_instances,
|
||||
&cccs_instances,
|
||||
proof,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(folded_lcccs, folded_lcccs_v);
|
||||
|
||||
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
|
||||
folded_lcccs
|
||||
.check_relation(&pedersen_params, &ccs, &folded_witness)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/hypernova/utils.rs (new file, 382 lines)
@@ -0,0 +1,382 @@
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_poly::DenseMultilinearExtension;
|
||||
use ark_poly::MultilinearExtension;
|
||||
use ark_std::{One, Zero};
|
||||
use std::ops::Add;
|
||||
|
||||
use crate::utils::multilinear_polynomial::fix_variables;
|
||||
use crate::utils::multilinear_polynomial::scalar_mul;
|
||||
|
||||
use super::lcccs::LCCCS;
|
||||
use super::nimfs::SigmasThetas;
|
||||
use crate::ccs::CCS;
|
||||
use crate::utils::hypercube::BooleanHypercube;
|
||||
use crate::utils::mle::dense_vec_to_mle;
|
||||
use crate::utils::mle::matrix_to_mle;
|
||||
use crate::utils::vec::SparseMatrix;
|
||||
use crate::utils::virtual_polynomial::{eq_eval, VirtualPolynomial};
|
||||
|
||||
/// Return the vector of evaluations p_j(r) = \sum_{y \in {0,1}^{s'}} M_j(r, y) * z(y) for all j
/// in 0..t, where t is the number of matrices in `vec_M`
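/// Informally, each entry is obtained by building the MLE \sum_{y} ~M_j(x, y) * ~z(y) (via
/// `compute_sum_Mz`) and evaluating it at r.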
|
||||
pub fn compute_all_sum_Mz_evals<F: PrimeField>(
|
||||
vec_M: &[SparseMatrix<F>],
|
||||
z: &Vec<F>,
|
||||
r: &[F],
|
||||
s_prime: usize,
|
||||
) -> Vec<F> {
|
||||
// Convert z to MLE
|
||||
let z_y_mle = dense_vec_to_mle(s_prime, z);
|
||||
// Convert all matrices to MLE
|
||||
let M_x_y_mle: Vec<DenseMultilinearExtension<F>> =
|
||||
vec_M.iter().cloned().map(matrix_to_mle).collect();
|
||||
|
||||
let mut v = Vec::with_capacity(M_x_y_mle.len());
|
||||
for M_i in M_x_y_mle {
|
||||
let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime);
|
||||
let v_i = sum_Mz.evaluate(r).unwrap();
|
||||
v.push(v_i);
|
||||
}
|
||||
v
|
||||
}
|
||||
|
||||
/// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y)
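/// Informally, the sum is computed by iterating over the boolean hypercube for y, fixing the
/// y-variables of ~M_j, scaling by ~z(y), and accumulating the resulting partially-fixed MLEs.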
|
||||
pub fn compute_sum_Mz<F: PrimeField>(
|
||||
M_j: DenseMultilinearExtension<F>,
|
||||
z: &DenseMultilinearExtension<F>,
|
||||
s_prime: usize,
|
||||
) -> DenseMultilinearExtension<F> {
|
||||
let mut sum_Mz = DenseMultilinearExtension {
|
||||
evaluations: vec![F::zero(); M_j.evaluations.len()],
|
||||
num_vars: M_j.num_vars - s_prime,
|
||||
};
|
||||
|
||||
let bhc = BooleanHypercube::new(s_prime);
|
||||
for y in bhc.into_iter() {
|
||||
// In a slightly counter-intuitive fashion fix_variables() fixes the right-most variables of the polynomial. So
|
||||
// for a polynomial M(x,y) and a random field element r, if we do fix_variables(M,r) we will get M(x,r).
|
||||
let M_j_y = fix_variables(&M_j, &y);
|
||||
let z_y = z.evaluate(&y).unwrap();
|
||||
let M_j_z = scalar_mul(&M_j_y, &z_y);
|
||||
sum_Mz = sum_Mz.add(M_j_z);
|
||||
}
|
||||
sum_Mz
|
||||
}
|
||||
|
||||
/// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS
|
||||
/// instances
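/// Informally, sigma_{i,j} = \sum_{y} ~M_j(r_x', y) * ~z_i(y) for each LCCCS z vector, and
/// theta_{k,j} is the analogous value for each CCCS z vector.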
|
||||
pub fn compute_sigmas_and_thetas<C: CurveGroup>(
|
||||
ccs: &CCS<C>,
|
||||
z_lcccs: &[Vec<C::ScalarField>],
|
||||
z_cccs: &[Vec<C::ScalarField>],
|
||||
r_x_prime: &[C::ScalarField],
|
||||
) -> SigmasThetas<C::ScalarField> {
|
||||
let mut sigmas: Vec<Vec<C::ScalarField>> = Vec::new();
|
||||
for z_lcccs_i in z_lcccs {
|
||||
// sigmas
|
||||
let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime);
|
||||
sigmas.push(sigma_i);
|
||||
}
|
||||
let mut thetas: Vec<Vec<C::ScalarField>> = Vec::new();
|
||||
for z_cccs_i in z_cccs {
|
||||
// thetas
|
||||
let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime);
|
||||
thetas.push(theta_i);
|
||||
}
|
||||
SigmasThetas(sigmas, thetas)
|
||||
}
|
||||
|
||||
/// Computes the sum $\sum_{j = 0}^{n} \gamma^{\text{pow} + j} \cdot eq_eval \cdot \sigma_{j}$
|
||||
/// `pow` corresponds to `i * ccs.t` in `compute_c_from_sigmas_and_thetas`
|
||||
pub fn sum_muls_gamma_pows_eq_sigma<F: PrimeField>(
|
||||
gamma: F,
|
||||
eq_eval: F,
|
||||
sigmas: &[F],
|
||||
pow: u64,
|
||||
) -> F {
|
||||
let mut result = F::zero();
|
||||
for (j, sigma_j) in sigmas.iter().enumerate() {
|
||||
let gamma_j = gamma.pow([(pow + (j as u64))]);
|
||||
result += gamma_j * eq_eval * sigma_j;
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} theta_j$
|
||||
pub fn sum_ci_mul_prod_thetaj<C: CurveGroup>(
|
||||
ccs: &CCS<C>,
|
||||
thetas: &[C::ScalarField],
|
||||
) -> C::ScalarField {
|
||||
let mut result = C::ScalarField::zero();
|
||||
for i in 0..ccs.q {
|
||||
let mut prod = C::ScalarField::one();
|
||||
for j in ccs.S[i].clone() {
|
||||
prod *= thetas[j];
|
||||
}
|
||||
result += ccs.c[i] * prod;
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Compute the right-hand-side of step 5 of the multifolding scheme
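/// Informally, it returns
///   c = \sum_i \sum_j gamma^{i*t+j} * eq(r_{x,i}, r_x') * sigma_{i,j}
///     + \sum_k gamma^{mu*t+k} * eq(beta, r_x') * \sum_{i=1}^{q} c_i * \prod_{j \in S_i} theta_{k,j},
/// which should match the sumcheck evaluation of g at r_x'.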
|
||||
pub fn compute_c_from_sigmas_and_thetas<C: CurveGroup>(
|
||||
ccs: &CCS<C>,
|
||||
st: &SigmasThetas<C::ScalarField>,
|
||||
gamma: C::ScalarField,
|
||||
beta: &[C::ScalarField],
|
||||
vec_r_x: &Vec<Vec<C::ScalarField>>,
|
||||
r_x_prime: &[C::ScalarField],
|
||||
) -> C::ScalarField {
|
||||
let (vec_sigmas, vec_thetas) = (st.0.clone(), st.1.clone());
|
||||
let mut c = C::ScalarField::zero();
|
||||
|
||||
let mut e_lcccs = Vec::new();
|
||||
for r_x in vec_r_x {
|
||||
e_lcccs.push(eq_eval(r_x, r_x_prime).unwrap());
|
||||
}
|
||||
for (i, sigmas) in vec_sigmas.iter().enumerate() {
|
||||
// (sum gamma^j * e_i * sigma_j)
|
||||
c += sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
|
||||
}
|
||||
|
||||
let mu = vec_sigmas.len();
|
||||
let e2 = eq_eval(beta, r_x_prime).unwrap();
|
||||
for (k, thetas) in vec_thetas.iter().enumerate() {
|
||||
// + gamma^{mu*t + k} * e2 * sum c_i * prod theta_j
|
||||
let lhs = sum_ci_mul_prod_thetaj(ccs, thetas);
|
||||
let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]);
|
||||
c += gamma_t1 * e2 * lhs;
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
/// Compute g(x) polynomial for the given inputs.
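/// Informally, g(x) = \sum_{j} gamma^j * L_j(x) + \sum_{k} gamma^{mu*t+k} * Q_k(x), where the
/// L_j polynomials come from the LCCCS instances (flattened across all of them) and the Q_k
/// polynomials come from the CCCS z vectors.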
|
||||
pub fn compute_g<C: CurveGroup>(
|
||||
ccs: &CCS<C>,
|
||||
running_instances: &[LCCCS<C>],
|
||||
z_lcccs: &[Vec<C::ScalarField>],
|
||||
z_cccs: &[Vec<C::ScalarField>],
|
||||
gamma: C::ScalarField,
|
||||
beta: &[C::ScalarField],
|
||||
) -> VirtualPolynomial<C::ScalarField> {
|
||||
let mu = running_instances.len();
|
||||
let mut vec_Ls: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
|
||||
for (i, running_instance) in running_instances.iter().enumerate() {
|
||||
let mut Ls = running_instance.compute_Ls(ccs, &z_lcccs[i]);
|
||||
vec_Ls.append(&mut Ls);
|
||||
}
|
||||
let mut vec_Q: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
|
||||
// for (i, _) in cccs_instances.iter().enumerate() {
|
||||
for z_cccs_i in z_cccs.iter() {
|
||||
let Q = ccs.compute_Q(z_cccs_i, beta);
|
||||
vec_Q.push(Q);
|
||||
}
|
||||
let mut g = vec_Ls[0].clone();
|
||||
|
||||
// note: the following two loops could be integrated into the previous two loops, but are left
// separated for clarity in the PoC implementation.
|
||||
for (j, L_j) in vec_Ls.iter_mut().enumerate().skip(1) {
|
||||
let gamma_j = gamma.pow([j as u64]);
|
||||
L_j.scalar_mul(&gamma_j);
|
||||
g = g.add(L_j);
|
||||
}
|
||||
for (i, Q_i) in vec_Q.iter_mut().enumerate() {
|
||||
let gamma_mu_i = gamma.pow([(mu * ccs.t + i) as u64]);
Q_i.scalar_mul(&gamma_mu_i);
|
||||
g = g.add(Q_i);
|
||||
}
|
||||
g
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
|
||||
use ark_pallas::{Fr, Projective};
|
||||
use ark_std::test_rng;
|
||||
use ark_std::One;
|
||||
use ark_std::UniformRand;
|
||||
use ark_std::Zero;
|
||||
|
||||
use crate::ccs::tests::{get_test_ccs, get_test_z};
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use crate::utils::multilinear_polynomial::tests::fix_last_variables;
|
||||
use crate::utils::virtual_polynomial::eq_eval;
|
||||
|
||||
#[test]
|
||||
fn test_compute_sum_Mz_over_boolean_hypercube() {
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
let z = get_test_z(3);
|
||||
ccs.check_relation(&z).unwrap();
|
||||
let z_mle = dense_vec_to_mle(ccs.s_prime, &z);
|
||||
|
||||
// check that evaluating over all the values x over the boolean hypercube, the result of
|
||||
// the next for loop is equal to 0
|
||||
for x in BooleanHypercube::new(ccs.s) {
|
||||
let mut r = Fr::zero();
|
||||
for i in 0..ccs.q {
|
||||
let mut Sj_prod = Fr::one();
|
||||
for j in ccs.S[i].clone() {
|
||||
let M_j = matrix_to_mle(ccs.M[j].clone());
|
||||
let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
|
||||
let sum_Mz_x = sum_Mz.evaluate(&x).unwrap();
|
||||
Sj_prod *= sum_Mz_x;
|
||||
}
|
||||
r += Sj_prod * ccs.c[i];
|
||||
}
|
||||
assert_eq!(r, Fr::zero());
|
||||
}
|
||||
}
|
||||
|
||||
/// Given an M(x,y) matrix and a random field element `r`, test that ~M(r,y) is an s'-variable polynomial which
|
||||
/// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements
|
||||
/// of the column and the values eq_i(r) where i is the row of that element
|
||||
///
|
||||
/// For example, for matrix M:
|
||||
///
|
||||
/// [2, 3, 4, 4
|
||||
/// 4, 4, 3, 2
|
||||
/// 2, 8, 9, 2
|
||||
/// 9, 4, 2, 0]
|
||||
///
|
||||
/// The polynomial ~M(r,y) is a polynomial in F^2 which evaluates to the following values in the hypercube:
|
||||
/// - M(00) = 2*eq_00(r) + 4*eq_10(r) + 2*eq_01(r) + 9*eq_11(r)
|
||||
/// - M(10) = 3*eq_00(r) + 4*eq_10(r) + 8*eq_01(r) + 4*eq_11(r)
|
||||
/// - M(01) = 4*eq_00(r) + 3*eq_10(r) + 9*eq_01(r) + 2*eq_11(r)
|
||||
/// - M(11) = 4*eq_00(r) + 2*eq_10(r) + 2*eq_01(r) + 0*eq_11(r)
|
||||
///
|
||||
/// This is used by Hypernova in LCCCS to perform a verifier-chosen random linear combination between the columns
|
||||
/// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for
|
||||
/// Universal and Updatable SNARKs".
|
||||
#[test]
|
||||
fn test_compute_M_r_y_compression() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// s = 2, s' = 3
|
||||
let ccs = get_test_ccs::<Projective>();
|
||||
|
||||
let M = ccs.M[0].clone().to_dense();
|
||||
let M_mle = matrix_to_mle(ccs.M[0].clone());
|
||||
|
||||
// Fix the polynomial ~M(r,y)
|
||||
let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
|
||||
let M_r_y = fix_last_variables(&M_mle, &r);
|
||||
|
||||
// compute M_r_y the other way around
|
||||
for j in 0..M[0].len() {
|
||||
// Go over every column of M
|
||||
let column_j: Vec<Fr> = M.clone().iter().map(|x| x[j]).collect();
|
||||
// and perform the random lincomb between the elements of the column and eq_i(r)
|
||||
let rlc = BooleanHypercube::new(ccs.s)
|
||||
.enumerate()
|
||||
.map(|(i, x)| column_j[i] * eq_eval(&x, &r).unwrap())
|
||||
.fold(Fr::zero(), |acc, result| acc + result);
|
||||
|
||||
assert_eq!(M_r_y.evaluations[j], rlc);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_sigmas_and_thetas() {
|
||||
let ccs = get_test_ccs();
|
||||
let z1 = get_test_z(3);
|
||||
let z2 = get_test_z(4);
|
||||
ccs.check_relation(&z1).unwrap();
|
||||
ccs.check_relation(&z2).unwrap();
|
||||
|
||||
let mut rng = test_rng();
|
||||
let gamma: Fr = Fr::rand(&mut rng);
|
||||
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
|
||||
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
|
||||
|
||||
// Initialize a multifolding object
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
|
||||
|
||||
let sigmas_thetas =
|
||||
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
|
||||
|
||||
let g = compute_g(
|
||||
&ccs,
|
||||
&[lcccs_instance.clone()],
|
||||
&[z1.clone()],
|
||||
&[z2.clone()],
|
||||
gamma,
|
||||
&beta,
|
||||
);
|
||||
|
||||
// we expect g(r_x_prime) to be equal to:
|
||||
// c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j
|
||||
// from compute_c_from_sigmas_and_thetas
|
||||
let expected_c = g.evaluate(&r_x_prime).unwrap();
|
||||
let c = compute_c_from_sigmas_and_thetas::<Projective>(
|
||||
&ccs,
|
||||
&sigmas_thetas,
|
||||
gamma,
|
||||
&beta,
|
||||
&vec![lcccs_instance.r_x],
|
||||
&r_x_prime,
|
||||
);
|
||||
assert_eq!(c, expected_c);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_g() {
|
||||
let ccs = get_test_ccs();
|
||||
let z1 = get_test_z(3);
|
||||
let z2 = get_test_z(4);
|
||||
ccs.check_relation(&z1).unwrap();
|
||||
ccs.check_relation(&z2).unwrap();
|
||||
|
||||
let mut rng = test_rng(); // TMP
|
||||
let gamma: Fr = Fr::rand(&mut rng);
|
||||
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
|
||||
|
||||
// Initialize a multifolding object
|
||||
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
|
||||
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
|
||||
|
||||
let mut sum_v_j_gamma = Fr::zero();
|
||||
for j in 0..lcccs_instance.v.len() {
|
||||
let gamma_j = gamma.pow([j as u64]);
|
||||
sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
|
||||
}
|
||||
|
||||
// Compute g(x) with that r_x
|
||||
let g = compute_g::<Projective>(
|
||||
&ccs,
|
||||
&[lcccs_instance.clone()],
|
||||
&[z1.clone()],
|
||||
&[z2.clone()],
|
||||
gamma,
|
||||
&beta,
|
||||
);
|
||||
|
||||
// evaluate g(x) over x \in {0,1}^s
|
||||
let mut g_on_bhc = Fr::zero();
|
||||
for x in BooleanHypercube::new(ccs.s) {
|
||||
g_on_bhc += g.evaluate(&x).unwrap();
|
||||
}
|
||||
|
||||
// evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s
|
||||
let mut sum_Lj_on_bhc = Fr::zero();
|
||||
let vec_L = lcccs_instance.compute_Ls(&ccs, &z1);
|
||||
for x in BooleanHypercube::new(ccs.s) {
|
||||
for (j, Lj) in vec_L.iter().enumerate() {
|
||||
let gamma_j = gamma.pow([j as u64]);
|
||||
sum_Lj_on_bhc += Lj.evaluate(&x).unwrap() * gamma_j;
|
||||
}
|
||||
}
|
||||
|
||||
// Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q'
|
||||
assert_ne!(g_on_bhc, Fr::zero());
|
||||
|
||||
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
|
||||
// sum of gamma^j * Lj(x) over the boolean hypercube
|
||||
assert_eq!(g_on_bhc, sum_Lj_on_bhc);
|
||||
|
||||
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
|
||||
// sum of gamma^j * v_j over j \in [t]
|
||||
assert_eq!(g_on_bhc, sum_v_j_gamma);
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/mod.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
pub mod circuits;
|
||||
pub mod hypernova;
|
||||
pub mod nova;
|
||||
pub mod protogalaxy;
|
||||
folding-schemes/src/folding/nova/circuits.rs (new file, 848 lines)
@@ -0,0 +1,848 @@
|
||||
/// contains [Nova](https://eprint.iacr.org/2021/370.pdf) related circuits
|
||||
use ark_crypto_primitives::crh::{
|
||||
poseidon::constraints::{CRHGadget, CRHParametersVar},
|
||||
CRHSchemeGadget,
|
||||
};
|
||||
use ark_crypto_primitives::sponge::{
|
||||
constraints::CryptographicSpongeVar,
|
||||
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
|
||||
Absorb, CryptographicSponge,
|
||||
};
|
||||
use ark_ec::{AffineRepr, CurveGroup, Group};
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_r1cs_std::{
|
||||
alloc::{AllocVar, AllocationMode},
|
||||
boolean::Boolean,
|
||||
eq::EqGadget,
|
||||
fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar},
|
||||
groups::GroupOpsBounds,
|
||||
prelude::CurveVar,
|
||||
ToBitsGadget, ToConstraintFieldGadget,
|
||||
};
|
||||
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
|
||||
use ark_std::fmt::Debug;
|
||||
use ark_std::{One, Zero};
|
||||
use core::{borrow::Borrow, marker::PhantomData};
|
||||
|
||||
use super::{
|
||||
cyclefold::{
|
||||
CycleFoldChallengeGadget, CycleFoldCommittedInstanceVar, NIFSFullGadget, CF_IO_LEN,
|
||||
},
|
||||
CommittedInstance,
|
||||
};
|
||||
use crate::constants::N_BITS_RO;
|
||||
use crate::folding::circuits::nonnative::{point_to_nonnative_limbs, NonNativeAffineVar};
|
||||
use crate::frontend::FCircuit;
|
||||
|
||||
/// CF1 represents the ConstraintField used for the main Nova circuit which is over E1::Fr, where
|
||||
/// E1 is the main curve where we do the folding.
|
||||
pub type CF1<C> = <<C as CurveGroup>::Affine as AffineRepr>::ScalarField;
|
||||
/// CF2 represents the ConstraintField used for the CycleFold circuit which is over E2::Fr=E1::Fq,
|
||||
/// where E2 is the auxiliary curve (from [CycleFold](https://eprint.iacr.org/2023/1192.pdf)
|
||||
/// approach) where we check the folding of the commitments (elliptic curve points).
|
||||
pub type CF2<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
|
||||
|
||||
/// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova
|
||||
/// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are
|
||||
/// represented non-natively over the constraint field.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CommittedInstanceVar<C: CurveGroup> {
|
||||
pub u: FpVar<C::ScalarField>,
|
||||
pub x: Vec<FpVar<C::ScalarField>>,
|
||||
pub cmE: NonNativeAffineVar<C::ScalarField>,
|
||||
pub cmW: NonNativeAffineVar<C::ScalarField>,
|
||||
}
|
||||
|
||||
impl<C> AllocVar<CommittedInstance<C>, CF1<C>> for CommittedInstanceVar<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
|
||||
{
|
||||
fn new_variable<T: Borrow<CommittedInstance<C>>>(
|
||||
cs: impl Into<Namespace<CF1<C>>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let u = FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?;
|
||||
let x: Vec<FpVar<C::ScalarField>> =
|
||||
Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?;
|
||||
|
||||
let cmE = NonNativeAffineVar::<C::ScalarField>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().cmE),
|
||||
mode,
|
||||
)?;
|
||||
let cmW = NonNativeAffineVar::<C::ScalarField>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().cmW),
|
||||
mode,
|
||||
)?;
|
||||
|
||||
Ok(Self { u, x, cmE, cmW })
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> CommittedInstanceVar<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
{
|
||||
/// hash implements the committed instance hash compatible with the native implementation from
|
||||
/// CommittedInstance.hash.
|
||||
/// Returns `H(i, z_0, z_i, U_i)`, where the index argument can be either `i` or `i+1`, and `U_i`
/// is the `CommittedInstance`.
|
||||
pub fn hash(
|
||||
self,
|
||||
crh_params: &CRHParametersVar<CF1<C>>,
|
||||
i: FpVar<CF1<C>>,
|
||||
z_0: Vec<FpVar<CF1<C>>>,
|
||||
z_i: Vec<FpVar<CF1<C>>>,
|
||||
) -> Result<FpVar<CF1<C>>, SynthesisError> {
|
||||
let input = vec![
|
||||
vec![i],
|
||||
z_0,
|
||||
z_i,
|
||||
vec![self.u],
|
||||
self.x,
|
||||
self.cmE.x,
|
||||
self.cmE.y,
|
||||
self.cmW.x,
|
||||
self.cmW.y,
|
||||
]
|
||||
.concat();
|
||||
CRHGadget::<C::ScalarField>::evaluate(crh_params, &input)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the circuit that does the checks of the Non-Interactive Folding Scheme Verifier
|
||||
/// described in section 4 of [Nova](https://eprint.iacr.org/2021/370.pdf), where the cmE & cmW checks are
|
||||
/// delegated to the NIFSCycleFoldGadget.
|
||||
pub struct NIFSGadget<C: CurveGroup> {
|
||||
_c: PhantomData<C>,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup> NIFSGadget<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
{
|
||||
/// Implements the constraints for NIFS.V for u and x, since cm(E) and cm(W) are delegated to
|
||||
/// the CycleFold circuit.
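/// Informally, it enforces u_3 == u_1 + r * u_2 and x_3 == x_1 + r * x_2 (element-wise).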
|
||||
pub fn verify(
|
||||
r: FpVar<CF1<C>>,
|
||||
ci1: CommittedInstanceVar<C>,
|
||||
ci2: CommittedInstanceVar<C>,
|
||||
ci3: CommittedInstanceVar<C>,
|
||||
) -> Result<Boolean<CF1<C>>, SynthesisError> {
|
||||
// ensure that: ci3.u == ci1.u + r * ci2.u
|
||||
let first_check = ci3.u.is_eq(&(ci1.u + r.clone() * ci2.u))?;
|
||||
|
||||
// ensure that: ci3.x == ci1.x + r * ci2.x
|
||||
let x_rlc = ci1
|
||||
.x
|
||||
.iter()
|
||||
.zip(ci2.x)
|
||||
.map(|(a, b)| a + &r * &b)
|
||||
.collect::<Vec<FpVar<CF1<C>>>>();
|
||||
let second_check = x_rlc.is_eq(&ci3.x)?;
|
||||
|
||||
first_check.and(&second_check)
|
||||
}
|
||||
}
|
||||
|
||||
/// ChallengeGadget computes the RO challenge used for the Nova instances' NIFS; it contains a
/// rust-native and an in-circuit compatible version.
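/// Informally, the challenge is obtained as r = Poseidon(u_i, U_i, cmT) truncated to N_BITS_RO
/// bits (see `get_challenge_native` and `get_challenge_gadget` below).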
|
||||
pub struct ChallengeGadget<C: CurveGroup> {
|
||||
_c: PhantomData<C>,
|
||||
}
|
||||
impl<C: CurveGroup> ChallengeGadget<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
<C as CurveGroup>::BaseField: PrimeField,
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
{
|
||||
pub fn get_challenge_native(
|
||||
poseidon_config: &PoseidonConfig<C::ScalarField>,
|
||||
u_i: CommittedInstance<C>,
|
||||
U_i: CommittedInstance<C>,
|
||||
cmT: C,
|
||||
) -> Result<Vec<bool>, SynthesisError> {
|
||||
let (u_cmE_x, u_cmE_y) = point_to_nonnative_limbs::<C>(u_i.cmE)?;
|
||||
let (u_cmW_x, u_cmW_y) = point_to_nonnative_limbs::<C>(u_i.cmW)?;
|
||||
let (U_cmE_x, U_cmE_y) = point_to_nonnative_limbs::<C>(U_i.cmE)?;
|
||||
let (U_cmW_x, U_cmW_y) = point_to_nonnative_limbs::<C>(U_i.cmW)?;
|
||||
let (cmT_x, cmT_y) = point_to_nonnative_limbs::<C>(cmT)?;
|
||||
|
||||
let mut sponge = PoseidonSponge::<C::ScalarField>::new(poseidon_config);
|
||||
let input = vec![
|
||||
vec![u_i.u],
|
||||
u_i.x.clone(),
|
||||
u_cmE_x,
|
||||
u_cmE_y,
|
||||
u_cmW_x,
|
||||
u_cmW_y,
|
||||
vec![U_i.u],
|
||||
U_i.x.clone(),
|
||||
U_cmE_x,
|
||||
U_cmE_y,
|
||||
U_cmW_x,
|
||||
U_cmW_y,
|
||||
cmT_x,
|
||||
cmT_y,
|
||||
]
|
||||
.concat();
|
||||
sponge.absorb(&input);
|
||||
let bits = sponge.squeeze_bits(N_BITS_RO);
|
||||
Ok(bits)
|
||||
}
|
||||
|
||||
// compatible with the native get_challenge_native
|
||||
pub fn get_challenge_gadget(
|
||||
cs: ConstraintSystemRef<C::ScalarField>,
|
||||
poseidon_config: &PoseidonConfig<C::ScalarField>,
|
||||
u_i: CommittedInstanceVar<C>,
|
||||
U_i: CommittedInstanceVar<C>,
|
||||
cmT: NonNativeAffineVar<C::ScalarField>,
|
||||
) -> Result<Vec<Boolean<C::ScalarField>>, SynthesisError> {
|
||||
let mut sponge = PoseidonSpongeVar::<C::ScalarField>::new(cs, poseidon_config);
|
||||
|
||||
let input: Vec<FpVar<C::ScalarField>> = vec![
|
||||
vec![u_i.u.clone()],
|
||||
u_i.x.clone(),
|
||||
u_i.cmE.x,
|
||||
u_i.cmE.y,
|
||||
u_i.cmW.x,
|
||||
u_i.cmW.y,
|
||||
vec![U_i.u.clone()],
|
||||
U_i.x.clone(),
|
||||
U_i.cmE.x,
|
||||
U_i.cmE.y,
|
||||
U_i.cmW.x,
|
||||
U_i.cmW.y,
|
||||
cmT.x,
|
||||
cmT.y,
|
||||
]
|
||||
.concat();
|
||||
sponge.absorb(&input)?;
|
||||
let bits = sponge.squeeze_bits(N_BITS_RO)?;
|
||||
Ok(bits)
|
||||
}
|
||||
}
|
||||
|
||||
/// AugmentedFCircuit implements the F' circuit (augmented F) defined in
|
||||
/// [Nova](https://eprint.iacr.org/2021/370.pdf) together with the extra constraints defined in
|
||||
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
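/// Informally, it enforces (see the numbered checks in `generate_constraints`, most of them only
/// in the non-base case, i.e. when i != 0): u_i.x == H(i, z_0, z_i, U_i); u_i.cmE == cm(0) and
/// u_i.u == 1; NIFS.V(r, u_i, U_i) == U_{i+1} for the u and x parts; u_{i+1}.x == H(i+1, z_0,
/// z_{i+1}, U_{i+1}); plus the CycleFold checks for the folding of the commitments.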
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AugmentedFCircuit<
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<CF1<C1>>,
|
||||
> where
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
pub _gc2: PhantomData<GC2>,
|
||||
pub poseidon_config: PoseidonConfig<CF1<C1>>,
|
||||
pub i: Option<CF1<C1>>,
|
||||
pub z_0: Option<Vec<C1::ScalarField>>,
|
||||
pub z_i: Option<Vec<C1::ScalarField>>,
|
||||
pub u_i: Option<CommittedInstance<C1>>,
|
||||
pub U_i: Option<CommittedInstance<C1>>,
|
||||
pub U_i1: Option<CommittedInstance<C1>>,
|
||||
pub cmT: Option<C1>,
|
||||
pub F: FC, // F circuit
|
||||
pub x: Option<CF1<C1>>, // public inputs (u_{i+1}.x)
|
||||
|
||||
// cyclefold verifier on C1
|
||||
pub cf_u_i: Option<CommittedInstance<C2>>,
|
||||
pub cf_U_i: Option<CommittedInstance<C2>>,
|
||||
pub cf_U_i1: Option<CommittedInstance<C2>>,
|
||||
pub cf_cmT: Option<C2>,
|
||||
pub cf_r_nonnat: Option<C2::ScalarField>,
|
||||
}
|
||||
|
||||
impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>>
|
||||
AugmentedFCircuit<C1, C2, GC2, FC>
|
||||
where
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
pub fn empty(poseidon_config: &PoseidonConfig<CF1<C1>>, F_circuit: FC) -> Self {
|
||||
Self {
|
||||
_gc2: PhantomData,
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
i: None,
|
||||
z_0: None,
|
||||
z_i: None,
|
||||
u_i: None,
|
||||
U_i: None,
|
||||
U_i1: None,
|
||||
cmT: None,
|
||||
F: F_circuit,
|
||||
x: None,
|
||||
// cyclefold values
|
||||
cf_u_i: None,
|
||||
cf_U_i: None,
|
||||
cf_U_i1: None,
|
||||
cf_cmT: None,
|
||||
cf_r_nonnat: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C1, C2, GC2, FC> ConstraintSynthesizer<CF1<C1>> for AugmentedFCircuit<C1, C2, GC2, FC>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<CF1<C1>>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
|
||||
let i = FpVar::<CF1<C1>>::new_witness(cs.clone(), || {
|
||||
Ok(self.i.unwrap_or_else(CF1::<C1>::zero))
|
||||
})?;
|
||||
let z_0 = Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
|
||||
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
|
||||
})?;
|
||||
let z_i = Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
|
||||
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
|
||||
})?;
|
||||
|
||||
let u_dummy_native = CommittedInstance::<C1>::dummy(1);
|
||||
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let U_i1 = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let cmT =
|
||||
NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
|
||||
let x =
|
||||
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.x.unwrap_or_else(CF1::<C1>::zero)))?;
|
||||
|
||||
let crh_params = CRHParametersVar::<C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
self.poseidon_config.clone(),
|
||||
)?;
|
||||
|
||||
// get z_{i+1} from the F circuit
|
||||
let z_i1 = self.F.generate_step_constraints(cs.clone(), z_i.clone())?;
|
||||
|
||||
let zero = FpVar::<CF1<C1>>::new_constant(cs.clone(), CF1::<C1>::zero())?;
|
||||
let is_not_basecase = i.is_neq(&zero)?;
|
||||
|
||||
// 1. u_i.x == H(i, z_0, z_i, U_i)
|
||||
let u_i_x = U_i
|
||||
.clone()
|
||||
.hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?;
|
||||
|
||||
// check that h == u_i.x
|
||||
(u_i.x[0]).conditional_enforce_equal(&u_i_x, &is_not_basecase)?;
|
||||
|
||||
// 2. u_i.cmE==cm(0), u_i.u==1
|
||||
let zero_x = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
C1::BaseField::zero(),
|
||||
)?
|
||||
.to_constraint_field()?;
|
||||
let zero_y = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
C1::BaseField::one(),
|
||||
)?
|
||||
.to_constraint_field()?;
|
||||
(u_i.cmE.x.is_eq(&zero_x)?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
(u_i.cmE.y.is_eq(&zero_y)?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
(u_i.u.is_one()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
|
||||
// 3. nifs.verify, checks that folding u_i & U_i obtains U_{i+1}.
|
||||
// compute r = H(u_i, U_i, cmT)
|
||||
let r_bits = ChallengeGadget::<C1>::get_challenge_gadget(
|
||||
cs.clone(),
|
||||
&self.poseidon_config,
|
||||
u_i.clone(),
|
||||
U_i.clone(),
|
||||
cmT.clone(),
|
||||
)?;
|
||||
let r = Boolean::le_bits_to_fp_var(&r_bits)?;
|
||||
|
||||
// Notice that NIFSGadget::verify is not checking the folding of cmE & cmW, since it will
|
||||
// be done on the other curve.
|
||||
let nifs_check = NIFSGadget::<C1>::verify(r, u_i.clone(), U_i.clone(), U_i1.clone())?;
|
||||
nifs_check.conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
|
||||
// 4. u_{i+1}.x = H(i+1, z_0, z_i+1, U_{i+1}), this is the output of F'
|
||||
let u_i1_x = U_i1.clone().hash(
|
||||
&crh_params,
|
||||
i + FpVar::<CF1<C1>>::one(),
|
||||
z_0.clone(),
|
||||
z_i1.clone(),
|
||||
)?;
|
||||
|
||||
u_i1_x.enforce_equal(&x)?;
|
||||
|
||||
// CycleFold part
|
||||
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(CF_IO_LEN);
|
||||
let cf_u_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_u_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
|
||||
})?;
|
||||
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
|
||||
})?;
|
||||
let cf_U_i1 = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_U_i1.unwrap_or_else(|| cf_u_dummy_native.clone()))
|
||||
})?;
|
||||
let cf_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf_cmT.unwrap_or_else(C2::zero)))?;
|
||||
// cf_r_nonnat is an auxiliary input
|
||||
let cf_r_nonnat =
|
||||
NonNativeFieldVar::<C2::ScalarField, CF2<C2>>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_r_nonnat.unwrap_or_else(C2::ScalarField::zero))
|
||||
})?;
|
||||
|
||||
// check that cf_u_i.x == [u_i, U_i, U_{i+1}]
|
||||
let incircuit_x = vec![
|
||||
u_i.cmE.x, u_i.cmE.y, u_i.cmW.x, u_i.cmW.y, U_i.cmE.x, U_i.cmE.y, U_i.cmW.x, U_i.cmW.y,
|
||||
U_i1.cmE.x, U_i1.cmE.y, U_i1.cmW.x, U_i1.cmW.y,
|
||||
]
|
||||
.concat();
|
||||
|
||||
let mut cf_u_i_x: Vec<FpVar<CF2<C2>>> = vec![];
|
||||
for x_i in cf_u_i.x.iter() {
|
||||
let mut x_fpvar = x_i.to_constraint_field()?;
|
||||
cf_u_i_x.append(&mut x_fpvar);
|
||||
}
|
||||
cf_u_i_x.conditional_enforce_equal(&incircuit_x, &is_not_basecase)?;
|
||||
|
||||
// cf_r_bits is denoted by rho* in the paper
|
||||
let cf_r_bits = CycleFoldChallengeGadget::<C2, GC2>::get_challenge_gadget(
|
||||
cs.clone(),
|
||||
&self.poseidon_config,
|
||||
cf_u_i.clone(),
|
||||
cf_U_i.clone(),
|
||||
cf_cmT.clone(),
|
||||
)?;
|
||||
// assert that cf_r_bits == cf_r_nonnat converted to bits. cf_r_nonnat is just an auxiliary
|
||||
// value used to compute RLC of NonNativeFieldVar values, since we can convert
|
||||
// NonNativeFieldVar into Vec<Boolean>, but not in the other direction.
|
||||
let cf_r_nonnat_bits = cf_r_nonnat.to_bits_le()?;
|
||||
cf_r_bits.conditional_enforce_equal(&cf_r_nonnat_bits[..N_BITS_RO], &is_not_basecase)?;
|
||||
|
||||
// check cf_u_i.cmE=0, cf_u_i.u=1
|
||||
(cf_u_i.cmE.is_zero()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
(cf_u_i.u.is_one()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
|
||||
// check the folding of all the parameters of the CycleFold instances, where the elliptic
// curve point relations are checked natively in the Curve1 circuit (this one)
|
||||
let v = NIFSFullGadget::<C2, GC2>::verify(
|
||||
cf_r_bits,
|
||||
cf_r_nonnat,
|
||||
cf_cmT,
|
||||
cf_U_i,
|
||||
cf_u_i,
|
||||
cf_U_i1,
|
||||
)?;
|
||||
v.conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_ff::BigInteger;
|
||||
use ark_pallas::{Fq, Fr, Projective};
|
||||
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
|
||||
use ark_relations::r1cs::{ConstraintLayer, ConstraintSystem, TracingMode};
|
||||
use ark_std::One;
|
||||
use ark_std::UniformRand;
|
||||
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
|
||||
use crate::ccs::r1cs::{extract_r1cs, extract_w_x};
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
|
||||
use crate::folding::nova::{
|
||||
get_committed_instance_coordinates, nifs::NIFS, traits::NovaR1CS, Witness,
|
||||
};
|
||||
use crate::frontend::tests::CubicFCircuit;
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
|
||||
#[test]
|
||||
fn test_committed_instance_var() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
|
||||
let ci = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: vec![Fr::rand(&mut rng); 1],
|
||||
};
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
let ciVar =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci.clone())).unwrap();
|
||||
assert_eq!(ciVar.u.value().unwrap(), ci.u);
|
||||
assert_eq!(ciVar.x.value().unwrap(), ci.x);
|
||||
// the values cmE and cmW are checked in the CycleFold's circuit
|
||||
// CommittedInstanceInCycleFoldVar in
|
||||
// nova::cyclefold::tests::test_committed_instance_cyclefold_var
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nifs_gadget() {
|
||||
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, _, r_Fr) = prepare_simple_fold_inputs();
|
||||
|
||||
let ci3_verifier = NIFS::<Projective, Pedersen<Projective>>::verify(r_Fr, &ci1, &ci2, &cmT);
|
||||
assert_eq!(ci3_verifier, ci3);
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
let rVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(r_Fr)).unwrap();
|
||||
let ci1Var =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci1.clone()))
|
||||
.unwrap();
|
||||
let ci2Var =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci2.clone()))
|
||||
.unwrap();
|
||||
let ci3Var =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci3.clone()))
|
||||
.unwrap();
|
||||
|
||||
let nifs_check = NIFSGadget::<Projective>::verify(
|
||||
rVar.clone(),
|
||||
ci1Var.clone(),
|
||||
ci2Var.clone(),
|
||||
ci3Var.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
nifs_check.enforce_equal(&Boolean::<Fr>::TRUE).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_committed_instance_hash() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let i = Fr::from(3_u32);
|
||||
let z_0 = vec![Fr::from(3_u32)];
|
||||
let z_i = vec![Fr::from(3_u32)];
|
||||
let ci = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: vec![Fr::rand(&mut rng); 1],
|
||||
};
|
||||
|
||||
// compute the CommittedInstance hash natively
|
||||
let h = ci
|
||||
.hash(&poseidon_config, i, z_0.clone(), z_i.clone())
|
||||
.unwrap();
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
let iVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(i)).unwrap();
|
||||
let z_0Var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap();
|
||||
let z_iVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap();
|
||||
let ciVar =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci.clone())).unwrap();
|
||||
|
||||
let crh_params = CRHParametersVar::<Fr>::new_constant(cs.clone(), poseidon_config).unwrap();
|
||||
|
||||
// compute the CommittedInstance hash in-circuit
|
||||
let hVar = ciVar.hash(&crh_params, iVar, z_0Var, z_iVar).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
|
||||
// check that the natively computed and in-circuit computed hashes match
|
||||
assert_eq!(hVar.value().unwrap(), h);
|
||||
}
|
||||
|
||||
// checks that the gadget and native implementations of the challenge computation match
|
||||
#[test]
|
||||
fn test_challenge_gadget() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let u_i = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: vec![Fr::rand(&mut rng); 1],
|
||||
};
|
||||
let U_i = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: vec![Fr::rand(&mut rng); 1],
|
||||
};
|
||||
let cmT = Projective::rand(&mut rng);
|
||||
|
||||
// compute the challenge natively
|
||||
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
|
||||
&poseidon_config,
|
||||
u_i.clone(),
|
||||
U_i.clone(),
|
||||
cmT,
|
||||
)
|
||||
.unwrap();
|
||||
let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
let u_iVar =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(u_i.clone()))
|
||||
.unwrap();
|
||||
let U_iVar =
|
||||
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(U_i.clone()))
|
||||
.unwrap();
|
||||
let cmTVar = NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(cmT)).unwrap();
|
||||
|
||||
// compute the challenge in-circuit
|
||||
let r_bitsVar = ChallengeGadget::<Projective>::get_challenge_gadget(
|
||||
cs.clone(),
|
||||
&poseidon_config,
|
||||
u_iVar,
|
||||
U_iVar,
|
||||
cmTVar,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
|
||||
// check that the natively computed and in-circuit computed challenges match
|
||||
let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap();
|
||||
assert_eq!(rVar.value().unwrap(), r);
|
||||
assert_eq!(r_bitsVar.value().unwrap(), r_bits);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// test_augmented_f_circuit folds the CubicFCircuit circuit in multiple iterations, feeding the
|
||||
/// values into the AugmentedFCircuit.
|
||||
fn test_augmented_f_circuit() {
|
||||
let mut layer = ConstraintLayer::default();
|
||||
layer.mode = TracingMode::OnlyConstraints;
|
||||
let subscriber = tracing_subscriber::Registry::default().with(layer);
|
||||
let _guard = tracing::subscriber::set_default(subscriber);
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
// compute z vector for the initial instance
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
// prepare the circuit to obtain its R1CS
|
||||
let F_circuit = CubicFCircuit::<Fr>::new(());
|
||||
let mut augmented_F_circuit =
|
||||
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>>::empty(
|
||||
&poseidon_config,
|
||||
F_circuit,
|
||||
);
|
||||
augmented_F_circuit
|
||||
.generate_constraints(cs.clone())
|
||||
.unwrap();
|
||||
cs.finalize();
|
||||
println!("num_constraints={:?}", cs.num_constraints());
|
||||
let cs = cs.into_inner().unwrap();
|
||||
let r1cs = extract_r1cs::<Fr>(&cs);
|
||||
let (w, x) = extract_w_x::<Fr>(&cs);
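// (the full R1CS assignment is z = (1, x, w), so the matrices have 1 + |x| + |w| columns and l = |x| public inputs)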
|
||||
assert_eq!(1 + x.len() + w.len(), r1cs.A.n_cols);
|
||||
assert_eq!(r1cs.l, x.len());
|
||||
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_rows);
|
||||
|
||||
// first step, set z_i=z_0=3 and z_{i+1}=35 (initial values)
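// (the CubicFCircuit test circuit is assumed to compute z_{i+1} = z_i^3 + z_i + 5, hence 3^3 + 3 + 5 = 35)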
|
||||
let z_0 = vec![Fr::from(3_u32)];
|
||||
let mut z_i = z_0.clone();
|
||||
let mut z_i1 = vec![Fr::from(35_u32)];
|
||||
|
||||
let w_dummy = Witness::<Projective>::new(vec![Fr::zero(); w.len()], r1cs.A.n_rows);
|
||||
let u_dummy = CommittedInstance::<Projective>::dummy(x.len());
|
||||
|
||||
// W_i is a 'dummy witness', all zeroes, but with the size corresponding to the R1CS that
|
||||
// we're working with.
|
||||
// set U_i <-- dummy instance
|
||||
let mut W_i = w_dummy.clone();
|
||||
let mut U_i = u_dummy.clone();
|
||||
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
|
||||
|
||||
let mut w_i = w_dummy.clone();
|
||||
let mut u_i = u_dummy.clone();
|
||||
let (mut W_i1, mut U_i1, mut cmT): (
|
||||
Witness<Projective>,
|
||||
CommittedInstance<Projective>,
|
||||
Projective,
|
||||
) = (w_dummy.clone(), u_dummy.clone(), Projective::generator());
|
||||
// as expected, dummy instances pass the relaxed_r1cs check
|
||||
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
|
||||
|
||||
let mut i = Fr::zero();
|
||||
let mut u_i1_x: Fr;
|
||||
for _ in 0..4 {
|
||||
if i == Fr::zero() {
|
||||
// base case: i=0, z_i=z_0, U_i = U_d := dummy instance
// u_1.x = H(1, z_0, z_1, U_1) (with U_1 = U_i = dummy in the base case)
|
||||
u_i1_x = U_i
|
||||
.hash(&poseidon_config, Fr::one(), z_0.clone(), z_i1.clone())
|
||||
.unwrap();
|
||||
|
||||
// base case
|
||||
augmented_F_circuit =
|
||||
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>> {
|
||||
_gc2: PhantomData,
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
i: Some(i), // = 0
|
||||
z_0: Some(z_0.clone()), // = z_i=3
|
||||
z_i: Some(z_i.clone()),
|
||||
u_i: Some(u_i.clone()), // = dummy
|
||||
U_i: Some(U_i.clone()), // = dummy
|
||||
U_i1: Some(U_i1.clone()), // = dummy
|
||||
cmT: Some(cmT),
|
||||
F: F_circuit,
|
||||
x: Some(u_i1_x),
|
||||
// cyclefold instances (not tested in this test)
|
||||
cf_u_i: None,
|
||||
cf_U_i: None,
|
||||
cf_U_i1: None,
|
||||
cf_cmT: None,
|
||||
cf_r_nonnat: None,
|
||||
};
|
||||
} else {
|
||||
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
|
||||
|
||||
// U_{i+1}
|
||||
let T: Vec<Fr>;
|
||||
(T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
|
||||
&pedersen_params,
|
||||
&r1cs,
|
||||
&w_i,
|
||||
&u_i,
|
||||
&W_i,
|
||||
&U_i,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// get challenge r
|
||||
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
|
||||
&poseidon_config,
|
||||
u_i.clone(),
|
||||
U_i.clone(),
|
||||
cmT,
|
||||
)
|
||||
.unwrap();
|
||||
let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
|
||||
|
||||
(W_i1, U_i1) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
|
||||
r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
|
||||
|
||||
// folded instance output (public input, x)
|
||||
// u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1})
|
||||
u_i1_x = U_i1
|
||||
.hash(&poseidon_config, i + Fr::one(), z_0.clone(), z_i1.clone())
|
||||
.unwrap();
|
||||
|
||||
// set up dummy cyclefold instances just for the sake of this test. Warning: this
// is only done because this test does not exercise the cyclefold side of things.
|
||||
let cf_W_i = Witness::<Projective2>::new(vec![Fq::zero(); 1], 1);
|
||||
let cf_U_i = CommittedInstance::<Projective2>::dummy(CF_IO_LEN);
|
||||
let cf_u_i_x = [
|
||||
get_committed_instance_coordinates(&u_i),
|
||||
get_committed_instance_coordinates(&U_i),
|
||||
get_committed_instance_coordinates(&U_i1),
|
||||
]
|
||||
.concat();
|
||||
let cf_u_i = CommittedInstance::<Projective2> {
|
||||
cmE: cf_U_i.cmE,
|
||||
u: Fq::one(),
|
||||
cmW: cf_U_i.cmW,
|
||||
x: cf_u_i_x,
|
||||
};
|
||||
let cf_w_i = cf_W_i.clone();
|
||||
let (cf_T, cf_cmT): (Vec<Fq>, Projective2) =
|
||||
(vec![Fq::zero(); cf_W_i.E.len()], Projective2::zero());
|
||||
let cf_r_bits =
|
||||
CycleFoldChallengeGadget::<Projective2, GVar2>::get_challenge_native(
|
||||
&poseidon_config,
|
||||
cf_u_i.clone(),
|
||||
cf_U_i.clone(),
|
||||
cf_cmT,
|
||||
)
|
||||
.unwrap();
|
||||
let cf_r_Fq = Fq::from_bigint(BigInteger::from_bits_le(&cf_r_bits)).unwrap();
|
||||
let (_, cf_U_i1) = NIFS::<Projective2, Pedersen<Projective2>>::fold_instances(
|
||||
cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
augmented_F_circuit =
|
||||
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>> {
|
||||
_gc2: PhantomData,
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
i: Some(i),
|
||||
z_0: Some(z_0.clone()),
|
||||
z_i: Some(z_i.clone()),
|
||||
u_i: Some(u_i),
|
||||
U_i: Some(U_i.clone()),
|
||||
U_i1: Some(U_i1.clone()),
|
||||
cmT: Some(cmT),
|
||||
F: F_circuit,
|
||||
x: Some(u_i1_x),
|
||||
cf_u_i: Some(cf_u_i),
|
||||
cf_U_i: Some(cf_U_i),
|
||||
cf_U_i1: Some(cf_U_i1),
|
||||
cf_cmT: Some(cf_cmT),
|
||||
cf_r_nonnat: Some(cf_r_Fq),
|
||||
};
|
||||
}
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
augmented_F_circuit
|
||||
.generate_constraints(cs.clone())
|
||||
.unwrap();
|
||||
let is_satisfied = cs.is_satisfied().unwrap();
|
||||
if !is_satisfied {
|
||||
dbg!(cs.which_is_unsatisfied().unwrap());
|
||||
}
|
||||
assert!(is_satisfied);
|
||||
|
||||
cs.finalize();
|
||||
let cs = cs.into_inner().unwrap();
|
||||
let (w_i1, x_i1) = extract_w_x::<Fr>(&cs);
|
||||
assert_eq!(x_i1.len(), 1);
|
||||
assert_eq!(x_i1[0], u_i1_x);
|
||||
|
||||
// compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we
|
||||
// assign them directly to w_i, u_i.
|
||||
w_i = Witness::<Projective>::new(w_i1.clone(), r1cs.A.n_rows);
|
||||
u_i = w_i
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, vec![u_i1_x])
|
||||
.unwrap();
|
||||
|
||||
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
|
||||
|
||||
// set values for next iteration
|
||||
i += Fr::one();
|
||||
// advance the F circuit state
|
||||
z_i = z_i1.clone();
|
||||
z_i1 = F_circuit.step_native(z_i.clone()).unwrap();
|
||||
U_i = U_i1.clone();
|
||||
W_i = W_i1.clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/cyclefold.rs (new file)
@@ -0,0 +1,567 @@
|
||||
/// contains [CycleFold](https://eprint.iacr.org/2023/1192.pdf) related circuits
|
||||
use ark_crypto_primitives::sponge::{
|
||||
constraints::CryptographicSpongeVar,
|
||||
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
|
||||
Absorb, CryptographicSponge,
|
||||
};
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_r1cs_std::{
|
||||
alloc::{AllocVar, AllocationMode},
|
||||
bits::uint8::UInt8,
|
||||
boolean::Boolean,
|
||||
eq::EqGadget,
|
||||
fields::{fp::FpVar, nonnative::NonNativeFieldVar},
|
||||
groups::GroupOpsBounds,
|
||||
prelude::CurveVar,
|
||||
ToBytesGadget,
|
||||
};
|
||||
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
|
||||
use ark_serialize::CanonicalSerialize;
|
||||
use ark_std::fmt::Debug;
|
||||
use ark_std::Zero;
|
||||
use core::{borrow::Borrow, marker::PhantomData};
|
||||
|
||||
use super::circuits::CF2;
|
||||
use super::CommittedInstance;
|
||||
use crate::constants::N_BITS_RO;
|
||||
use crate::Error;
|
||||
|
||||
// public inputs length for the CycleFoldCircuit: |[u_i, U_i, U_{i+1}]|
|
||||
pub const CF_IO_LEN: usize = 12;
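// (12 = 3 instances (u_i, U_i, U_{i+1}) x 2 commitments (cmE, cmW) x 2 affine coordinates each)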
|
||||
|
||||
/// CycleFoldCommittedInstanceVar is the CycleFold CommittedInstance representation in the Nova
|
||||
/// circuit.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CycleFoldCommittedInstanceVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
|
||||
where
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
_c: PhantomData<C>,
|
||||
pub cmE: GC,
|
||||
pub u: NonNativeFieldVar<C::ScalarField, CF2<C>>,
|
||||
pub cmW: GC,
|
||||
pub x: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
|
||||
}
|
||||
impl<C, GC> AllocVar<CommittedInstance<C>, CF2<C>> for CycleFoldCommittedInstanceVar<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
fn new_variable<T: Borrow<CommittedInstance<C>>>(
|
||||
cs: impl Into<Namespace<CF2<C>>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?;
|
||||
let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?;
|
||||
let u = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().u),
|
||||
mode,
|
||||
)?;
|
||||
let x = Vec::<NonNativeFieldVar<C::ScalarField, CF2<C>>>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().x.clone()),
|
||||
mode,
|
||||
)?;
|
||||
|
||||
Ok(Self {
|
||||
_c: PhantomData,
|
||||
cmE,
|
||||
u,
|
||||
cmW,
|
||||
x,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// CommittedInstanceInCycleFoldVar represents the Nova CommittedInstance in the CycleFold circuit,
/// where the commitments to E and W (cmE and cmW) from the CommittedInstance are represented as
/// native points, since they are folded over the auxiliary curve's constraint field (E2::Fr =
/// E1::Fq).
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CommittedInstanceInCycleFoldVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
|
||||
where
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
_c: PhantomData<C>,
|
||||
pub cmE: GC,
|
||||
pub cmW: GC,
|
||||
}
|
||||
|
||||
impl<C, GC> AllocVar<CommittedInstance<C>, CF2<C>> for CommittedInstanceInCycleFoldVar<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
fn new_variable<T: Borrow<CommittedInstance<C>>>(
|
||||
cs: impl Into<Namespace<CF2<C>>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?;
|
||||
let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?;
|
||||
|
||||
Ok(Self {
|
||||
_c: PhantomData,
|
||||
cmE,
|
||||
cmW,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// NIFSinCycleFoldGadget performs the Nova NIFS.V elliptic curve points relation checks in the other
|
||||
/// curve (natively) following [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
|
||||
pub struct NIFSinCycleFoldGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
|
||||
_c: PhantomData<C>,
|
||||
_gc: PhantomData<GC>,
|
||||
}
|
||||
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> NIFSinCycleFoldGadget<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
pub fn verify(
|
||||
r_bits: Vec<Boolean<CF2<C>>>,
|
||||
cmT: GC,
|
||||
ci1: CommittedInstanceInCycleFoldVar<C, GC>,
|
||||
ci2: CommittedInstanceInCycleFoldVar<C, GC>,
|
||||
ci3: CommittedInstanceInCycleFoldVar<C, GC>,
|
||||
) -> Result<Boolean<CF2<C>>, SynthesisError> {
|
||||
// cm(E) check: ci3.cmE == ci1.cmE + r * cmT + r^2 * ci2.cmE
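// (evaluated in Horner form, (ci2.cmE * r + cmT) * r + ci1.cmE, so only two scalar muls over r are needed)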
|
||||
let first_check = ci3.cmE.is_eq(
|
||||
&((ci2.cmE.scalar_mul_le(r_bits.iter())? + cmT).scalar_mul_le(r_bits.iter())?
|
||||
+ ci1.cmE),
|
||||
)?;
|
||||
// cm(W) check: ci3.cmW == ci1.cmW + r * ci2.cmW
|
||||
let second_check = ci3
|
||||
.cmW
|
||||
.is_eq(&(ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?))?;
|
||||
|
||||
first_check.and(&second_check)
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the gadget used in the AugmentedFCircuit to verify the CycleFold instances folding,
|
||||
/// which checks the correct RLC of u,x,cmE,cmW (hence the name containing 'Full', since it checks
|
||||
/// all the RLC values, not only the native ones). It assumes that ci2.cmE=0, ci2.u=1.
|
||||
pub struct NIFSFullGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
|
||||
_c: PhantomData<C>,
|
||||
_gc: PhantomData<GC>,
|
||||
}
|
||||
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> NIFSFullGadget<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
pub fn verify(
|
||||
r_bits: Vec<Boolean<CF2<C>>>,
|
||||
r_nonnat: NonNativeFieldVar<C::ScalarField, CF2<C>>,
|
||||
cmT: GC,
|
||||
// ci2 is assumed to always have cmE=0, u=1 (these checks are done prior to calling this method)
|
||||
ci1: CycleFoldCommittedInstanceVar<C, GC>,
|
||||
ci2: CycleFoldCommittedInstanceVar<C, GC>,
|
||||
ci3: CycleFoldCommittedInstanceVar<C, GC>,
|
||||
) -> Result<Boolean<CF2<C>>, SynthesisError> {
|
||||
// cm(E) check: ci3.cmE == ci1.cmE + r * cmT (ci2.cmE=0)
|
||||
let first_check = ci3
|
||||
.cmE
|
||||
.is_eq(&(cmT.scalar_mul_le(r_bits.iter())? + ci1.cmE))?;
|
||||
|
||||
// cm(W) check: ci3.cmW == ci1.cmW + r * ci2.cmW
|
||||
let second_check = ci3
|
||||
.cmW
|
||||
.is_eq(&(ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?))?;
|
||||
|
||||
let u_rlc: NonNativeFieldVar<C::ScalarField, CF2<C>> = ci1.u + r_nonnat.clone();
|
||||
let third_check = u_rlc.is_eq(&ci3.u)?;
|
||||
|
||||
// ensure that: ci3.x == ci1.x + r * ci2.x
|
||||
let x_rlc: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> = ci1
|
||||
.x
|
||||
.iter()
|
||||
.zip(ci2.x)
|
||||
.map(|(a, b)| a + &r_nonnat * &b)
|
||||
.collect::<Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>>();
|
||||
let fourth_check = x_rlc.is_eq(&ci3.x)?;
|
||||
|
||||
first_check
|
||||
.and(&second_check)?
|
||||
.and(&third_check)?
|
||||
.and(&fourth_check)
|
||||
}
|
||||
}
|
||||
|
||||
/// CycleFoldChallengeGadget computes the RO challenge used for the CycleFold instances' NIFS. It
/// provides both a rust-native and an in-circuit compatible version.
|
||||
pub struct CycleFoldChallengeGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
|
||||
_c: PhantomData<C>, // Nova's Curve2, the one used for the CycleFold circuit
|
||||
_gc: PhantomData<GC>,
|
||||
}
|
||||
impl<C, GC> CycleFoldChallengeGadget<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
<C as CurveGroup>::BaseField: PrimeField,
|
||||
<C as CurveGroup>::BaseField: Absorb,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
pub fn get_challenge_native(
|
||||
poseidon_config: &PoseidonConfig<C::BaseField>,
|
||||
u_i: CommittedInstance<C>,
|
||||
U_i: CommittedInstance<C>,
|
||||
cmT: C,
|
||||
) -> Result<Vec<bool>, Error> {
|
||||
let mut sponge = PoseidonSponge::<C::BaseField>::new(poseidon_config);
|
||||
|
||||
let u_i_cmE_bytes = point_to_bytes(u_i.cmE);
|
||||
let u_i_cmW_bytes = point_to_bytes(u_i.cmW);
|
||||
let U_i_cmE_bytes = point_to_bytes(U_i.cmE);
|
||||
let U_i_cmW_bytes = point_to_bytes(U_i.cmW);
|
||||
let cmT_bytes = point_to_bytes(cmT);
|
||||
|
||||
let mut u_i_u_bytes = Vec::new();
|
||||
u_i.u.serialize_uncompressed(&mut u_i_u_bytes)?;
|
||||
let mut u_i_x_bytes = Vec::new();
|
||||
u_i.x.serialize_uncompressed(&mut u_i_x_bytes)?;
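// skip the first 8 bytes, which (in ark-serialize) encode the Vec length as a u64, so that only
// the raw field-element bytes are absorbed (matching the in-circuit version below)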
|
||||
u_i_x_bytes = u_i_x_bytes[8..].to_vec();
|
||||
let mut U_i_u_bytes = Vec::new();
|
||||
U_i.u.serialize_uncompressed(&mut U_i_u_bytes)?;
|
||||
let mut U_i_x_bytes = Vec::new();
|
||||
U_i.x.serialize_uncompressed(&mut U_i_x_bytes)?;
|
||||
U_i_x_bytes = U_i_x_bytes[8..].to_vec();
|
||||
|
||||
let input: Vec<u8> = [
|
||||
u_i_cmE_bytes,
|
||||
u_i_u_bytes,
|
||||
u_i_cmW_bytes,
|
||||
u_i_x_bytes,
|
||||
U_i_cmE_bytes,
|
||||
U_i_u_bytes,
|
||||
U_i_cmW_bytes,
|
||||
U_i_x_bytes,
|
||||
cmT_bytes,
|
||||
]
|
||||
.concat();
|
||||
sponge.absorb(&input);
|
||||
let bits = sponge.squeeze_bits(N_BITS_RO);
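// the challenge is returned as N_BITS_RO bits so that the exact same value can be recomputed
// bit-by-bit by the in-circuit version below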
|
||||
Ok(bits)
|
||||
}
|
||||
// compatible with the native get_challenge_native
|
||||
pub fn get_challenge_gadget(
|
||||
cs: ConstraintSystemRef<C::BaseField>,
|
||||
poseidon_config: &PoseidonConfig<C::BaseField>,
|
||||
u_i: CycleFoldCommittedInstanceVar<C, GC>,
|
||||
U_i: CycleFoldCommittedInstanceVar<C, GC>,
|
||||
cmT: GC,
|
||||
) -> Result<Vec<Boolean<C::BaseField>>, SynthesisError> {
|
||||
let mut sponge = PoseidonSpongeVar::<C::BaseField>::new(cs, poseidon_config);
|
||||
|
||||
let u_i_x_bytes: Vec<UInt8<CF2<C>>> = u_i
|
||||
.x
|
||||
.iter()
|
||||
.flat_map(|e| e.to_bytes().unwrap_or(vec![]))
|
||||
.collect::<Vec<UInt8<CF2<C>>>>();
|
||||
let U_i_x_bytes: Vec<UInt8<CF2<C>>> = U_i
|
||||
.x
|
||||
.iter()
|
||||
.flat_map(|e| e.to_bytes().unwrap_or(vec![]))
|
||||
.collect::<Vec<UInt8<CF2<C>>>>();
|
||||
|
||||
let input: Vec<UInt8<CF2<C>>> = [
|
||||
u_i.cmE.to_bytes()?,
|
||||
u_i.u.to_bytes()?,
|
||||
u_i.cmW.to_bytes()?,
|
||||
u_i_x_bytes,
|
||||
U_i.cmE.to_bytes()?,
|
||||
U_i.u.to_bytes()?,
|
||||
U_i.cmW.to_bytes()?,
|
||||
U_i_x_bytes,
|
||||
cmT.to_bytes()?,
|
||||
// TODO instead of bytes, use field elements, but needs x,y coordinates from
|
||||
// u_i.{cmE,cmW}, U_i.{cmE,cmW}, cmT. Depends on exposing x,y coordinates of GC. Issue to
|
||||
// keep track of this:
|
||||
// https://github.com/privacy-scaling-explorations/folding-schemes/issues/44
|
||||
]
|
||||
.concat();
|
||||
sponge.absorb(&input)?;
|
||||
let bits = sponge.squeeze_bits(N_BITS_RO)?;
|
||||
Ok(bits)
|
||||
}
|
||||
}
|
||||
|
||||
/// returns the bytes in a form compatible with the ark_r1cs_std `.to_bytes` approach
|
||||
fn point_to_bytes<C: CurveGroup>(p: C) -> Vec<u8> {
|
||||
let l = p.uncompressed_size();
|
||||
let mut b = Vec::new();
|
||||
p.serialize_uncompressed(&mut b).unwrap();
|
||||
b[l - 1] = 0;
|
||||
if p.is_zero() {
|
||||
b[l / 2] = 1;
|
||||
b[l - 1] = 1;
|
||||
}
|
||||
b
|
||||
}
|
||||
|
||||
/// CycleFoldCircuit contains the constraints that check the correct fold of the committed
|
||||
/// instances from Curve1. Namely, it checks the random linear combinations of the elliptic curve
|
||||
/// (Curve1) points of u_i, U_i leading to U_{i+1}
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CycleFoldCircuit<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
|
||||
pub _gc: PhantomData<GC>,
|
||||
pub r_bits: Option<Vec<bool>>,
|
||||
pub cmT: Option<C>,
|
||||
// u_i, U_i, U_i1 are the Nova instances from the AugmentedFCircuit whose elliptic curve
// points are checked natively in the CycleFoldCircuit
|
||||
pub u_i: Option<CommittedInstance<C>>,
|
||||
pub U_i: Option<CommittedInstance<C>>,
|
||||
pub U_i1: Option<CommittedInstance<C>>,
|
||||
pub x: Option<Vec<CF2<C>>>, // public inputs (cf_u_{i+1}.x)
|
||||
}
|
||||
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> CycleFoldCircuit<C, GC> {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
_gc: PhantomData,
|
||||
r_bits: None,
|
||||
cmT: None,
|
||||
u_i: None,
|
||||
U_i: None,
|
||||
U_i1: None,
|
||||
x: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<C, GC> ConstraintSynthesizer<CF2<C>> for CycleFoldCircuit<C, GC>
|
||||
where
|
||||
C: CurveGroup,
|
||||
GC: CurveVar<C, CF2<C>>,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
|
||||
{
|
||||
fn generate_constraints(self, cs: ConstraintSystemRef<CF2<C>>) -> Result<(), SynthesisError> {
|
||||
let r_bits: Vec<Boolean<CF2<C>>> = Vec::new_witness(cs.clone(), || {
|
||||
Ok(self.r_bits.unwrap_or(vec![false; N_BITS_RO]))
|
||||
})?;
|
||||
let cmT = GC::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or(C::zero())))?;
|
||||
|
||||
let u_dummy_native = CommittedInstance::<C>::dummy(1);
|
||||
|
||||
let u_i = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
|
||||
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let U_i = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
|
||||
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let U_i1 = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
|
||||
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let _x = Vec::<FpVar<CF2<C>>>::new_input(cs.clone(), || {
|
||||
Ok(self.x.unwrap_or(vec![CF2::<C>::zero(); CF_IO_LEN]))
|
||||
})?;
|
||||
#[cfg(test)]
|
||||
assert_eq!(_x.len(), CF_IO_LEN); // non-constrained sanity check
|
||||
|
||||
// fold the original Nova instances natively in CycleFold
|
||||
let v =
|
||||
NIFSinCycleFoldGadget::<C, GC>::verify(r_bits.clone(), cmT, u_i.clone(), U_i, U_i1)?;
|
||||
v.enforce_equal(&Boolean::TRUE)?;
|
||||
|
||||
// check that x == [u_i, U_i, U_{i+1}], i.e. that the cmE & cmW from u_i, U_i, U_{i+1} in
// the CycleFoldCircuit are the same ones used in the public inputs 'x', which come from the
// AugmentedFCircuit.
|
||||
// TODO: Issue to keep track of this: https://github.com/privacy-scaling-explorations/folding-schemes/issues/44
|
||||
// and https://github.com/privacy-scaling-explorations/folding-schemes/issues/48
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_ff::BigInteger;
|
||||
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
|
||||
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
|
||||
use ark_relations::r1cs::ConstraintSystem;
|
||||
use ark_std::UniformRand;
|
||||
|
||||
use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
|
||||
#[test]
|
||||
fn test_committed_instance_cyclefold_var() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
|
||||
let ci = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: vec![Fr::rand(&mut rng); 1],
|
||||
};
|
||||
|
||||
// check the instantiation of the CycleFold side:
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
let ciVar =
|
||||
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci.clone())
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(ciVar.cmE.value().unwrap(), ci.cmE);
|
||||
assert_eq!(ciVar.cmW.value().unwrap(), ci.cmW);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nifs_gadget_cyclefold() {
|
||||
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, _) = prepare_simple_fold_inputs();
|
||||
|
||||
// cs is the Constraint System on the Curve Cycle auxiliary curve constraints field
|
||||
// (E2::Fr)
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
|
||||
let r_bitsVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
|
||||
|
||||
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
|
||||
let ci1Var =
|
||||
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci1.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let ci2Var =
|
||||
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci2.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let ci3Var =
|
||||
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci3.clone())
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let nifs_cf_check = NIFSinCycleFoldGadget::<Projective, GVar>::verify(
|
||||
r_bitsVar, cmTVar, ci1Var, ci2Var, ci3Var,
|
||||
)
|
||||
.unwrap();
|
||||
nifs_cf_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
dbg!(cs.num_constraints());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nifs_full_gadget() {
|
||||
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, r_Fr) = prepare_simple_fold_inputs();
|
||||
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
|
||||
let r_nonnatVar =
|
||||
NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(r_Fr)).unwrap();
|
||||
let r_bitsVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
|
||||
|
||||
let ci1Var =
|
||||
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci1.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let ci2Var =
|
||||
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci2.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let ci3Var =
|
||||
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(ci3.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
|
||||
|
||||
let nifs_check = NIFSFullGadget::<Projective, GVar>::verify(
|
||||
r_bitsVar,
|
||||
r_nonnatVar,
|
||||
cmTVar,
|
||||
ci1Var,
|
||||
ci2Var,
|
||||
ci3Var,
|
||||
)
|
||||
.unwrap();
|
||||
nifs_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
dbg!(cs.num_constraints());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cyclefold_challenge_gadget() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fq>();
|
||||
|
||||
let u_i = CommittedInstance::<Projective> {
|
||||
cmE: Projective::zero(), // zero on purpose, so we test also the zero point case
|
||||
u: Fr::zero(),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: std::iter::repeat_with(|| Fr::rand(&mut rng))
|
||||
.take(CF_IO_LEN)
|
||||
.collect(),
|
||||
};
|
||||
let U_i = CommittedInstance::<Projective> {
|
||||
cmE: Projective::rand(&mut rng),
|
||||
u: Fr::rand(&mut rng),
|
||||
cmW: Projective::rand(&mut rng),
|
||||
x: std::iter::repeat_with(|| Fr::rand(&mut rng))
|
||||
.take(CF_IO_LEN)
|
||||
.collect(),
|
||||
};
|
||||
let cmT = Projective::rand(&mut rng);
|
||||
|
||||
// compute the challenge natively
|
||||
let r_bits = CycleFoldChallengeGadget::<Projective, GVar>::get_challenge_native(
|
||||
&poseidon_config,
|
||||
u_i.clone(),
|
||||
U_i.clone(),
|
||||
cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
let u_iVar =
|
||||
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(u_i.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let U_iVar =
|
||||
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
|
||||
Ok(U_i.clone())
|
||||
})
|
||||
.unwrap();
|
||||
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
|
||||
|
||||
let r_bitsVar = CycleFoldChallengeGadget::<Projective, GVar>::get_challenge_gadget(
|
||||
cs.clone(),
|
||||
&poseidon_config,
|
||||
u_iVar,
|
||||
U_iVar,
|
||||
cmTVar,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
|
||||
// check that the natively computed and in-circuit computed challenges match
|
||||
let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap();
|
||||
let r = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
|
||||
assert_eq!(rVar.value().unwrap(), r);
|
||||
assert_eq!(r_bitsVar.value().unwrap(), r_bits);
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/decider_eth.rs (new file)
@@ -0,0 +1,205 @@
|
||||
/// This file implements the onchain (Ethereum's EVM) decider.
|
||||
use ark_crypto_primitives::sponge::Absorb;
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_ff::PrimeField;
|
||||
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar};
|
||||
use ark_snark::SNARK;
|
||||
use ark_std::rand::CryptoRng;
|
||||
use ark_std::rand::RngCore;
|
||||
use core::marker::PhantomData;
|
||||
|
||||
pub use super::decider_eth_circuit::DeciderEthCircuit;
|
||||
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentProver};
|
||||
use crate::folding::circuits::nonnative::point_to_nonnative_limbs_custom_opt;
|
||||
use crate::folding::nova::{circuits::CF2, CommittedInstance, Nova};
|
||||
use crate::frontend::FCircuit;
|
||||
use crate::Error;
|
||||
use crate::{Decider as DeciderTrait, FoldingScheme};
|
||||
use ark_r1cs_std::fields::nonnative::params::OptimizationType;
|
||||
|
||||
/// Onchain Decider, for ethereum use cases
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Decider<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS> {
|
||||
_c1: PhantomData<C1>,
|
||||
_gc1: PhantomData<GC1>,
|
||||
_c2: PhantomData<C2>,
|
||||
_gc2: PhantomData<GC2>,
|
||||
_fc: PhantomData<FC>,
|
||||
_cp1: PhantomData<CP1>,
|
||||
_cp2: PhantomData<CP2>,
|
||||
_s: PhantomData<S>,
|
||||
_fs: PhantomData<FS>,
|
||||
}
|
||||
|
||||
impl<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS> DeciderTrait<C1, C2, FC, FS>
|
||||
for Decider<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
// enforce that CP2 is a Pedersen commitment, since this is the Ethereum EVM decider
|
||||
CP2: CommitmentProver<C2, Params = PedersenParams<C2>>,
|
||||
S: SNARK<C1::ScalarField>,
|
||||
FS: FoldingScheme<C1, C2, FC>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
|
||||
// constrain FS into Nova, since this is a Decider specifically for Nova
|
||||
Nova<C1, GC1, C2, GC2, FC, CP1, CP2>: From<FS>,
|
||||
{
|
||||
type ProverParam = S::ProvingKey;
|
||||
type Proof = S::Proof;
|
||||
type VerifierParam = S::VerifyingKey;
|
||||
type PublicInput = Vec<C1::ScalarField>;
|
||||
type CommittedInstanceWithWitness = ();
|
||||
type CommittedInstance = CommittedInstance<C1>;
|
||||
|
||||
fn prove(
|
||||
pp: &Self::ProverParam,
|
||||
mut rng: impl RngCore + CryptoRng,
|
||||
folding_scheme: FS,
|
||||
) -> Result<Self::Proof, Error> {
|
||||
let circuit =
|
||||
DeciderEthCircuit::<C1, GC1, C2, GC2, CP1, CP2>::from_nova::<FC>(folding_scheme.into());
|
||||
|
||||
let proof = S::prove(pp, circuit.clone(), &mut rng).unwrap();
|
||||
|
||||
Ok(proof)
|
||||
}
|
||||
|
||||
fn verify(
|
||||
vp: &Self::VerifierParam,
|
||||
i: C1::ScalarField,
|
||||
z_0: Vec<C1::ScalarField>,
|
||||
z_i: Vec<C1::ScalarField>,
|
||||
running_instance: &Self::CommittedInstance,
|
||||
proof: Self::Proof,
|
||||
) -> Result<bool, Error> {
|
||||
let (cmE_x, cmE_y) = point_to_nonnative_limbs_custom_opt::<C1>(
|
||||
running_instance.cmE,
|
||||
OptimizationType::Constraints,
|
||||
)?;
|
||||
let (cmW_x, cmW_y) = point_to_nonnative_limbs_custom_opt::<C1>(
|
||||
running_instance.cmW,
|
||||
OptimizationType::Constraints,
|
||||
)?;
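// Note: this public input ordering is expected to mirror the order in which the inputs are
// allocated in DeciderEthCircuit::generate_constraints (i, z_0, z_i, then U_i).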
|
||||
let public_input: Vec<C1::ScalarField> = vec![
|
||||
vec![i],
|
||||
z_0,
|
||||
z_i,
|
||||
vec![running_instance.u],
|
||||
running_instance.x.clone(),
|
||||
cmE_x,
|
||||
cmE_y,
|
||||
cmW_x,
|
||||
cmW_y,
|
||||
]
|
||||
.concat();
|
||||
S::verify(vp, &public_input, &proof).map_err(|e| Error::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_groth16::Groth16;
|
||||
use ark_mnt4_298::{constraints::G1Var as GVar, Fr, G1Projective as Projective, MNT4_298};
|
||||
use ark_mnt6_298::{constraints::G1Var as GVar2, G1Projective as Projective2};
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use crate::folding::nova::{get_pedersen_params_len, ProverParams};
|
||||
use crate::frontend::tests::CubicFCircuit;
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
|
||||
// Note: since we're testing a big circuit, this test takes more computation and time, so it is
// not run in the normal CI.
// To run the test, use the `--ignored` flag, e.g. `cargo test -- --ignored`
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_decider() {
|
||||
type NOVA = Nova<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
CubicFCircuit<Fr>,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
>;
|
||||
type DECIDER = Decider<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
CubicFCircuit<Fr>,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
Groth16<MNT4_298>, // here we define the Snark to use in the decider
|
||||
NOVA, // here we define the FoldingScheme to use
|
||||
>;
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let F_circuit = CubicFCircuit::<Fr>::new(());
|
||||
let z_0 = vec![Fr::from(3_u32)];
|
||||
|
||||
let (cm_len, cf_cm_len) =
|
||||
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
|
||||
&poseidon_config,
|
||||
F_circuit,
|
||||
)
|
||||
.unwrap();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
|
||||
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
|
||||
|
||||
let start = Instant::now();
|
||||
let prover_params =
|
||||
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
cm_params: pedersen_params,
|
||||
cf_cm_params: cf_pedersen_params,
|
||||
};
|
||||
println!("generating pedersen params, {:?}", start.elapsed());
|
||||
|
||||
// use Nova as FoldingScheme
|
||||
let start = Instant::now();
|
||||
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
|
||||
println!("Nova initialized, {:?}", start.elapsed());
|
||||
let start = Instant::now();
|
||||
nova.prove_step().unwrap();
|
||||
println!("prove_step, {:?}", start.elapsed());
|
||||
|
||||
// generate Groth16 setup
|
||||
let circuit = DeciderEthCircuit::<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
>::from_nova::<CubicFCircuit<Fr>>(nova.clone());
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let start = Instant::now();
|
||||
let (pk, vk) =
|
||||
Groth16::<MNT4_298>::circuit_specific_setup(circuit.clone(), &mut rng).unwrap();
|
||||
println!("Groth16 setup, {:?}", start.elapsed());
|
||||
|
||||
// decider proof generation
|
||||
let start = Instant::now();
|
||||
let proof = DECIDER::prove(&pk, rng, nova.clone()).unwrap();
|
||||
println!("Decider Groth16 prove, {:?}", start.elapsed());
|
||||
|
||||
// decider proof verification
|
||||
let verified = DECIDER::verify(&vk, nova.i, nova.z_0, nova.z_i, &nova.U_i, proof).unwrap();
|
||||
assert!(verified);
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/decider_eth_circuit.rs (new file)
@@ -0,0 +1,686 @@
|
||||
/// This file implements the onchain (Ethereum's EVM) decider circuit. For non-Ethereum use cases,
/// other, more efficient approaches can be used.
|
||||
use ark_crypto_primitives::crh::poseidon::constraints::CRHParametersVar;
|
||||
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_ff::PrimeField;
|
||||
use ark_r1cs_std::{
|
||||
alloc::{AllocVar, AllocationMode},
|
||||
boolean::Boolean,
|
||||
eq::EqGadget,
|
||||
fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar},
|
||||
groups::GroupOpsBounds,
|
||||
prelude::CurveVar,
|
||||
ToConstraintFieldGadget,
|
||||
};
|
||||
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
|
||||
use ark_std::{One, Zero};
|
||||
use core::{borrow::Borrow, marker::PhantomData};
|
||||
|
||||
use crate::ccs::r1cs::R1CS;
|
||||
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentProver};
|
||||
use crate::folding::nova::{
|
||||
circuits::{CommittedInstanceVar, CF1, CF2},
|
||||
CommittedInstance, Nova, Witness,
|
||||
};
|
||||
use crate::frontend::FCircuit;
|
||||
use crate::utils::gadgets::{
|
||||
hadamard, mat_vec_mul_sparse, vec_add, vec_scalar_mul, SparseMatrixVar,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RelaxedR1CSGadget<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
|
||||
_f: PhantomData<F>,
|
||||
_cf: PhantomData<CF>,
|
||||
_fv: PhantomData<FV>,
|
||||
}
|
||||
impl<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> RelaxedR1CSGadget<F, CF, FV> {
|
||||
/// performs the RelaxedR1CS check (Az∘Bz==uCz+E)
|
||||
pub fn check(
|
||||
r1cs: R1CSVar<F, CF, FV>,
|
||||
E: Vec<FV>,
|
||||
u: FV,
|
||||
z: Vec<FV>,
|
||||
) -> Result<(), SynthesisError> {
|
||||
let Az = mat_vec_mul_sparse(r1cs.A, z.clone());
|
||||
let Bz = mat_vec_mul_sparse(r1cs.B, z.clone());
|
||||
let Cz = mat_vec_mul_sparse(r1cs.C, z.clone());
|
||||
let uCz = vec_scalar_mul(&Cz, &u);
|
||||
let uCzE = vec_add(&uCz, &E)?;
|
||||
let AzBz = hadamard(&Az, &Bz)?;
|
||||
for i in 0..AzBz.len() {
|
||||
AzBz[i].enforce_equal(&uCzE[i].clone())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct R1CSVar<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
|
||||
_f: PhantomData<F>,
|
||||
_cf: PhantomData<CF>,
|
||||
_fv: PhantomData<FV>,
|
||||
pub A: SparseMatrixVar<F, CF, FV>,
|
||||
pub B: SparseMatrixVar<F, CF, FV>,
|
||||
pub C: SparseMatrixVar<F, CF, FV>,
|
||||
}
|
||||
|
||||
impl<F, CF, FV> AllocVar<R1CS<F>, CF> for R1CSVar<F, CF, FV>
|
||||
where
|
||||
F: PrimeField,
|
||||
CF: PrimeField,
|
||||
FV: FieldVar<F, CF>,
|
||||
{
|
||||
fn new_variable<T: Borrow<R1CS<F>>>(
|
||||
cs: impl Into<Namespace<CF>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
_mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let A = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().A)?;
|
||||
let B = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().B)?;
|
||||
let C = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().C)?;
|
||||
|
||||
Ok(Self {
|
||||
_f: PhantomData,
|
||||
_cf: PhantomData,
|
||||
_fv: PhantomData,
|
||||
A,
|
||||
B,
|
||||
C,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// In-circuit representation of the Witness associated to the CommittedInstance.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WitnessVar<C: CurveGroup> {
|
||||
pub E: Vec<FpVar<C::ScalarField>>,
|
||||
pub rE: FpVar<C::ScalarField>,
|
||||
pub W: Vec<FpVar<C::ScalarField>>,
|
||||
pub rW: FpVar<C::ScalarField>,
|
||||
}
|
||||
|
||||
impl<C> AllocVar<Witness<C>, CF1<C>> for WitnessVar<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
|
||||
{
|
||||
fn new_variable<T: Borrow<Witness<C>>>(
|
||||
cs: impl Into<Namespace<CF1<C>>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let E: Vec<FpVar<C::ScalarField>> =
|
||||
Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?;
|
||||
let rE =
|
||||
FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().rE), mode)?;
|
||||
|
||||
let W: Vec<FpVar<C::ScalarField>> =
|
||||
Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?;
|
||||
let rW =
|
||||
FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().rW), mode)?;
|
||||
|
||||
Ok(Self { E, rE, W, rW })
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// In-circuit representation of the Witness associated to the CommittedInstance, but with
|
||||
/// non-native representation, since it is used to represent the CycleFold witness.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CycleFoldWitnessVar<C: CurveGroup> {
|
||||
pub E: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
|
||||
pub rE: NonNativeFieldVar<C::ScalarField, CF2<C>>,
|
||||
pub W: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
|
||||
pub rW: NonNativeFieldVar<C::ScalarField, CF2<C>>,
|
||||
}
|
||||
|
||||
impl<C> AllocVar<Witness<C>, CF2<C>> for CycleFoldWitnessVar<C>
|
||||
where
|
||||
C: CurveGroup,
|
||||
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
|
||||
{
|
||||
fn new_variable<T: Borrow<Witness<C>>>(
|
||||
cs: impl Into<Namespace<CF2<C>>>,
|
||||
f: impl FnOnce() -> Result<T, SynthesisError>,
|
||||
mode: AllocationMode,
|
||||
) -> Result<Self, SynthesisError> {
|
||||
f().and_then(|val| {
|
||||
let cs = cs.into();
|
||||
|
||||
let E: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> =
|
||||
Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?;
|
||||
let rE = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().rE),
|
||||
mode,
|
||||
)?;
|
||||
|
||||
let W: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> =
|
||||
Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?;
|
||||
let rW = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
|
||||
cs.clone(),
|
||||
|| Ok(val.borrow().rW),
|
||||
mode,
|
||||
)?;
|
||||
|
||||
Ok(Self { E, rE, W, rW })
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM)
|
||||
/// verification.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
{
|
||||
_c1: PhantomData<C1>,
|
||||
_gc1: PhantomData<GC1>,
|
||||
_c2: PhantomData<C2>,
|
||||
_gc2: PhantomData<GC2>,
|
||||
_cp1: PhantomData<CP1>,
|
||||
_cp2: PhantomData<CP2>,
|
||||
|
||||
/// E vector's length of the Nova instance witness
|
||||
pub E_len: usize,
|
||||
/// E vector's length of the CycleFold instance witness
|
||||
pub cf_E_len: usize,
|
||||
/// R1CS of the Augmented Function circuit
|
||||
pub r1cs: R1CS<C1::ScalarField>,
|
||||
/// R1CS of the CycleFold circuit
|
||||
pub cf_r1cs: R1CS<C2::ScalarField>,
|
||||
/// CycleFold PedersenParams over C2
|
||||
pub cf_pedersen_params: PedersenParams<C2>,
|
||||
pub poseidon_config: PoseidonConfig<CF1<C1>>,
|
||||
pub i: Option<CF1<C1>>,
|
||||
/// initial state
|
||||
pub z_0: Option<Vec<C1::ScalarField>>,
|
||||
/// current i-th state
|
||||
pub z_i: Option<Vec<C1::ScalarField>>,
|
||||
/// Nova instances
|
||||
pub u_i: Option<CommittedInstance<C1>>,
|
||||
pub w_i: Option<Witness<C1>>,
|
||||
pub U_i: Option<CommittedInstance<C1>>,
|
||||
pub W_i: Option<Witness<C1>>,
|
||||
/// CycleFold running instance
|
||||
pub cf_U_i: Option<CommittedInstance<C2>>,
|
||||
pub cf_W_i: Option<Witness<C2>>,
|
||||
}
|
||||
impl<C1, GC1, C2, GC2, CP1, CP2> DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
// enforce that CP2 is a Pedersen commitment, since this is the Ethereum EVM decider
|
||||
CP2: CommitmentProver<C2, Params = PedersenParams<C2>>,
|
||||
{
|
||||
pub fn from_nova<FC: FCircuit<C1::ScalarField>>(
|
||||
nova: Nova<C1, GC1, C2, GC2, FC, CP1, CP2>,
|
||||
) -> Self {
|
||||
Self {
|
||||
_c1: PhantomData,
|
||||
_gc1: PhantomData,
|
||||
_c2: PhantomData,
|
||||
_gc2: PhantomData,
|
||||
_cp1: PhantomData,
|
||||
_cp2: PhantomData,
|
||||
|
||||
E_len: nova.W_i.E.len(),
|
||||
cf_E_len: nova.cf_W_i.E.len(),
|
||||
r1cs: nova.r1cs,
|
||||
cf_r1cs: nova.cf_r1cs,
|
||||
cf_pedersen_params: nova.cf_cm_params,
|
||||
poseidon_config: nova.poseidon_config,
|
||||
i: Some(nova.i),
|
||||
z_0: Some(nova.z_0),
|
||||
z_i: Some(nova.z_i),
|
||||
u_i: Some(nova.u_i),
|
||||
w_i: Some(nova.w_i),
|
||||
U_i: Some(nova.U_i),
|
||||
W_i: Some(nova.W_i),
|
||||
cf_U_i: Some(nova.cf_U_i),
|
||||
cf_W_i: Some(nova.cf_W_i),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C1, GC1, C2, GC2, CP1, CP2> ConstraintSynthesizer<CF1<C1>>
|
||||
for DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
|
||||
{
|
||||
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
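// The checks performed below (numbered 1-6) are: RelaxedR1CS satisfiability of u_i and U_i,
// the shape of u_i (cmE == cm(0), u == 1), the hash binding u_i.x == H(i, z_0, z_i, U_i), and,
// outside of tests, the CycleFold Pedersen commitment openings and the RelaxedR1CS of cf_U_i.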
|
||||
let r1cs =
|
||||
R1CSVar::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
|
||||
Ok(self.r1cs.clone())
|
||||
})?;
|
||||
|
||||
let i =
|
||||
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
|
||||
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
|
||||
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
|
||||
})?;
|
||||
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
|
||||
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
|
||||
})?;
|
||||
|
||||
let u_dummy_native = CommittedInstance::<C1>::dummy(1);
|
||||
let w_dummy_native = Witness::<C1>::new(
|
||||
vec![C1::ScalarField::zero(); self.r1cs.A.n_cols - 2 /* (2=1+1, since u_i.x.len=1) */],
|
||||
self.E_len,
|
||||
);
|
||||
|
||||
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let w_i = WitnessVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.w_i.unwrap_or(w_dummy_native.clone()))
|
||||
})?;
|
||||
let U_i = CommittedInstanceVar::<C1>::new_input(cs.clone(), || {
|
||||
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
|
||||
})?;
|
||||
let W_i = WitnessVar::<C1>::new_witness(cs.clone(), || {
|
||||
Ok(self.W_i.unwrap_or(w_dummy_native.clone()))
|
||||
})?;
|
||||
|
||||
let crh_params = CRHParametersVar::<C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
self.poseidon_config.clone(),
|
||||
)?;
|
||||
|
||||
// 1. check RelaxedR1CS of u_i
|
||||
let z_u: Vec<FpVar<CF1<C1>>> = [
|
||||
vec![FpVar::<CF1<C1>>::one()],
|
||||
u_i.x.to_vec(),
|
||||
w_i.W.to_vec(),
|
||||
]
|
||||
.concat();
|
||||
RelaxedR1CSGadget::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::check(
|
||||
r1cs.clone(),
|
||||
w_i.E,
|
||||
u_i.u.clone(),
|
||||
z_u,
|
||||
)?;
|
||||
|
||||
// 2. check RelaxedR1CS of U_i
|
||||
let z_U: Vec<FpVar<CF1<C1>>> =
|
||||
[vec![U_i.u.clone()], U_i.x.to_vec(), W_i.W.to_vec()].concat();
|
||||
RelaxedR1CSGadget::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::check(
|
||||
r1cs,
|
||||
W_i.E,
|
||||
U_i.u.clone(),
|
||||
z_U,
|
||||
)?;
|
||||
|
||||
// 3. u_i.cmE==cm(0), u_i.u==1
|
||||
// Here zero_x & zero_y are the x & y coordinates of the zero point's affine representation.
|
||||
let zero_x = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
C1::BaseField::zero(),
|
||||
)?
|
||||
.to_constraint_field()?;
|
||||
let zero_y = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
|
||||
cs.clone(),
|
||||
C1::BaseField::one(),
|
||||
)?
|
||||
.to_constraint_field()?;
|
||||
(u_i.cmE.x.is_eq(&zero_x)?).enforce_equal(&Boolean::TRUE)?;
|
||||
(u_i.cmE.y.is_eq(&zero_y)?).enforce_equal(&Boolean::TRUE)?;
|
||||
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
|
||||
|
||||
// 4. u_i.x == H(i, z_0, z_i, U_i)
|
||||
let u_i_x = U_i
|
||||
.clone()
|
||||
.hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?;
|
||||
(u_i.x[0]).enforce_equal(&u_i_x)?;
|
||||
|
||||
// The following two checks (and their respective allocations) are disabled for normal
// tests since they take ~24.5M constraints and would take several minutes (and a lot of RAM)
// to run the test.
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
// imports here instead of at the top of the file, so we avoid having multiple
|
||||
// `#[cfg(not(test))]`
|
||||
use crate::commitment::pedersen::PedersenGadget;
|
||||
use crate::folding::nova::cyclefold::{CycleFoldCommittedInstanceVar, CF_IO_LEN};
|
||||
use ark_r1cs_std::ToBitsGadget;
|
||||
|
||||
let cf_r1cs = R1CSVar::<
|
||||
C1::BaseField,
|
||||
CF1<C1>,
|
||||
NonNativeFieldVar<C1::BaseField, CF1<C1>>,
|
||||
>::new_witness(cs.clone(), || Ok(self.cf_r1cs.clone()))?;
|
||||
|
||||
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(CF_IO_LEN);
|
||||
let w_dummy_native = Witness::<C2>::new(
|
||||
vec![C2::ScalarField::zero(); self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l],
|
||||
self.cf_E_len,
|
||||
);
|
||||
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
|
||||
})?;
|
||||
let cf_W_i = CycleFoldWitnessVar::<C2>::new_witness(cs.clone(), || {
|
||||
Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone()))
|
||||
})?;
|
||||
|
||||
// 5. check Pedersen commitments of cf_U_i.{cmE, cmW}
|
||||
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
|
||||
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
|
||||
let cf_W_i_E_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
|
||||
.E
|
||||
.iter()
|
||||
.map(|E_i| E_i.to_bits_le().unwrap())
|
||||
.collect();
|
||||
let cf_W_i_W_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
|
||||
.W
|
||||
.iter()
|
||||
.map(|W_i| W_i.to_bits_le().unwrap())
|
||||
.collect();
|
||||
|
||||
let computed_cmE = PedersenGadget::<C2, GC2>::commit(
|
||||
H.clone(),
|
||||
G.clone(),
|
||||
cf_W_i_E_bits,
|
||||
cf_W_i.rE.to_bits_le()?,
|
||||
)?;
|
||||
cf_U_i.cmE.enforce_equal(&computed_cmE)?;
|
||||
let computed_cmW =
|
||||
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i_W_bits, cf_W_i.rW.to_bits_le()?)?;
|
||||
cf_U_i.cmW.enforce_equal(&computed_cmW)?;
|
||||
|
||||
// 6. check RelaxedR1CS of cf_U_i
|
||||
let cf_z_U: Vec<NonNativeFieldVar<C2::ScalarField, CF1<C1>>> =
|
||||
[vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
|
||||
RelaxedR1CSGadget::<
|
||||
C2::ScalarField,
|
||||
CF1<C1>,
|
||||
NonNativeFieldVar<C2::ScalarField, CF1<C1>>,
|
||||
>::check(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_crypto_primitives::crh::{
|
||||
sha256::{
|
||||
constraints::{Sha256Gadget, UnitVar},
|
||||
Sha256,
|
||||
},
|
||||
CRHScheme, CRHSchemeGadget,
|
||||
};
|
||||
use ark_ff::BigInteger;
|
||||
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
|
||||
use ark_r1cs_std::{
|
||||
alloc::AllocVar,
|
||||
bits::uint8::UInt8,
|
||||
eq::EqGadget,
|
||||
fields::{fp::FpVar, nonnative::NonNativeFieldVar},
|
||||
};
|
||||
use ark_relations::r1cs::ConstraintSystem;
|
||||
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
|
||||
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use crate::folding::nova::{get_pedersen_params_len, ProverParams, VerifierParams};
|
||||
use crate::frontend::tests::{CubicFCircuit, CustomFCircuit, WrapperCircuit};
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
use crate::FoldingScheme;
|
||||
|
||||
use crate::ccs::r1cs::{extract_r1cs, extract_w_x};
|
||||
use crate::ccs::r1cs::{
|
||||
tests::{get_test_r1cs, get_test_z},
|
||||
R1CS,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_relaxed_r1cs_small_gadget_handcrafted() {
|
||||
let r1cs: R1CS<Fr> = get_test_r1cs();
|
||||
let rel_r1cs = r1cs.clone().relax();
|
||||
let z = get_test_z(3);
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
|
||||
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(rel_r1cs.E)).unwrap();
|
||||
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(rel_r1cs.u)).unwrap();
|
||||
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
|
||||
|
||||
RelaxedR1CSGadget::<Fr, Fr, FpVar<Fr>>::check(r1csVar, EVar, uVar, zVar).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
}
|
||||
|
||||
// gets as input a circuit that implements the ConstraintSynthesizer trait, and that has been
|
||||
// initialized.
|
||||
fn test_relaxed_r1cs_gadget<CS: ConstraintSynthesizer<Fr>>(circuit: CS) {
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
circuit.generate_constraints(cs.clone()).unwrap();
|
||||
cs.finalize();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
|
||||
let cs = cs.into_inner().unwrap();
|
||||
|
||||
let r1cs = extract_r1cs::<Fr>(&cs);
|
||||
let (w, x) = extract_w_x::<Fr>(&cs);
|
||||
let z = [vec![Fr::one()], x, w].concat();
|
||||
r1cs.check_relation(&z).unwrap();
|
||||
|
||||
let relaxed_r1cs = r1cs.clone().relax();
|
||||
relaxed_r1cs.check_relation(&z).unwrap();
|
||||
|
||||
// set new CS for the circuit that checks the RelaxedR1CS of our original circuit
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
// prepare the inputs for our circuit
|
||||
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
|
||||
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.E)).unwrap();
|
||||
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
|
||||
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
|
||||
|
||||
RelaxedR1CSGadget::<Fr, Fr, FpVar<Fr>>::check(r1csVar, EVar, uVar, zVar).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relaxed_r1cs_small_gadget_arkworks() {
|
||||
let z_i = vec![Fr::from(3_u32)];
|
||||
let cubic_circuit = CubicFCircuit::<Fr>::new(());
|
||||
let circuit = WrapperCircuit::<Fr, CubicFCircuit<Fr>> {
|
||||
FC: cubic_circuit,
|
||||
z_i: Some(z_i.clone()),
|
||||
z_i1: Some(cubic_circuit.step_native(z_i).unwrap()),
|
||||
};
|
||||
|
||||
test_relaxed_r1cs_gadget(circuit);
|
||||
}
|
||||
|
||||
struct Sha256TestCircuit<F: PrimeField> {
|
||||
_f: PhantomData<F>,
|
||||
pub x: Vec<u8>,
|
||||
pub y: Vec<u8>,
|
||||
}
|
||||
impl<F: PrimeField> ConstraintSynthesizer<F> for Sha256TestCircuit<F> {
|
||||
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
|
||||
let x = Vec::<UInt8<F>>::new_witness(cs.clone(), || Ok(self.x))?;
|
||||
let y = Vec::<UInt8<F>>::new_input(cs.clone(), || Ok(self.y))?;
|
||||
|
||||
let unitVar = UnitVar::default();
|
||||
let comp_y = <Sha256Gadget<F> as CRHSchemeGadget<Sha256, F>>::evaluate(&unitVar, &x)?;
|
||||
comp_y.0.enforce_equal(&y)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_relaxed_r1cs_medium_gadget_arkworks() {
|
||||
let x = Fr::from(5_u32).into_bigint().to_bytes_le();
|
||||
let y = <Sha256 as CRHScheme>::evaluate(&(), x.clone()).unwrap();
|
||||
|
||||
let circuit = Sha256TestCircuit::<Fr> {
|
||||
_f: PhantomData,
|
||||
x,
|
||||
y,
|
||||
};
|
||||
test_relaxed_r1cs_gadget(circuit);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relaxed_r1cs_custom_circuit() {
|
||||
let n_constraints = 10_000;
|
||||
let custom_circuit = CustomFCircuit::<Fr>::new(n_constraints);
|
||||
let z_i = vec![Fr::from(5_u32)];
|
||||
let circuit = WrapperCircuit::<Fr, CustomFCircuit<Fr>> {
|
||||
FC: custom_circuit,
|
||||
z_i: Some(z_i.clone()),
|
||||
z_i1: Some(custom_circuit.step_native(z_i).unwrap()),
|
||||
};
|
||||
test_relaxed_r1cs_gadget(circuit);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relaxed_r1cs_nonnative_circuit() {
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
// in practice we would use CycleFoldCircuit, but it is a very big circuit (when computed
// non-natively inside the RelaxedR1CS circuit), so in order to have a short test we use a
// custom circuit.
|
||||
let custom_circuit = CustomFCircuit::<Fq>::new(10);
|
||||
let z_i = vec![Fq::from(5_u32)];
|
||||
let circuit = WrapperCircuit::<Fq, CustomFCircuit<Fq>> {
|
||||
FC: custom_circuit,
|
||||
z_i: Some(z_i.clone()),
|
||||
z_i1: Some(custom_circuit.step_native(z_i).unwrap()),
|
||||
};
|
||||
circuit.generate_constraints(cs.clone()).unwrap();
|
||||
cs.finalize();
|
||||
let cs = cs.into_inner().unwrap();
|
||||
let r1cs = extract_r1cs::<Fq>(&cs);
|
||||
let (w, x) = extract_w_x::<Fq>(&cs);
|
||||
let z = [vec![Fq::one()], x, w].concat();
|
||||
|
||||
let relaxed_r1cs = r1cs.clone().relax();
|
||||
|
||||
// natively
|
||||
let cs = ConstraintSystem::<Fq>::new_ref();
|
||||
let zVar = Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(z.clone())).unwrap();
|
||||
let EVar =
|
||||
Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.clone().E)).unwrap();
|
||||
let uVar = FpVar::<Fq>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
|
||||
let r1csVar =
|
||||
R1CSVar::<Fq, Fq, FpVar<Fq>>::new_witness(cs.clone(), || Ok(r1cs.clone())).unwrap();
|
||||
RelaxedR1CSGadget::<Fq, Fq, FpVar<Fq>>::check(r1csVar, EVar, uVar, zVar).unwrap();
|
||||
|
||||
// non-natively
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
let zVar = Vec::<NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
|
||||
let EVar = Vec::<NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.E))
|
||||
.unwrap();
|
||||
let uVar =
|
||||
NonNativeFieldVar::<Fq, Fr>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
|
||||
let r1csVar =
|
||||
R1CSVar::<Fq, Fr, NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(r1cs))
|
||||
.unwrap();
|
||||
RelaxedR1CSGadget::<Fq, Fr, NonNativeFieldVar<Fq, Fr>>::check(r1csVar, EVar, uVar, zVar)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decider_circuit() {
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let F_circuit = CubicFCircuit::<Fr>::new(());
|
||||
let z_0 = vec![Fr::from(3_u32)];
|
||||
|
||||
// get the CM & CF_CM len
|
||||
let (cm_len, cf_cm_len) =
|
||||
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
|
||||
&poseidon_config,
|
||||
F_circuit,
|
||||
)
|
||||
.unwrap();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
|
||||
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
|
||||
|
||||
let prover_params =
|
||||
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
cm_params: pedersen_params,
|
||||
cf_cm_params: cf_pedersen_params,
|
||||
};
|
||||
|
||||
type NOVA = Nova<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
CubicFCircuit<Fr>,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
>;
|
||||
|
||||
// generate a Nova instance and do a step of it
|
||||
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
|
||||
nova.prove_step().unwrap();
|
||||
let ivc_v = nova.clone();
|
||||
let verifier_params = VerifierParams::<Projective, Projective2> {
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
r1cs: ivc_v.r1cs,
|
||||
cf_r1cs: ivc_v.cf_r1cs,
|
||||
};
|
||||
NOVA::verify(
|
||||
verifier_params,
|
||||
z_0,
|
||||
ivc_v.z_i,
|
||||
Fr::one(),
|
||||
(ivc_v.U_i, ivc_v.W_i),
|
||||
(ivc_v.u_i, ivc_v.w_i),
|
||||
(ivc_v.cf_U_i, ivc_v.cf_W_i),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// load the DeciderEthCircuit from the generated Nova instance
|
||||
let decider_circuit = DeciderEthCircuit::<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
>::from_nova(nova);
|
||||
|
||||
let cs = ConstraintSystem::<Fr>::new_ref();
|
||||
|
||||
// generate the constraints and check that they are satisfied by the inputs
|
||||
decider_circuit.generate_constraints(cs.clone()).unwrap();
|
||||
assert!(cs.is_satisfied().unwrap());
|
||||
dbg!(cs.num_constraints());
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/mod.rs (new file)
@@ -0,0 +1,717 @@
|
||||
/// Implements the scheme described in [Nova](https://eprint.iacr.org/2021/370.pdf) and
|
||||
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
|
||||
use ark_crypto_primitives::{
|
||||
crh::{poseidon::CRH, CRHScheme},
|
||||
sponge::{poseidon::PoseidonConfig, Absorb},
|
||||
};
|
||||
use ark_ec::{AffineRepr, CurveGroup, Group};
|
||||
use ark_ff::{BigInteger, PrimeField};
|
||||
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar};
|
||||
use ark_std::fmt::Debug;
|
||||
use ark_std::{One, Zero};
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
|
||||
|
||||
use crate::ccs::r1cs::{extract_r1cs, extract_w_x, R1CS};
|
||||
use crate::commitment::CommitmentProver;
|
||||
use crate::folding::circuits::nonnative::point_to_nonnative_limbs;
|
||||
use crate::frontend::FCircuit;
|
||||
use crate::utils::vec::is_zero_vec;
|
||||
use crate::Error;
|
||||
use crate::FoldingScheme;
|
||||
|
||||
pub mod circuits;
|
||||
pub mod cyclefold;
|
||||
pub mod decider_eth;
|
||||
pub mod decider_eth_circuit;
|
||||
pub mod nifs;
|
||||
pub mod traits;
|
||||
|
||||
use circuits::{AugmentedFCircuit, ChallengeGadget, CF2};
|
||||
use cyclefold::{CycleFoldChallengeGadget, CycleFoldCircuit};
|
||||
use nifs::NIFS;
|
||||
use traits::NovaR1CS;
|
||||
|
||||
#[cfg(test)]
|
||||
use cyclefold::CF_IO_LEN;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct CommittedInstance<C: CurveGroup> {
|
||||
pub cmE: C,
|
||||
pub u: C::ScalarField,
|
||||
pub cmW: C,
|
||||
pub x: Vec<C::ScalarField>,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup> CommittedInstance<C> {
|
||||
pub fn dummy(io_len: usize) -> Self {
|
||||
Self {
|
||||
cmE: C::zero(),
|
||||
u: C::ScalarField::zero(),
|
||||
cmW: C::zero(),
|
||||
x: vec![C::ScalarField::zero(); io_len],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: CurveGroup> CommittedInstance<C>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
{
|
||||
/// hash implements the committed instance hash compatible with the gadget implemented in
|
||||
/// nova/circuits.rs::CommittedInstanceVar.hash.
|
||||
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the
|
||||
/// `CommittedInstance`.
|
||||
pub fn hash(
|
||||
&self,
|
||||
poseidon_config: &PoseidonConfig<C::ScalarField>,
|
||||
i: C::ScalarField,
|
||||
z_0: Vec<C::ScalarField>,
|
||||
z_i: Vec<C::ScalarField>,
|
||||
) -> Result<C::ScalarField, Error> {
|
||||
let (cmE_x, cmE_y) = point_to_nonnative_limbs::<C>(self.cmE)?;
|
||||
let (cmW_x, cmW_y) = point_to_nonnative_limbs::<C>(self.cmW)?;
|
||||
|
||||
CRH::<C::ScalarField>::evaluate(
|
||||
poseidon_config,
|
||||
vec![
|
||||
vec![i],
|
||||
z_0,
|
||||
z_i,
|
||||
vec![self.u],
|
||||
self.x.clone(),
|
||||
cmE_x,
|
||||
cmE_y,
|
||||
cmW_x,
|
||||
cmW_y,
|
||||
]
|
||||
.concat(),
|
||||
)
|
||||
.map_err(|e| Error::Other(e.to_string()))
|
||||
}
|
||||
}
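// Usage sketch (mirroring `prove_step` further below; names without `self.` are illustrative):
// the public input of the next incoming instance is bound to the IVC state via this hash,
//
//     // u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1})
//     let u_i1_x = U_i1.hash(&poseidon_config, i + C1::ScalarField::one(), z_0.clone(), z_i1.clone())?;
//
// and must agree with the in-circuit hash computed by AugmentedFCircuit.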
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Witness<C: CurveGroup> {
|
||||
pub E: Vec<C::ScalarField>,
|
||||
pub rE: C::ScalarField,
|
||||
pub W: Vec<C::ScalarField>,
|
||||
pub rW: C::ScalarField,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup> Witness<C>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
{
|
||||
pub fn new(w: Vec<C::ScalarField>, e_len: usize) -> Self {
|
||||
// note: in the current version we don't use the blinding factors and we set them to 0
// always.
|
||||
Self {
|
||||
E: vec![C::ScalarField::zero(); e_len],
|
||||
rE: C::ScalarField::zero(),
|
||||
W: w,
|
||||
rW: C::ScalarField::zero(),
|
||||
}
|
||||
}
|
||||
pub fn commit<CP: CommitmentProver<C>>(
|
||||
&self,
|
||||
params: &CP::Params,
|
||||
x: Vec<C::ScalarField>,
|
||||
) -> Result<CommittedInstance<C>, Error> {
|
||||
let mut cmE = C::zero();
|
||||
if !is_zero_vec::<C::ScalarField>(&self.E) {
|
||||
cmE = CP::commit(params, &self.E, &self.rE)?;
|
||||
}
|
||||
let cmW = CP::commit(params, &self.W, &self.rW)?;
|
||||
Ok(CommittedInstance {
|
||||
cmE,
|
||||
u: C::ScalarField::one(),
|
||||
cmW,
|
||||
x,
|
||||
})
|
||||
}
|
||||
}
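// Usage sketch (as exercised in the NIFS tests; `w_vec`, `x` and `pedersen_params` are
// placeholders): a fresh witness has E = 0 and zero blinding factors, and committing it
// yields an un-relaxed instance with u = 1 and cmE = 0:
//
//     let w = Witness::<Projective>::new(w_vec, r1cs.A.n_rows);
//     let ci = w.commit::<Pedersen<Projective>>(&pedersen_params, x)?;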
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProverParams<C1, C2, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
C2: CurveGroup,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
{
|
||||
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
|
||||
pub cm_params: CP1::Params,
|
||||
pub cf_cm_params: CP2::Params,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VerifierParams<C1: CurveGroup, C2: CurveGroup> {
|
||||
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
|
||||
pub r1cs: R1CS<C1::ScalarField>,
|
||||
pub cf_r1cs: R1CS<C2::ScalarField>,
|
||||
}
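// Construction sketch (mirroring `test_ivc` below, instantiated over the pallas/vesta cycle
// with Pedersen commitments; variable names are illustrative):
//
//     let (cm_len, cf_cm_len) = get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
//         &poseidon_config,
//         F_circuit,
//     )?;
//     let prover_params = ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
//         poseidon_config: poseidon_config.clone(),
//         cm_params: Pedersen::<Projective>::new_params(&mut rng, cm_len),
//         cf_cm_params: Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len),
//     };
//
// The corresponding VerifierParams (poseidon_config plus the two R1CS) can then be obtained
// via `preprocess` below.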
|
||||
|
||||
/// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and
|
||||
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
{
|
||||
_gc1: PhantomData<GC1>,
|
||||
_c2: PhantomData<C2>,
|
||||
_gc2: PhantomData<GC2>,
|
||||
/// R1CS of the Augmented Function circuit
|
||||
pub r1cs: R1CS<C1::ScalarField>,
|
||||
/// R1CS of the CycleFold circuit
|
||||
pub cf_r1cs: R1CS<C2::ScalarField>,
|
||||
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
|
||||
/// CommitmentProver::Params over C1
|
||||
pub cm_params: CP1::Params,
|
||||
/// CycleFold CommitmentProver::Params, over C2
|
||||
pub cf_cm_params: CP2::Params,
|
||||
/// F circuit, the circuit that is being folded
|
||||
pub F: FC,
|
||||
pub i: C1::ScalarField,
|
||||
/// initial state
|
||||
pub z_0: Vec<C1::ScalarField>,
|
||||
/// current i-th state
|
||||
pub z_i: Vec<C1::ScalarField>,
|
||||
/// Nova instances
|
||||
pub w_i: Witness<C1>,
|
||||
pub u_i: CommittedInstance<C1>,
|
||||
pub W_i: Witness<C1>,
|
||||
pub U_i: CommittedInstance<C1>,
|
||||
|
||||
/// CycleFold running instance
|
||||
pub cf_W_i: Witness<C2>,
|
||||
pub cf_U_i: CommittedInstance<C2>,
|
||||
}
|
||||
|
||||
impl<C1, GC1, C2, GC2, FC, CP1, CP2> FoldingScheme<C1, C2, FC>
|
||||
for Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
type PreprocessorParam = (Self::ProverParam, FC);
|
||||
type ProverParam = ProverParams<C1, C2, CP1, CP2>;
|
||||
type VerifierParam = VerifierParams<C1, C2>;
|
||||
type CommittedInstanceWithWitness = (CommittedInstance<C1>, Witness<C1>);
|
||||
type CFCommittedInstanceWithWitness = (CommittedInstance<C2>, Witness<C2>);
|
||||
|
||||
fn preprocess(
|
||||
prep_param: &Self::PreprocessorParam,
|
||||
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
|
||||
let (prover_params, F_circuit) = prep_param;
|
||||
|
||||
let (r1cs, cf_r1cs) =
|
||||
get_r1cs::<C1, GC1, C2, GC2, FC>(&prover_params.poseidon_config, *F_circuit)?;
|
||||
|
||||
let verifier_params = VerifierParams::<C1, C2> {
|
||||
poseidon_config: prover_params.poseidon_config.clone(),
|
||||
r1cs,
|
||||
cf_r1cs,
|
||||
};
|
||||
Ok((prover_params.clone(), verifier_params))
|
||||
}
|
||||
|
||||
/// Initializes the Nova+CycleFold's IVC for the given parameters and initial state `z_0`.
|
||||
fn init(pp: &Self::ProverParam, F: FC, z_0: Vec<C1::ScalarField>) -> Result<Self, Error> {
|
||||
// prepare the circuit to obtain its R1CS
|
||||
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
|
||||
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
|
||||
|
||||
let augmented_F_circuit =
|
||||
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&pp.poseidon_config, F);
|
||||
let cf_circuit = CycleFoldCircuit::<C1, GC1>::empty();
|
||||
|
||||
augmented_F_circuit.generate_constraints(cs.clone())?;
|
||||
cs.finalize();
|
||||
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
|
||||
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
|
||||
|
||||
cf_circuit.generate_constraints(cs2.clone())?;
|
||||
cs2.finalize();
|
||||
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
|
||||
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
|
||||
|
||||
// setup the dummy instances
|
||||
let (w_dummy, u_dummy) = r1cs.dummy_instance();
|
||||
let (cf_w_dummy, cf_u_dummy) = cf_r1cs.dummy_instance();
|
||||
|
||||
// W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the
|
||||
// R1CS that we're working with.
|
||||
Ok(Self {
|
||||
_gc1: PhantomData,
|
||||
_c2: PhantomData,
|
||||
_gc2: PhantomData,
|
||||
r1cs,
|
||||
cf_r1cs,
|
||||
poseidon_config: pp.poseidon_config.clone(),
|
||||
cm_params: pp.cm_params.clone(),
|
||||
cf_cm_params: pp.cf_cm_params.clone(),
|
||||
F,
|
||||
i: C1::ScalarField::zero(),
|
||||
z_0: z_0.clone(),
|
||||
z_i: z_0,
|
||||
w_i: w_dummy.clone(),
|
||||
u_i: u_dummy.clone(),
|
||||
W_i: w_dummy,
|
||||
U_i: u_dummy,
|
||||
// cyclefold running instance
|
||||
cf_W_i: cf_w_dummy.clone(),
|
||||
cf_U_i: cf_u_dummy.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Implements IVC.P of Nova+CycleFold
|
||||
fn prove_step(&mut self) -> Result<(), Error> {
|
||||
let augmented_F_circuit: AugmentedFCircuit<C1, C2, GC2, FC>;
|
||||
let cf_circuit: CycleFoldCircuit<C1, GC1>;
|
||||
|
||||
let z_i1 = self.F.step_native(self.z_i.clone())?;
|
||||
|
||||
// compute T and cmT for AugmentedFCircuit
|
||||
let (T, cmT) = self.compute_cmT()?;
|
||||
|
||||
let r_bits = ChallengeGadget::<C1>::get_challenge_native(
|
||||
&self.poseidon_config,
|
||||
self.u_i.clone(),
|
||||
self.U_i.clone(),
|
||||
cmT,
|
||||
)?;
|
||||
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
|
||||
.ok_or(Error::OutOfBounds)?;
|
||||
|
||||
// fold Nova instances
|
||||
let (W_i1, U_i1): (Witness<C1>, CommittedInstance<C1>) = NIFS::<C1, CP1>::fold_instances(
|
||||
r_Fr, &self.w_i, &self.u_i, &self.W_i, &self.U_i, &T, cmT,
|
||||
)?;
|
||||
|
||||
// folded instance output (public input, x)
|
||||
// u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1})
|
||||
let u_i1_x = U_i1.hash(
|
||||
&self.poseidon_config,
|
||||
self.i + C1::ScalarField::one(),
|
||||
self.z_0.clone(),
|
||||
z_i1.clone(),
|
||||
)?;
|
||||
|
||||
if self.i == C1::ScalarField::zero() {
|
||||
// base case
|
||||
augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC> {
|
||||
_gc2: PhantomData,
|
||||
poseidon_config: self.poseidon_config.clone(),
|
||||
i: Some(C1::ScalarField::zero()), // = i=0
|
||||
z_0: Some(self.z_0.clone()), // = z_i
|
||||
z_i: Some(self.z_i.clone()),
|
||||
u_i: Some(self.u_i.clone()), // = dummy
|
||||
U_i: Some(self.U_i.clone()), // = dummy
|
||||
U_i1: Some(U_i1.clone()),
|
||||
cmT: Some(cmT),
|
||||
F: self.F,
|
||||
x: Some(u_i1_x),
|
||||
cf_u_i: None,
|
||||
cf_U_i: None,
|
||||
cf_U_i1: None,
|
||||
cf_cmT: None,
|
||||
cf_r_nonnat: None,
|
||||
};
|
||||
|
||||
#[cfg(test)]
|
||||
NIFS::<C1, CP1>::verify_folded_instance(r_Fr, &self.u_i, &self.U_i, &U_i1, &cmT)?;
|
||||
} else {
|
||||
// CycleFold part:
|
||||
// get the vector used as public inputs 'x' in the CycleFold circuit
|
||||
let cf_u_i_x = [
|
||||
get_committed_instance_coordinates(&self.u_i),
|
||||
get_committed_instance_coordinates(&self.U_i),
|
||||
get_committed_instance_coordinates(&U_i1),
|
||||
]
|
||||
.concat();
|
||||
|
||||
cf_circuit = CycleFoldCircuit::<C1, GC1> {
|
||||
_gc: PhantomData,
|
||||
r_bits: Some(r_bits.clone()),
|
||||
cmT: Some(cmT),
|
||||
u_i: Some(self.u_i.clone()),
|
||||
U_i: Some(self.U_i.clone()),
|
||||
U_i1: Some(U_i1.clone()),
|
||||
x: Some(cf_u_i_x.clone()),
|
||||
};
|
||||
|
||||
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
|
||||
cf_circuit.generate_constraints(cs2.clone())?;
|
||||
|
||||
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
|
||||
let (cf_w_i, cf_x_i) = extract_w_x::<C1::BaseField>(&cs2);
|
||||
if cf_x_i != cf_u_i_x {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
if cf_x_i.len() != CF_IO_LEN {
|
||||
return Err(Error::NotExpectedLength(cf_x_i.len(), CF_IO_LEN));
|
||||
}
|
||||
|
||||
// fold cyclefold instances
|
||||
let cf_w_i = Witness::<C2>::new(cf_w_i.clone(), self.cf_r1cs.A.n_rows);
|
||||
let cf_u_i: CommittedInstance<C2> =
|
||||
cf_w_i.commit::<CP2>(&self.cf_cm_params, cf_x_i.clone())?;
|
||||
|
||||
// compute T* and cmT* for CycleFoldCircuit
|
||||
let (cf_T, cf_cmT) = self.compute_cf_cmT(&cf_w_i, &cf_u_i)?;
|
||||
|
||||
let cf_r_bits = CycleFoldChallengeGadget::<C2, GC2>::get_challenge_native(
|
||||
&self.poseidon_config,
|
||||
cf_u_i.clone(),
|
||||
self.cf_U_i.clone(),
|
||||
cf_cmT,
|
||||
)?;
|
||||
let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits))
|
||||
.ok_or(Error::OutOfBounds)?;
|
||||
|
||||
let (cf_W_i1, cf_U_i1) = NIFS::<C2, CP2>::fold_instances(
|
||||
cf_r_Fq,
|
||||
&self.cf_W_i,
|
||||
&self.cf_U_i,
|
||||
&cf_w_i,
|
||||
&cf_u_i,
|
||||
&cf_T,
|
||||
cf_cmT,
|
||||
)?;
|
||||
|
||||
augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC> {
|
||||
_gc2: PhantomData,
|
||||
poseidon_config: self.poseidon_config.clone(),
|
||||
i: Some(self.i),
|
||||
z_0: Some(self.z_0.clone()),
|
||||
z_i: Some(self.z_i.clone()),
|
||||
u_i: Some(self.u_i.clone()),
|
||||
U_i: Some(self.U_i.clone()),
|
||||
U_i1: Some(U_i1.clone()),
|
||||
cmT: Some(cmT),
|
||||
F: self.F,
|
||||
x: Some(u_i1_x),
|
||||
// cyclefold values
|
||||
cf_u_i: Some(cf_u_i.clone()),
|
||||
cf_U_i: Some(self.cf_U_i.clone()),
|
||||
cf_U_i1: Some(cf_U_i1.clone()),
|
||||
cf_cmT: Some(cf_cmT),
|
||||
cf_r_nonnat: Some(cf_r_Fq),
|
||||
};
|
||||
|
||||
self.cf_W_i = cf_W_i1.clone();
|
||||
self.cf_U_i = cf_U_i1.clone();
|
||||
|
||||
#[cfg(test)]
|
||||
{
|
||||
self.cf_r1cs.check_instance_relation(&cf_w_i, &cf_u_i)?;
|
||||
self.cf_r1cs
|
||||
.check_relaxed_instance_relation(&self.cf_W_i, &self.cf_U_i)?;
|
||||
}
|
||||
}
|
||||
|
||||
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
|
||||
|
||||
augmented_F_circuit.generate_constraints(cs.clone())?;
|
||||
|
||||
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
|
||||
let (w_i1, x_i1) = extract_w_x::<C1::ScalarField>(&cs);
|
||||
if x_i1[0] != u_i1_x {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
if x_i1.len() != 1 {
|
||||
return Err(Error::NotExpectedLength(x_i1.len(), 1));
|
||||
}
|
||||
|
||||
// set values for next iteration
|
||||
self.i += C1::ScalarField::one();
|
||||
self.z_i = z_i1.clone();
|
||||
self.w_i = Witness::<C1>::new(w_i1, self.r1cs.A.n_rows);
|
||||
self.u_i = self.w_i.commit::<CP1>(&self.cm_params, vec![u_i1_x])?;
|
||||
self.W_i = W_i1.clone();
|
||||
self.U_i = U_i1.clone();
|
||||
|
||||
#[cfg(test)]
|
||||
{
|
||||
self.r1cs.check_instance_relation(&self.w_i, &self.u_i)?;
|
||||
self.r1cs
|
||||
.check_relaxed_instance_relation(&self.W_i, &self.U_i)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn state(&self) -> Vec<C1::ScalarField> {
|
||||
self.z_i.clone()
|
||||
}
|
||||
fn instances(
|
||||
&self,
|
||||
) -> (
|
||||
Self::CommittedInstanceWithWitness,
|
||||
Self::CommittedInstanceWithWitness,
|
||||
Self::CFCommittedInstanceWithWitness,
|
||||
) {
|
||||
(
|
||||
(self.U_i.clone(), self.W_i.clone()),
|
||||
(self.u_i.clone(), self.w_i.clone()),
|
||||
(self.cf_U_i.clone(), self.cf_W_i.clone()),
|
||||
)
|
||||
}
|
||||
|
||||
/// Implements IVC.V of Nova+CycleFold
|
||||
fn verify(
|
||||
vp: Self::VerifierParam,
|
||||
z_0: Vec<C1::ScalarField>, // initial state
|
||||
z_i: Vec<C1::ScalarField>, // last state
|
||||
num_steps: C1::ScalarField,
|
||||
running_instance: Self::CommittedInstanceWithWitness,
|
||||
incoming_instance: Self::CommittedInstanceWithWitness,
|
||||
cyclefold_instance: Self::CFCommittedInstanceWithWitness,
|
||||
) -> Result<(), Error> {
|
||||
let (U_i, W_i) = running_instance;
|
||||
let (u_i, w_i) = incoming_instance;
|
||||
let (cf_U_i, cf_W_i) = cyclefold_instance;
|
||||
|
||||
if u_i.x.len() != 1 || U_i.x.len() != 1 {
|
||||
return Err(Error::IVCVerificationFail);
|
||||
}
|
||||
|
||||
// check that u_i's output points to the running instance
|
||||
// u_i.X == H(i, z_0, z_i, U_i)
|
||||
let expected_u_i_x = U_i.hash(&vp.poseidon_config, num_steps, z_0, z_i.clone())?;
|
||||
if expected_u_i_x != u_i.x[0] {
|
||||
return Err(Error::IVCVerificationFail);
|
||||
}
|
||||
|
||||
// check u_i.cmE==0, u_i.u==1 (i.e. u_i is an un-relaxed instance)
|
||||
if !u_i.cmE.is_zero() || !u_i.u.is_one() {
|
||||
return Err(Error::IVCVerificationFail);
|
||||
}
|
||||
|
||||
// check R1CS satisfiability
|
||||
vp.r1cs.check_instance_relation(&w_i, &u_i)?;
|
||||
// check RelaxedR1CS satisfiability
|
||||
vp.r1cs.check_relaxed_instance_relation(&W_i, &U_i)?;
|
||||
|
||||
// check CycleFold RelaxedR1CS satisfiability
|
||||
vp.cf_r1cs
|
||||
.check_relaxed_instance_relation(&cf_W_i, &cf_U_i)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
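// End-to-end usage sketch of this FoldingScheme implementation (see `test_ivc` at the end of
// this file for the concrete pallas/vesta instantiation):
//
//     let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone())?;
//     for _ in 0..num_steps {
//         nova.prove_step()?;
//     }
//     NOVA::verify(
//         verifier_params,
//         z_0,
//         nova.z_i,
//         nova.i,
//         (nova.U_i, nova.W_i),
//         (nova.u_i, nova.w_i),
//         (nova.cf_U_i, nova.cf_W_i),
//     )?;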
|
||||
|
||||
impl<C1, GC1, C2, GC2, FC, CP1, CP2> Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
CP1: CommitmentProver<C1>,
|
||||
CP2: CommitmentProver<C2>,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
{
|
||||
// computes T and cmT for the AugmentedFCircuit
|
||||
fn compute_cmT(&self) -> Result<(Vec<C1::ScalarField>, C1), Error> {
|
||||
NIFS::<C1, CP1>::compute_cmT(
|
||||
&self.cm_params,
|
||||
&self.r1cs,
|
||||
&self.w_i,
|
||||
&self.u_i,
|
||||
&self.W_i,
|
||||
&self.U_i,
|
||||
)
|
||||
}
|
||||
// computes T* and cmT* for the CycleFoldCircuit
|
||||
fn compute_cf_cmT(
|
||||
&self,
|
||||
cf_w_i: &Witness<C2>,
|
||||
cf_u_i: &CommittedInstance<C2>,
|
||||
) -> Result<(Vec<C2::ScalarField>, C2), Error> {
|
||||
NIFS::<C2, CP2>::compute_cyclefold_cmT(
|
||||
&self.cf_cm_params,
|
||||
&self.cf_r1cs,
|
||||
cf_w_i,
|
||||
cf_u_i,
|
||||
&self.cf_W_i,
|
||||
&self.cf_U_i,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// helper method to get the r1cs from the ConstraintSynthesizer
|
||||
pub fn get_r1cs_from_cs<F: PrimeField>(
|
||||
circuit: impl ConstraintSynthesizer<F>,
|
||||
) -> Result<R1CS<F>, Error> {
|
||||
let cs = ConstraintSystem::<F>::new_ref();
|
||||
circuit.generate_constraints(cs.clone())?;
|
||||
cs.finalize();
|
||||
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
|
||||
let r1cs = extract_r1cs::<F>(&cs);
|
||||
Ok(r1cs)
|
||||
}
|
||||
|
||||
/// helper method to get the R1CS for both the AugmentedFCircuit and the CycleFold circuit
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn get_r1cs<C1, GC1, C2, GC2, FC>(
|
||||
poseidon_config: &PoseidonConfig<C1::ScalarField>,
|
||||
F_circuit: FC,
|
||||
) -> Result<(R1CS<C1::ScalarField>, R1CS<C2::ScalarField>), Error>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
let augmented_F_circuit =
|
||||
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(poseidon_config, F_circuit);
|
||||
let cf_circuit = CycleFoldCircuit::<C1, GC1>::empty();
|
||||
let r1cs = get_r1cs_from_cs::<C1::ScalarField>(augmented_F_circuit)?;
|
||||
let cf_r1cs = get_r1cs_from_cs::<C2::ScalarField>(cf_circuit)?;
|
||||
Ok((r1cs, cf_r1cs))
|
||||
}
|
||||
|
||||
/// helper method to get the pedersen params length for both the AugmentedFCircuit and the
|
||||
/// CycleFold circuit
|
||||
pub fn get_pedersen_params_len<C1, GC1, C2, GC2, FC>(
|
||||
poseidon_config: &PoseidonConfig<C1::ScalarField>,
|
||||
F_circuit: FC,
|
||||
) -> Result<(usize, usize), Error>
|
||||
where
|
||||
C1: CurveGroup,
|
||||
GC1: CurveVar<C1, CF2<C1>>,
|
||||
C2: CurveGroup,
|
||||
GC2: CurveVar<C2, CF2<C2>>,
|
||||
FC: FCircuit<C1::ScalarField>,
|
||||
<C1 as CurveGroup>::BaseField: PrimeField,
|
||||
<C2 as CurveGroup>::BaseField: PrimeField,
|
||||
<C1 as Group>::ScalarField: Absorb,
|
||||
<C2 as Group>::ScalarField: Absorb,
|
||||
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
|
||||
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
|
||||
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
|
||||
{
|
||||
let (r1cs, cf_r1cs) = get_r1cs::<C1, GC1, C2, GC2, FC>(poseidon_config, F_circuit)?;
|
||||
Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows))
|
||||
}
|
||||
|
||||
pub(crate) fn get_committed_instance_coordinates<C: CurveGroup>(
|
||||
u: &CommittedInstance<C>,
|
||||
) -> Vec<C::BaseField> {
|
||||
let zero = (&C::BaseField::zero(), &C::BaseField::one());
|
||||
|
||||
let cmE = u.cmE.into_affine();
|
||||
let (cmE_x, cmE_y) = cmE.xy().unwrap_or(zero);
|
||||
|
||||
let cmW = u.cmW.into_affine();
|
||||
let (cmW_x, cmW_y) = cmW.xy().unwrap_or(zero);
|
||||
vec![*cmE_x, *cmE_y, *cmW_x, *cmW_y]
|
||||
}
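// Note: these affine coordinates are exactly what `prove_step` concatenates for u_i, U_i and
// U_{i+1} to build the CycleFold circuit's public input:
//
//     let cf_u_i_x = [
//         get_committed_instance_coordinates(&self.u_i),
//         get_committed_instance_coordinates(&self.U_i),
//         get_committed_instance_coordinates(&U_i1),
//     ]
//     .concat();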
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_pallas::{constraints::GVar, Fr, Projective};
|
||||
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
|
||||
|
||||
use crate::commitment::pedersen::Pedersen;
|
||||
use crate::frontend::tests::CubicFCircuit;
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
|
||||
#[test]
|
||||
fn test_ivc() {
|
||||
type NOVA = Nova<
|
||||
Projective,
|
||||
GVar,
|
||||
Projective2,
|
||||
GVar2,
|
||||
CubicFCircuit<Fr>,
|
||||
Pedersen<Projective>,
|
||||
Pedersen<Projective2>,
|
||||
>;
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let F_circuit = CubicFCircuit::<Fr>::new(());
|
||||
let z_0 = vec![Fr::from(3_u32)];
|
||||
|
||||
let (cm_len, cf_cm_len) =
|
||||
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
|
||||
&poseidon_config,
|
||||
F_circuit,
|
||||
)
|
||||
.unwrap();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
|
||||
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
|
||||
|
||||
let prover_params =
|
||||
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
|
||||
poseidon_config: poseidon_config.clone(),
|
||||
cm_params: pedersen_params,
|
||||
cf_cm_params: cf_pedersen_params,
|
||||
};
|
||||
|
||||
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
|
||||
|
||||
let num_steps: usize = 3;
|
||||
for _ in 0..num_steps {
|
||||
nova.prove_step().unwrap();
|
||||
}
|
||||
assert_eq!(Fr::from(num_steps as u32), nova.i);
|
||||
|
||||
let verifier_params = VerifierParams::<Projective, Projective2> {
|
||||
poseidon_config,
|
||||
r1cs: nova.r1cs,
|
||||
cf_r1cs: nova.cf_r1cs,
|
||||
};
|
||||
NOVA::verify(
|
||||
verifier_params,
|
||||
z_0,
|
||||
nova.z_i,
|
||||
nova.i,
|
||||
(nova.U_i, nova.W_i),
|
||||
(nova.u_i, nova.w_i),
|
||||
(nova.cf_U_i, nova.cf_W_i),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/nifs.rs (new file)
@@ -0,0 +1,481 @@
|
||||
use ark_crypto_primitives::sponge::Absorb;
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_std::One;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use super::{CommittedInstance, Witness};
|
||||
use crate::ccs::r1cs::R1CS;
|
||||
use crate::commitment::CommitmentProver;
|
||||
use crate::transcript::Transcript;
|
||||
use crate::utils::vec::*;
|
||||
use crate::Error;
|
||||
|
||||
/// Implements the Non-Interactive Folding Scheme described in section 4 of
|
||||
/// [Nova](https://eprint.iacr.org/2021/370.pdf)
|
||||
pub struct NIFS<C: CurveGroup, CP: CommitmentProver<C>> {
|
||||
_c: PhantomData<C>,
|
||||
_cp: PhantomData<CP>,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup, CP: CommitmentProver<C>> NIFS<C, CP>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
{
|
||||
// compute_T: compute cross-terms T
|
||||
pub fn compute_T(
|
||||
r1cs: &R1CS<C::ScalarField>,
|
||||
u1: C::ScalarField,
|
||||
u2: C::ScalarField,
|
||||
z1: &[C::ScalarField],
|
||||
z2: &[C::ScalarField],
|
||||
) -> Result<Vec<C::ScalarField>, Error> {
|
||||
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
|
||||
|
||||
// this is parallelizable (for the future)
|
||||
let Az1 = mat_vec_mul_sparse(&A, z1)?;
|
||||
let Bz1 = mat_vec_mul_sparse(&B, z1)?;
|
||||
let Cz1 = mat_vec_mul_sparse(&C, z1)?;
|
||||
let Az2 = mat_vec_mul_sparse(&A, z2)?;
|
||||
let Bz2 = mat_vec_mul_sparse(&B, z2)?;
|
||||
let Cz2 = mat_vec_mul_sparse(&C, z2)?;
|
||||
|
||||
let Az1_Bz2 = hadamard(&Az1, &Bz2)?;
|
||||
let Az2_Bz1 = hadamard(&Az2, &Bz1)?;
|
||||
let u1Cz2 = vec_scalar_mul(&Cz2, &u1);
|
||||
let u2Cz1 = vec_scalar_mul(&Cz1, &u2);
|
||||
|
||||
vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1)
|
||||
}
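// Why this is the right cross term (a short derivation, under the relaxed R1CS relation
// Az ∘ Bz = u*Cz + E): write z3 = z1 + r*z2 and u3 = u1 + r*u2, then
//
//     Az3 ∘ Bz3 = Az1∘Bz1 + r*(Az1∘Bz2 + Az2∘Bz1) + r^2*(Az2∘Bz2)
//               = (u1*Cz1 + E1) + r*(T + u1*Cz2 + u2*Cz1) + r^2*(u2*Cz2 + E2)
//               = u3*Cz3 + (E1 + r*T + r^2*E2),
//
// which is why `fold_witness` below sets E3 = E1 + r*T + r^2*E2, and `fold_committed_instance`
// sets cmE3 = cmE1 + r*cmT + r^2*cmE2 through the homomorphic commitment.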
|
||||
|
||||
pub fn fold_witness(
|
||||
r: C::ScalarField,
|
||||
w1: &Witness<C>,
|
||||
w2: &Witness<C>,
|
||||
T: &[C::ScalarField],
|
||||
rT: C::ScalarField,
|
||||
) -> Result<Witness<C>, Error> {
|
||||
let r2 = r * r;
|
||||
let E: Vec<C::ScalarField> = vec_add(
|
||||
&vec_add(&w1.E, &vec_scalar_mul(T, &r))?,
|
||||
&vec_scalar_mul(&w2.E, &r2),
|
||||
)?;
|
||||
let rE = w1.rE + r * rT + r2 * w2.rE;
|
||||
let W: Vec<C::ScalarField> = w1.W.iter().zip(&w2.W).map(|(a, b)| *a + (r * b)).collect();
|
||||
|
||||
let rW = w1.rW + r * w2.rW;
|
||||
Ok(Witness::<C> { E, rE, W, rW })
|
||||
}
|
||||
|
||||
pub fn fold_committed_instance(
|
||||
r: C::ScalarField,
|
||||
ci1: &CommittedInstance<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
cmT: &C,
|
||||
) -> CommittedInstance<C> {
|
||||
let r2 = r * r;
|
||||
let cmE = ci1.cmE + cmT.mul(r) + ci2.cmE.mul(r2);
|
||||
let u = ci1.u + r * ci2.u;
|
||||
let cmW = ci1.cmW + ci2.cmW.mul(r);
|
||||
let x = ci1
|
||||
.x
|
||||
.iter()
|
||||
.zip(&ci2.x)
|
||||
.map(|(a, b)| *a + (r * b))
|
||||
.collect::<Vec<C::ScalarField>>();
|
||||
|
||||
CommittedInstance::<C> { cmE, u, cmW, x }
|
||||
}
|
||||
|
||||
/// NIFS.P is the consecutive combination of compute_cmT with fold_instances
|
||||
|
||||
/// compute_cmT is part of the NIFS.P logic
|
||||
pub fn compute_cmT(
|
||||
cm_prover_params: &CP::Params,
|
||||
r1cs: &R1CS<C::ScalarField>,
|
||||
w1: &Witness<C>,
|
||||
ci1: &CommittedInstance<C>,
|
||||
w2: &Witness<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
) -> Result<(Vec<C::ScalarField>, C), Error> {
|
||||
let z1: Vec<C::ScalarField> = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat();
|
||||
let z2: Vec<C::ScalarField> = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat();
|
||||
|
||||
// compute cross terms
|
||||
let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?;
|
||||
// use r_T=1 since we don't need hiding property for cm(T)
|
||||
let cmT = CP::commit(cm_prover_params, &T, &C::ScalarField::one())?;
|
||||
Ok((T, cmT))
|
||||
}
|
||||
pub fn compute_cyclefold_cmT(
|
||||
cm_prover_params: &CP::Params,
|
||||
r1cs: &R1CS<C::ScalarField>, // R1CS over C2.Fr=C1.Fq (here C=C2)
|
||||
w1: &Witness<C>,
|
||||
ci1: &CommittedInstance<C>,
|
||||
w2: &Witness<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
) -> Result<(Vec<C::ScalarField>, C), Error>
|
||||
where
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
{
|
||||
let z1: Vec<C::ScalarField> = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat();
|
||||
let z2: Vec<C::ScalarField> = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat();
|
||||
|
||||
// compute cross terms
|
||||
let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?;
|
||||
// use r_T=1 since we don't need hiding property for cm(T)
|
||||
let cmT = CP::commit(cm_prover_params, &T, &C::ScalarField::one())?;
|
||||
Ok((T, cmT))
|
||||
}
|
||||
|
||||
/// fold_instances is part of the NIFS.P logic described in
|
||||
/// [Nova](https://eprint.iacr.org/2021/370.pdf)'s section 4. It returns the folded Committed
|
||||
/// Instances and the Witness.
|
||||
pub fn fold_instances(
|
||||
r: C::ScalarField,
|
||||
w1: &Witness<C>,
|
||||
ci1: &CommittedInstance<C>,
|
||||
w2: &Witness<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
T: &[C::ScalarField],
|
||||
cmT: C,
|
||||
) -> Result<(Witness<C>, CommittedInstance<C>), Error> {
|
||||
// fold witness
|
||||
// use r_T=1 since we don't need hiding property for cm(T)
|
||||
let w3 = NIFS::<C, CP>::fold_witness(r, w1, w2, T, C::ScalarField::one())?;
|
||||
|
||||
// fold committed instances
|
||||
let ci3 = NIFS::<C, CP>::fold_committed_instance(r, ci1, ci2, &cmT);
|
||||
|
||||
Ok((w3, ci3))
|
||||
}
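// Usage sketch of NIFS.P followed by NIFS.V (the same flow as `prepare_simple_fold_inputs`
// in the tests below; `r` is the challenge derived from the transcript):
//
//     // NIFS.P
//     let (T, cmT) = NIFS::<C, CP>::compute_cmT(&cm_params, &r1cs, &w1, &ci1, &w2, &ci2)?;
//     let (w3, ci3) = NIFS::<C, CP>::fold_instances(r, &w1, &ci1, &w2, &ci2, &T, cmT)?;
//     // NIFS.V only folds the committed instances
//     let ci3_v = NIFS::<C, CP>::verify(r, &ci1, &ci2, &cmT);
//     assert_eq!(ci3_v, ci3);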
|
||||
|
||||
/// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s
|
||||
/// section 4. It returns the folded Committed Instance
|
||||
pub fn verify(
|
||||
// r comes from the transcript, and is an n-bit (N_BITS_CHALLENGE) element
|
||||
r: C::ScalarField,
|
||||
ci1: &CommittedInstance<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
cmT: &C,
|
||||
) -> CommittedInstance<C> {
|
||||
NIFS::<C, CP>::fold_committed_instance(r, ci1, ci2, cmT)
|
||||
}
|
||||
|
||||
/// Verify committed folded instance (ci) relations. Notice that this method does not open the
|
||||
/// commitments, but just checks that the given committed instances (ci1, ci2) when folded
|
||||
/// result in the folded committed instance (ci3) values.
|
||||
pub fn verify_folded_instance(
|
||||
r: C::ScalarField,
|
||||
ci1: &CommittedInstance<C>,
|
||||
ci2: &CommittedInstance<C>,
|
||||
ci3: &CommittedInstance<C>,
|
||||
cmT: &C,
|
||||
) -> Result<(), Error> {
|
||||
let expected = Self::fold_committed_instance(r, ci1, ci2, cmT);
|
||||
if ci3.cmE != expected.cmE
|
||||
|| ci3.u != expected.u
|
||||
|| ci3.cmW != expected.cmW
|
||||
|| ci3.x != expected.x
|
||||
{
|
||||
return Err(Error::NotSatisfied);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn prove_commitments(
|
||||
tr: &mut impl Transcript<C>,
|
||||
cm_prover_params: &CP::Params,
|
||||
w: &Witness<C>,
|
||||
ci: &CommittedInstance<C>,
|
||||
T: Vec<C::ScalarField>,
|
||||
cmT: &C,
|
||||
) -> Result<[CP::Proof; 3], Error> {
|
||||
let cmE_proof = CP::prove(cm_prover_params, tr, &ci.cmE, &w.E, &w.rE)?;
|
||||
let cmW_proof = CP::prove(cm_prover_params, tr, &ci.cmW, &w.W, &w.rW)?;
|
||||
let cmT_proof = CP::prove(cm_prover_params, tr, cmT, &T, &C::ScalarField::one())?; // cm(T) is committed with rT=1
|
||||
Ok([cmE_proof, cmW_proof, cmT_proof])
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
|
||||
use ark_ff::{BigInteger, PrimeField};
|
||||
use ark_pallas::{Fr, Projective};
|
||||
use ark_std::{ops::Mul, UniformRand, Zero};
|
||||
|
||||
use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z};
|
||||
use crate::commitment::pedersen::{Params as PedersenParams, Pedersen};
|
||||
use crate::folding::nova::circuits::ChallengeGadget;
|
||||
use crate::folding::nova::traits::NovaR1CS;
|
||||
use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};
|
||||
use crate::utils::vec::vec_scalar_mul;
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub(crate) fn prepare_simple_fold_inputs() -> (
|
||||
PedersenParams<Projective>,
|
||||
PoseidonConfig<Fr>,
|
||||
R1CS<Fr>,
|
||||
Witness<Projective>, // w1
|
||||
CommittedInstance<Projective>, // ci1
|
||||
Witness<Projective>, // w2
|
||||
CommittedInstance<Projective>, // ci2
|
||||
Witness<Projective>, // w3
|
||||
CommittedInstance<Projective>, // ci3
|
||||
Vec<Fr>, // T
|
||||
Projective, // cmT
|
||||
Vec<bool>, // r_bits
|
||||
Fr, // r_Fr
|
||||
) {
|
||||
let r1cs = get_test_r1cs();
|
||||
let z1 = get_test_z(3);
|
||||
let z2 = get_test_z(4);
|
||||
let (w1, x1) = r1cs.split_z(&z1);
|
||||
let (w2, x2) = r1cs.split_z(&z2);
|
||||
|
||||
let w1 = Witness::<Projective>::new(w1.clone(), r1cs.A.n_rows);
|
||||
let w2 = Witness::<Projective>::new(w2.clone(), r1cs.A.n_rows);
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
|
||||
|
||||
// compute committed instances
|
||||
let ci1 = w1
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, x1.clone())
|
||||
.unwrap();
|
||||
let ci2 = w2
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, x2.clone())
|
||||
.unwrap();
|
||||
|
||||
// NIFS.P
|
||||
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
|
||||
&pedersen_params,
|
||||
&r1cs,
|
||||
&w1,
|
||||
&ci1,
|
||||
&w2,
|
||||
&ci2,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
|
||||
&poseidon_config,
|
||||
ci1.clone(),
|
||||
ci2.clone(),
|
||||
cmT,
|
||||
)
|
||||
.unwrap();
|
||||
let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
|
||||
|
||||
let (w3, ci3) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
|
||||
r_Fr, &w1, &ci1, &w2, &ci2, &T, cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
(
|
||||
pedersen_params,
|
||||
poseidon_config,
|
||||
r1cs,
|
||||
w1,
|
||||
ci1,
|
||||
w2,
|
||||
ci2,
|
||||
w3,
|
||||
ci3,
|
||||
T,
|
||||
cmT,
|
||||
r_bits,
|
||||
r_Fr,
|
||||
)
|
||||
}
|
||||
|
||||
// fold 2 dummy instances and check that the folded instance satisfies the relaxed R1CS relation
|
||||
#[test]
|
||||
fn test_nifs_fold_dummy() {
|
||||
let r1cs = get_test_r1cs::<Fr>();
|
||||
let z1 = get_test_z(3);
|
||||
let (w1, x1) = r1cs.split_z(&z1);
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
|
||||
|
||||
// dummy instance, witness and public inputs zeroes
|
||||
let w_dummy = Witness::<Projective>::new(vec![Fr::zero(); w1.len()], r1cs.A.n_rows);
|
||||
let mut u_dummy = w_dummy
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, vec![Fr::zero(); x1.len()])
|
||||
.unwrap();
|
||||
u_dummy.u = Fr::zero();
|
||||
|
||||
let w_i = w_dummy.clone();
|
||||
let u_i = u_dummy.clone();
|
||||
let W_i = w_dummy.clone();
|
||||
let U_i = u_dummy.clone();
|
||||
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
|
||||
|
||||
let r_Fr = Fr::from(3_u32);
|
||||
|
||||
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
|
||||
&pedersen_params,
|
||||
&r1cs,
|
||||
&w_i,
|
||||
&u_i,
|
||||
&W_i,
|
||||
&U_i,
|
||||
)
|
||||
.unwrap();
|
||||
let (W_i1, U_i1) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
|
||||
r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT,
|
||||
)
|
||||
.unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
|
||||
}
|
||||
|
||||
// fold 2 instances into one
|
||||
#[test]
|
||||
fn test_nifs_one_fold() {
|
||||
let (pedersen_params, poseidon_config, r1cs, w1, ci1, w2, ci2, w3, ci3, T, cmT, _, r) =
|
||||
prepare_simple_fold_inputs();
|
||||
|
||||
// NIFS.V
|
||||
let ci3_v = NIFS::<Projective, Pedersen<Projective>>::verify(r, &ci1, &ci2, &cmT);
|
||||
assert_eq!(ci3_v, ci3);
|
||||
|
||||
// check that relations hold for the two input instances and the folded one
|
||||
r1cs.check_relaxed_instance_relation(&w1, &ci1).unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&w2, &ci2).unwrap();
|
||||
r1cs.check_relaxed_instance_relation(&w3, &ci3).unwrap();
|
||||
|
||||
// check that the folded commitments in the folded instance (ci3) match the result of
// committing w3 with the folded rE, rW
|
||||
let ci3_expected = w3
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, ci3.x.clone())
|
||||
.unwrap();
|
||||
assert_eq!(ci3_expected.cmE, ci3.cmE);
|
||||
assert_eq!(ci3_expected.cmW, ci3.cmW);
|
||||
|
||||
// the next equalities should hold since we started from two instances whose cmE commit to zero-vector E's
|
||||
assert_eq!(ci3.cmE, cmT.mul(r));
|
||||
assert_eq!(w3.E, vec_scalar_mul(&T, &r));
|
||||
|
||||
// NIFS.Verify_Folded_Instance:
|
||||
NIFS::<Projective, Pedersen<Projective>>::verify_folded_instance(r, &ci1, &ci2, &ci3, &cmT)
|
||||
.unwrap();
|
||||
|
||||
// init Prover's transcript
|
||||
let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
// init Verifier's transcript
|
||||
let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
|
||||
// prove the ci3.cmE, ci3.cmW, cmT commitments
|
||||
let cm_proofs = NIFS::<Projective, Pedersen<Projective>>::prove_commitments(
|
||||
&mut transcript_p,
|
||||
&pedersen_params,
|
||||
&w3,
|
||||
&ci3,
|
||||
T,
|
||||
&cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// verify the ci3.cmE, ci3.cmW, cmT commitments
|
||||
assert_eq!(cm_proofs.len(), 3);
|
||||
Pedersen::<Projective>::verify(
|
||||
&pedersen_params,
|
||||
&mut transcript_v,
|
||||
ci3.cmE,
|
||||
cm_proofs[0].clone(),
|
||||
)
|
||||
.unwrap();
|
||||
Pedersen::<Projective>::verify(
|
||||
&pedersen_params,
|
||||
&mut transcript_v,
|
||||
ci3.cmW,
|
||||
cm_proofs[1].clone(),
|
||||
)
|
||||
.unwrap();
|
||||
Pedersen::<Projective>::verify(
|
||||
&pedersen_params,
|
||||
&mut transcript_v,
|
||||
cmT,
|
||||
cm_proofs[2].clone(),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nifs_fold_loop() {
|
||||
let r1cs = get_test_r1cs();
|
||||
let z = get_test_z(3);
|
||||
let (w, x) = r1cs.split_z(&z);
|
||||
|
||||
let mut rng = ark_std::test_rng();
|
||||
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
|
||||
|
||||
// prepare the running instance
|
||||
let mut running_instance_w = Witness::<Projective>::new(w.clone(), r1cs.A.n_rows);
|
||||
let mut running_committed_instance = running_instance_w
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, x)
|
||||
.unwrap();
|
||||
|
||||
r1cs.check_relaxed_instance_relation(&running_instance_w, &running_committed_instance)
|
||||
.unwrap();
|
||||
|
||||
let num_iters = 10;
|
||||
for i in 0..num_iters {
|
||||
// prepare the incomming instance
|
||||
let incomming_instance_z = get_test_z(i + 4);
|
||||
let (w, x) = r1cs.split_z(&incomming_instance_z);
|
||||
let incomming_instance_w = Witness::<Projective>::new(w.clone(), r1cs.A.n_rows);
|
||||
let incomming_committed_instance = incomming_instance_w
|
||||
.commit::<Pedersen<Projective>>(&pedersen_params, x)
|
||||
.unwrap();
|
||||
r1cs.check_relaxed_instance_relation(
|
||||
&incomming_instance_w,
|
||||
&incomming_committed_instance,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let r = Fr::rand(&mut rng); // folding challenge would come from the RO
|
||||
|
||||
// NIFS.P
|
||||
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
|
||||
&pedersen_params,
|
||||
&r1cs,
|
||||
&running_instance_w,
|
||||
&running_committed_instance,
|
||||
&incomming_instance_w,
|
||||
&incomming_committed_instance,
|
||||
)
|
||||
.unwrap();
|
||||
let (folded_w, _) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
|
||||
r,
|
||||
&running_instance_w,
|
||||
&running_committed_instance,
|
||||
&incomming_instance_w,
|
||||
&incomming_committed_instance,
|
||||
&T,
|
||||
cmT,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// NIFS.V
|
||||
let folded_committed_instance = NIFS::<Projective, Pedersen<Projective>>::verify(
|
||||
r,
|
||||
&running_committed_instance,
|
||||
&incomming_committed_instance,
|
||||
&cmT,
|
||||
);
|
||||
|
||||
r1cs.check_relaxed_instance_relation(&folded_w, &folded_committed_instance)
|
||||
.unwrap();
|
||||
|
||||
// set running_instance for next loop iteration
|
||||
running_instance_w = folded_w;
|
||||
running_committed_instance = folded_committed_instance;
|
||||
}
|
||||
}
|
||||
}
|
||||
folding-schemes/src/folding/nova/traits.rs (new file)
@@ -0,0 +1,67 @@
|
||||
use ark_crypto_primitives::sponge::Absorb;
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_std::{One, Zero};
|
||||
|
||||
use super::{CommittedInstance, Witness};
|
||||
use crate::ccs::r1cs::R1CS;
|
||||
use crate::Error;
|
||||
|
||||
/// NovaR1CS extends R1CS methods with Nova specific methods
|
||||
pub trait NovaR1CS<C: CurveGroup> {
|
||||
/// returns a dummy instance (Witness and CommittedInstance) for the current R1CS structure
|
||||
fn dummy_instance(&self) -> (Witness<C>, CommittedInstance<C>);
|
||||
|
||||
/// checks the R1CS relation (un-relaxed) for the given Witness and CommittedInstance.
|
||||
fn check_instance_relation(
|
||||
&self,
|
||||
W: &Witness<C>,
|
||||
U: &CommittedInstance<C>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// checks the Relaxed R1CS relation (corresponding to the current R1CS) for the given Witness
|
||||
/// and CommittedInstance.
|
||||
fn check_relaxed_instance_relation(
|
||||
&self,
|
||||
W: &Witness<C>,
|
||||
U: &CommittedInstance<C>,
|
||||
) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
impl<C: CurveGroup> NovaR1CS<C> for R1CS<C::ScalarField>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
|
||||
{
|
||||
fn dummy_instance(&self) -> (Witness<C>, CommittedInstance<C>) {
|
||||
let w_len = self.A.n_cols - 1 - self.l;
|
||||
let w_dummy = Witness::<C>::new(vec![C::ScalarField::zero(); w_len], self.A.n_rows);
|
||||
let u_dummy = CommittedInstance::<C>::dummy(self.l);
|
||||
(w_dummy, u_dummy)
|
||||
}
|
||||
|
||||
fn check_instance_relation(
|
||||
&self,
|
||||
W: &Witness<C>,
|
||||
U: &CommittedInstance<C>,
|
||||
) -> Result<(), Error> {
|
||||
if U.cmE != C::zero() || U.u != C::ScalarField::one() {
|
||||
return Err(Error::R1CSUnrelaxedFail);
|
||||
}
|
||||
|
||||
let Z: Vec<C::ScalarField> = [vec![U.u], U.x.to_vec(), W.W.to_vec()].concat();
|
||||
self.check_relation(&Z)
|
||||
}
|
||||
|
||||
fn check_relaxed_instance_relation(
|
||||
&self,
|
||||
W: &Witness<C>,
|
||||
U: &CommittedInstance<C>,
|
||||
) -> Result<(), Error> {
|
||||
let mut rel_r1cs = self.clone().relax();
|
||||
rel_r1cs.u = U.u;
|
||||
rel_r1cs.E = W.E.clone();
|
||||
|
||||
let Z: Vec<C::ScalarField> = [vec![U.u], U.x.to_vec(), W.W.to_vec()].concat();
|
||||
rel_r1cs.check_relation(&Z)
|
||||
}
|
||||
}
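// Usage sketch (as in the NIFS tests): a dummy instance trivially satisfies the relaxed
// relation, since with u = 0 and E = 0 both sides of Az∘Bz = u*Cz + E are zero:
//
//     let (w_dummy, u_dummy) = r1cs.dummy_instance();
//     r1cs.check_relaxed_instance_relation(&w_dummy, &u_dummy)?;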
|
||||
folding-schemes/src/folding/protogalaxy/folding.rs (new file)
@@ -0,0 +1,599 @@
|
||||
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
|
||||
use ark_crypto_primitives::sponge::Absorb;
|
||||
use ark_ec::{CurveGroup, Group};
|
||||
use ark_ff::PrimeField;
|
||||
use ark_poly::{
|
||||
univariate::{DensePolynomial, SparsePolynomial},
|
||||
DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
|
||||
};
|
||||
use ark_std::log2;
|
||||
use ark_std::{cfg_into_iter, Zero};
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use super::traits::ProtoGalaxyTranscript;
|
||||
use super::utils::{all_powers, betas_star, exponential_powers};
|
||||
use super::ProtoGalaxyError;
|
||||
use super::{CommittedInstance, Witness};
|
||||
|
||||
use crate::ccs::r1cs::R1CS;
|
||||
use crate::transcript::Transcript;
|
||||
use crate::utils::{bit::bit_decompose, vec::*};
|
||||
use crate::Error;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
/// Implements the protocol described in section 4 of
|
||||
/// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
|
||||
pub struct Folding<C: CurveGroup> {
|
||||
_phantom: PhantomData<C>,
|
||||
}
|
||||
impl<C: CurveGroup> Folding<C>
|
||||
where
|
||||
<C as Group>::ScalarField: Absorb,
|
||||
<C as CurveGroup>::BaseField: Absorb,
|
||||
{
|
||||
#![allow(clippy::type_complexity)]
|
||||
/// implements the non-interactive Prover from the folding scheme described in section 4
|
||||
pub fn prove(
|
||||
transcript: &mut (impl Transcript<C> + ProtoGalaxyTranscript<C>),
|
||||
r1cs: &R1CS<C::ScalarField>,
|
||||
// running instance
|
||||
instance: &CommittedInstance<C>,
|
||||
w: &Witness<C::ScalarField>,
|
||||
// incoming instances
|
||||
vec_instances: &[CommittedInstance<C>],
|
||||
vec_w: &[Witness<C::ScalarField>],
|
||||
) -> Result<
|
||||
(
|
||||
CommittedInstance<C>,
|
||||
Witness<C::ScalarField>,
|
||||
Vec<C::ScalarField>, // F_X coeffs
|
||||
Vec<C::ScalarField>, // K_X coeffs
|
||||
),
|
||||
Error,
|
||||
> {
|
||||
if vec_instances.len() != vec_w.len() {
|
||||
return Err(Error::NotSameLength(
|
||||
"vec_instances.len()".to_string(),
|
||||
vec_instances.len(),
|
||||
"vec_w.len()".to_string(),
|
||||
vec_w.len(),
|
||||
));
|
||||
}
|
||||
let d = 2; // for the moment hardcoded to 2 since it only supports R1CS
|
||||
let k = vec_instances.len();
|
||||
let t = instance.betas.len();
|
||||
let n = r1cs.A.n_cols;
|
||||
if w.w.len() != n {
|
||||
return Err(Error::NotSameLength(
|
||||
"w.w.len()".to_string(),
|
||||
w.w.len(),
|
||||
"n".to_string(),
|
||||
n,
|
||||
));
|
||||
}
|
||||
if log2(n) as usize != t {
|
||||
return Err(Error::NotEqual);
|
||||
}
|
||||
if !(k + 1).is_power_of_two() {
|
||||
return Err(Error::ProtoGalaxy(ProtoGalaxyError::WrongNumInstances(k)));
|
||||
}
|
||||
|
||||
// absorb the committed instances
|
||||
transcript.absorb_committed_instance(instance)?;
|
||||
for ci in vec_instances.iter() {
|
||||
transcript.absorb_committed_instance(ci)?;
|
||||
}
|
||||
|
||||
let delta = transcript.get_challenge();
|
||||
let deltas = exponential_powers(delta, t);
|
||||
|
||||
let f_w = eval_f(r1cs, &w.w)?;
|
||||
|
||||
// F(X)
|
||||
let F_X: SparsePolynomial<C::ScalarField> =
|
||||
calc_f_from_btree(&f_w, &instance.betas, &deltas).expect("Error calculating F[x]");
|
||||
let F_X_dense = DensePolynomial::from(F_X.clone());
|
||||
transcript.absorb_vec(&F_X_dense.coeffs);
|
||||
|
||||
let alpha = transcript.get_challenge();
|
||||
|
||||
// eval F(alpha)
|
||||
let F_alpha = F_X.evaluate(&alpha);
|
||||
|
||||
// betas*
|
||||
let betas_star = betas_star(&instance.betas, &deltas, alpha);
|
||||
|
||||
// sanity check: check that the new randomized instance (the original instance but with
|
||||
// 'refreshed' randomness) satisfies the relation.
|
||||
#[cfg(test)]
|
||||
tests::check_instance(
|
||||
r1cs,
|
||||
&CommittedInstance {
|
||||
phi: instance.phi,
|
||||
betas: betas_star.clone(),
|
||||
e: F_alpha,
|
||||
},
|
||||
w,
|
||||
)?;
|
||||
|
||||
let ws: Vec<Vec<C::ScalarField>> = std::iter::once(w.w.clone())
|
||||
.chain(
|
||||
vec_w
|
||||
.iter()
|
||||
.map(|wj| {
|
||||
if wj.w.len() != n {
|
||||
return Err(Error::NotSameLength(
|
||||
"wj.w.len()".to_string(),
|
||||
wj.w.len(),
|
||||
"n".to_string(),
|
||||
n,
|
||||
));
|
||||
}
|
||||
Ok(wj.w.clone())
|
||||
})
|
||||
.collect::<Result<Vec<Vec<C::ScalarField>>, Error>>()?,
|
||||
)
|
||||
.collect::<Vec<Vec<C::ScalarField>>>();
|
||||
|
||||
let H =
|
||||
GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).ok_or(Error::NewDomainFail)?;
|
||||
let G_domain = GeneralEvaluationDomain::<C::ScalarField>::new((d * k) + 1)
|
||||
.ok_or(Error::NewDomainFail)?;
|
||||
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
|
||||
|
||||
// K(X) is computed in a naive way here; future iterations will compute K(X) as described in
// Claim 4.5 of the paper.
|
||||
let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); G_domain.size()];
|
||||
for (hi, h) in G_domain.elements().enumerate() {
|
||||
// each iteration evaluates G(h)
|
||||
// inner = L_0(X) * w + \sum_{j=1}^{k} L_j(X) * w_j
|
||||
let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
|
||||
for (i, w) in ws.iter().enumerate() {
|
||||
// Li_w_h = (Li(X)*wj)(h) = Li(h) * wj
|
||||
let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
|
||||
for (j, wj) in w.iter().enumerate() {
|
||||
Liw_h[j] = (&L_X[i] * *wj).evaluate(&h);
|
||||
}
|
||||
|
||||
for j in 0..inner.len() {
|
||||
inner[j] += Liw_h[j];
|
||||
}
|
||||
}
|
||||
let f_ev = eval_f(r1cs, &inner)?;
|
||||
|
||||
let mut Gsum = C::ScalarField::zero();
|
||||
for (i, f_ev_i) in f_ev.iter().enumerate() {
|
||||
let pow_i_betas = pow_i(i, &betas_star);
|
||||
let curr = pow_i_betas * f_ev_i;
|
||||
Gsum += curr;
|
||||
}
|
||||
G_evals[hi] = Gsum;
|
||||
}
|
||||
let G_X: DensePolynomial<C::ScalarField> =
|
||||
Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals, G_domain).interpolate();
|
||||
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
|
||||
// K(X) = (G(X) - F(alpha)*L_0(X)) / Z(X)
|
||||
// Notice that L0(X)*F(alpha) will be 0 in the base case (the running instance of the first
// folding iteration), since for a satisfying instance F(alpha) = 0.
|
||||
let L0_e = &L_X[0] * F_alpha;
|
||||
let G_L0e = &G_X - &L0_e;
|
||||
// Pending optimization: move division by Z_X to the prev loop
|
||||
let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H).ok_or(Error::ProtoGalaxy(
|
||||
ProtoGalaxyError::CouldNotDivideByVanishing,
|
||||
))?;
|
||||
if !remainder.is_zero() {
|
||||
return Err(Error::ProtoGalaxy(ProtoGalaxyError::RemainderNotZero));
|
||||
}
|
||||
|
||||
transcript.absorb_vec(&K_X.coeffs);
|
||||
|
||||
let gamma = transcript.get_challenge();
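
        // fold the instance and witness using the Lagrange basis evaluated at gamma:
        // e* = F(alpha)*L_0(gamma) + Z(gamma)*K(gamma), phi* = \sum_i L_i(gamma)*phi_i,
        // w* = \sum_i L_i(gamma)*w_i (index 0 being the running instance/witness).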
        let e_star =
            F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);

        let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
        for i in 0..k {
            phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
        }
        let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
        let mut r_w_star: C::ScalarField = w.r_w * L_X[0].evaluate(&gamma);
        for i in 0..k {
            let L_X_at_i1 = L_X[i + 1].evaluate(&gamma);
            w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_at_i1))?;
            r_w_star += vec_w[i].r_w * L_X_at_i1;
        }

        Ok((
            CommittedInstance {
                betas: betas_star,
                phi: phi_star,
                e: e_star,
            },
            Witness {
                w: w_star,
                r_w: r_w_star,
            },
            F_X_dense.coeffs,
            K_X.coeffs,
        ))
    }

    /// Implements the non-interactive Verifier from the folding scheme described in section 4
    pub fn verify(
        transcript: &mut (impl Transcript<C> + ProtoGalaxyTranscript<C>),
        r1cs: &R1CS<C::ScalarField>,
        // running instance
        instance: &CommittedInstance<C>,
        // incoming instances
        vec_instances: &[CommittedInstance<C>],
        // F(X) and K(X) coefficients sent by the Prover
        F_coeffs: Vec<C::ScalarField>,
        K_coeffs: Vec<C::ScalarField>,
    ) -> Result<CommittedInstance<C>, Error> {
        let t = instance.betas.len();
        let n = r1cs.A.n_cols;

        // absorb the committed instances
        transcript.absorb_committed_instance(instance)?;
        for ci in vec_instances.iter() {
            transcript.absorb_committed_instance(ci)?;
        }

        let delta = transcript.get_challenge();
        let deltas = exponential_powers(delta, t);

        transcript.absorb_vec(&F_coeffs);

        let alpha = transcript.get_challenge();
        let alphas = all_powers(alpha, n);

        // F(alpha) = e + \sum_{i>=1} F_i * alpha^i
        // (the constant coefficient is replaced by the running instance's error term e)
        let mut F_alpha = instance.e;
        for (i, F_i) in F_coeffs.iter().skip(1).enumerate() {
            F_alpha += *F_i * alphas[i + 1];
        }

        let betas_star = betas_star(&instance.betas, &deltas, alpha);

        let k = vec_instances.len();
        let H =
            GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).ok_or(Error::NewDomainFail)?;
        let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
        let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
        let K_X: DensePolynomial<C::ScalarField> =
            DensePolynomial::<C::ScalarField>::from_coefficients_vec(K_coeffs);

        transcript.absorb_vec(&K_X.coeffs);

        let gamma = transcript.get_challenge();
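
        // recompute the folded instance from the received F(X) and K(X) coefficients:
        // e* = F(alpha)*L_0(gamma) + Z(gamma)*K(gamma), phi* = \sum_i L_i(gamma)*phi_i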
        let e_star =
            F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);

        let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
        for i in 0..k {
            phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
        }

        // return the folded instance
        Ok(CommittedInstance {
            betas: betas_star,
            phi: phi_star,
            e: e_star,
        })
    }
}

// naive impl of pow_i for betas, assuming that betas = (b, b^2, b^4, ..., b^{2^{t-1}})
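// i.e. pow_i(i, betas) = \prod_j ((1 - b_j) + b_j * betas[j]) = \prod_{j : b_j = 1} betas[j],
// where (b_0, ..., b_{t-1}) is the binary decomposition of i; for betas = (β, β^2, β^4, ...)
// this equals β^i (which is what test_pow_i checks).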
fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
    // WIP: check if it makes more sense to do it with ifs instead of arithmetic

    let n = 2_u64.pow(betas.len() as u32);
    let b = bit_decompose(i as u64, n as usize);

    let mut r: F = F::one();
    for (j, beta_j) in betas.iter().enumerate() {
        let mut b_j = F::zero();
        if b[j] {
            b_j = F::one();
        }
        r *= (F::one() - b_j) + b_j * beta_j;
    }
    r
}

/// calculates F(X) using the optimized binary-tree technique
/// described in Claim 4.4
/// of [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
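/// Each leaf holds one f_i(w) as a constant polynomial; at the j-th combining step (j = 0 at the
/// leaves), sibling pairs are combined as left + (β_j + δ_j·X)·right, so the root equals
/// F(X) = \sum_i pow_i(β + X·δ) · f_i(w).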
fn calc_f_from_btree<F: PrimeField>(
    fw: &[F],
    betas: &[F],
    deltas: &[F],
) -> Result<SparsePolynomial<F>, Error> {
    let fw_len = fw.len();
    let betas_len = betas.len();
    let deltas_len = deltas.len();

    // ensure our binary tree is full
    if !fw_len.is_power_of_two() {
        return Err(Error::ProtoGalaxy(ProtoGalaxyError::BTreeNotFull(fw_len)));
    }

    if betas_len != deltas_len {
        return Err(Error::ProtoGalaxy(ProtoGalaxyError::WrongLenBetas(
            betas_len, deltas_len,
        )));
    }

    let mut layers: Vec<Vec<SparsePolynomial<F>>> = Vec::new();
    let leaves: Vec<SparsePolynomial<F>> = fw
        .iter()
        .copied()
        .map(|e| SparsePolynomial::<F>::from_coefficients_slice(&[(0, e)]))
        .collect();
    layers.push(leaves.to_vec());
    let mut current_nodes = leaves.clone();
    while current_nodes.len() > 1 {
        let index = layers.len();
        layers.push(vec![]);
        for (i, ni) in current_nodes.iter().enumerate().step_by(2) {
            let left = ni.clone();
            let right = SparsePolynomial::<F>::from_coefficients_vec(vec![
                (0, betas[layers.len() - 2]),
                (1, deltas[layers.len() - 2]),
            ])
            .mul(&current_nodes[i + 1]);

            layers[index].push(left + right);
        }
        current_nodes = layers[index].clone();
    }
    let root_index = layers.len() - 1;
    Ok(layers[root_index][0].clone())
}

// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
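// returns the Lagrange basis polynomials {L_i(X)} of the given domain, i.e. L_i(ω^j) = 1 if
// i == j and 0 otherwise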
fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
    let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
    for i in 0..domain_n.size() {
        let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
            .map(|k| if k == i { F::one() } else { F::zero() })
            .collect();
        lagrange_polynomials.push(Evaluations::from_vec_and_domain(evals, domain_n).interpolate());
    }
    lagrange_polynomials
}

// f(w) in the R1CS context. For the moment we use R1CS directly; in the future this will be
// abstracted behind a trait.
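// f(w) = (A·w) ∘ (B·w) - C·w (∘ = Hadamard product), which is the all-zeroes vector iff w
// satisfies the R1CS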
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Result<Vec<F>, Error> {
    let Az = mat_vec_mul_sparse(&r1cs.A, w)?;
    let Bz = mat_vec_mul_sparse(&r1cs.B, w)?;
    let Cz = mat_vec_mul_sparse(&r1cs.C, w)?;
    let AzBz = hadamard(&Az, &Bz)?;
    vec_sub(&AzBz, &Cz)
}

#[cfg(test)]
mod tests {
    use super::*;
    use ark_pallas::{Fr, Projective};
    use ark_std::UniformRand;

    use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z};
    use crate::commitment::{pedersen::Pedersen, CommitmentProver};
    use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};

    pub(crate) fn check_instance<C: CurveGroup>(
        r1cs: &R1CS<C::ScalarField>,
        instance: &CommittedInstance<C>,
        w: &Witness<C::ScalarField>,
    ) -> Result<(), Error> {
        if instance.betas.len() != log2(w.w.len()) as usize {
            return Err(Error::NotSameLength(
                "instance.betas.len()".to_string(),
                instance.betas.len(),
                "log2(w.w.len())".to_string(),
                log2(w.w.len()) as usize,
            ));
        }

        let f_w = eval_f(r1cs, &w.w)?; // f(w)

        let mut r = C::ScalarField::zero();
        for (i, f_w_i) in f_w.iter().enumerate() {
            r += pow_i(i, &instance.betas) * f_w_i;
        }
        if instance.e == r {
            return Ok(());
        }
        Err(Error::NotSatisfied)
    }

    #[test]
    fn test_pow_i() {
        let mut rng = ark_std::test_rng();
        let t = 4;
        let n = 16;
        let beta = Fr::rand(&mut rng);
        let betas = exponential_powers(beta, t);
        let not_betas = all_powers(beta, n);

        #[allow(clippy::needless_range_loop)]
        for i in 0..n {
            assert_eq!(pow_i(i, &betas), not_betas[i]);
        }
    }

    #[test]
    fn test_eval_f() {
        let r1cs = get_test_r1cs::<Fr>();
        let mut z = get_test_z::<Fr>(3);

        let f_w = eval_f(&r1cs, &z).unwrap();
        assert!(is_zero_vec(&f_w));

        z[1] = Fr::from(111);
        let f_w = eval_f(&r1cs, &z).unwrap();
        assert!(!is_zero_vec(&f_w));
    }

    // k represents the number of instances to be folded, apart from the running instance
    #[allow(clippy::type_complexity)]
    fn prepare_inputs(
        k: usize,
    ) -> (
        Witness<Fr>,
        CommittedInstance<Projective>,
        Vec<Witness<Fr>>,
        Vec<CommittedInstance<Projective>>,
    ) {
        let mut rng = ark_std::test_rng();
        let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, 100); // 100 is WIP, will be derived from the actual vector length

        let z = get_test_z::<Fr>(3);
        let mut zs: Vec<Vec<Fr>> = Vec::new();
        for i in 0..k {
            let z_i = get_test_z::<Fr>(i + 4);
            zs.push(z_i);
        }

        let n = z.len();
        let t = log2(n) as usize;

        let beta = Fr::rand(&mut rng);
        let betas = exponential_powers(beta, t);

        let witness = Witness::<Fr> {
            w: z.clone(),
            r_w: Fr::rand(&mut rng),
        };
        let phi =
            Pedersen::<Projective>::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap();
        let instance = CommittedInstance::<Projective> {
            phi,
            betas: betas.clone(),
            e: Fr::zero(),
        };
        // same for the other instances
        let mut witnesses: Vec<Witness<Fr>> = Vec::new();
        let mut instances: Vec<CommittedInstance<Projective>> = Vec::new();
        #[allow(clippy::needless_range_loop)]
        for i in 0..k {
            let witness_i = Witness::<Fr> {
                w: zs[i].clone(),
                r_w: Fr::rand(&mut rng),
            };
            let phi_i =
                Pedersen::<Projective>::commit(&pedersen_params, &witness_i.w, &witness_i.r_w)
                    .unwrap();
            let instance_i = CommittedInstance::<Projective> {
                phi: phi_i,
                betas: betas.clone(),
                e: Fr::zero(),
            };
            witnesses.push(witness_i);
            instances.push(instance_i);
        }

        (witness, instance, witnesses, instances)
    }

    #[test]
    fn test_fold_native_case() {
        let k = 7;
        let (witness, instance, witnesses, instances) = prepare_inputs(k);
        let r1cs = get_test_r1cs::<Fr>();

        // init Prover & Verifier's transcript
        let poseidon_config = poseidon_test_config::<Fr>();
        let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
        let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);

        let (folded_instance, folded_witness, F_coeffs, K_coeffs) = Folding::<Projective>::prove(
            &mut transcript_p,
            &r1cs,
            &instance,
            &witness,
            &instances,
            &witnesses,
        )
        .unwrap();

        // verifier
        let folded_instance_v = Folding::<Projective>::verify(
            &mut transcript_v,
            &r1cs,
            &instance,
            &instances,
            F_coeffs,
            K_coeffs,
        )
        .unwrap();

        // check that prover & verifier folded instances hold the same values
        assert_eq!(folded_instance.phi, folded_instance_v.phi);
        assert_eq!(folded_instance.betas, folded_instance_v.betas);
        assert_eq!(folded_instance.e, folded_instance_v.e);
        assert!(!folded_instance.e.is_zero());

        // check that the folded instance satisfies the relation
        check_instance(&r1cs, &folded_instance, &folded_witness).unwrap();
    }

    #[test]
    fn test_fold_various_iterations() {
        let r1cs = get_test_r1cs::<Fr>();

        // init Prover & Verifier's transcript
        let poseidon_config = poseidon_test_config::<Fr>();
        let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
        let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);

        let (mut running_witness, mut running_instance, _, _) = prepare_inputs(0);

        // fold k instances in each of the num_iters iterations
        let k = 7;
        let num_iters = 10;
        for _ in 0..num_iters {
            // generate the instances to be folded
            let (_, _, witnesses, instances) = prepare_inputs(k);

            let (folded_instance, folded_witness, F_coeffs, K_coeffs) =
                Folding::<Projective>::prove(
                    &mut transcript_p,
                    &r1cs,
                    &running_instance,
                    &running_witness,
                    &instances,
                    &witnesses,
                )
                .unwrap();

            // verifier
            let folded_instance_v = Folding::<Projective>::verify(
                &mut transcript_v,
                &r1cs,
                &running_instance,
                &instances,
                F_coeffs,
                K_coeffs,
            )
            .unwrap();

            // check that prover & verifier folded instances hold the same values
            assert_eq!(folded_instance.phi, folded_instance_v.phi);
            assert_eq!(folded_instance.betas, folded_instance_v.betas);
            assert_eq!(folded_instance.e, folded_instance_v.e);
            assert!(!folded_instance.e.is_zero());

            // check that the folded instance satisfies the relation
            check_instance(&r1cs, &folded_instance, &folded_witness).unwrap();

            running_witness = folded_witness;
            running_instance = folded_instance;
        }
    }
}

folding-schemes/src/folding/protogalaxy/mod.rs (new file, 35 lines)
@@ -0,0 +1,35 @@
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use thiserror::Error;

pub mod folding;
pub mod traits;
pub(crate) mod utils;
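
/// CommittedInstance in ProtoGalaxy: a commitment phi to the witness vector, the vector of
/// β challenges, and the error term e (e = 0 for a fresh instance that satisfies the relation).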
#[derive(Clone, Debug)]
pub struct CommittedInstance<C: CurveGroup> {
    phi: C,
    betas: Vec<C::ScalarField>,
    e: C::ScalarField,
}
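
/// Witness: the R1CS witness vector w together with the commitment randomness r_w used for phi.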
#[derive(Clone, Debug)]
pub struct Witness<F: PrimeField> {
    w: Vec<F>,
    r_w: F,
}

#[derive(Debug, Error, PartialEq)]
pub enum ProtoGalaxyError {
    #[error("The remainder from (G(X) - F(α)*L_0(X)) / Z(X) should be zero")]
    RemainderNotZero,
    #[error("Could not divide by the vanishing polynomial")]
    CouldNotDivideByVanishing,
    #[error("The number of incoming instances + 1 should be a power of two, current number of instances: {0}")]
    WrongNumInstances(usize),
    #[error("The number of incoming items should be a power of two, current number of coefficients: {0}")]
    BTreeNotFull(usize),
    #[error("The lengths of β and δ are not equal: |β| = {0}, |δ| = {1}")]
    WrongLenBetas(usize, usize),
}

folding-schemes/src/folding/protogalaxy/traits.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};

use super::CommittedInstance;
use crate::transcript::{poseidon::PoseidonTranscript, Transcript};
use crate::Error;

/// ProtoGalaxyTranscript extends [`Transcript`] with the method to absorb ProtoGalaxy's
/// CommittedInstance.
pub trait ProtoGalaxyTranscript<C: CurveGroup>: Transcript<C> {
    fn absorb_committed_instance(&mut self, ci: &CommittedInstance<C>) -> Result<(), Error> {
        self.absorb_point(&ci.phi)?;
        self.absorb_vec(&ci.betas);
        self.absorb(&ci.e);
        Ok(())
    }
}

// Implements ProtoGalaxyTranscript for PoseidonTranscript
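// (the impl block is empty: PoseidonTranscript only relies on the default
// `absorb_committed_instance` implementation provided by the trait)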
impl<C: CurveGroup> ProtoGalaxyTranscript<C> for PoseidonTranscript<C> where
    <C as Group>::ScalarField: Absorb
{
}

folding-schemes/src/folding/protogalaxy/utils.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
use ark_ff::PrimeField;

// returns (b, b^2, b^4, ..., b^{2^{t-1}})
pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
    let mut r = vec![F::zero(); t];
    r[0] = b;
    for i in 1..t {
        r[i] = r[i - 1].square();
    }
    r
}
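
// returns (1, a, a^2, ..., a^{n-1})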
pub fn all_powers<F: PrimeField>(a: F, n: usize) -> Vec<F> {
    let mut r = vec![F::zero(); n];
    for (i, r_i) in r.iter_mut().enumerate() {
        *r_i = a.pow([i as u64]);
    }
    r
}

// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
pub fn betas_star<F: PrimeField>(betas: &[F], deltas: &[F], alpha: F) -> Vec<F> {
    betas
        .iter()
        .zip(
            deltas
                .iter()
                .map(|delta_i| alpha * delta_i)
                .collect::<Vec<F>>(),
        )
        .map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha)
        .collect()
}